# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import numpy as np
from PIL import Image
import torch
import torchvision
from platforms.platform import get_platform
class Dataset(abc.ABC, torch.utils.data.Dataset):
"""The base class for all datasets in this framework."""
@staticmethod
@abc.abstractmethod
def num_test_examples() -> int:
pass
@staticmethod
@abc.abstractmethod
def num_train_examples() -> int:
pass
@staticmethod
@abc.abstractmethod
def num_classes() -> int:
pass
@staticmethod
@abc.abstractmethod
def get_train_set(use_augmentation: bool) -> 'Dataset':
pass
@staticmethod
@abc.abstractmethod
def get_test_set() -> 'Dataset':
pass
def __init__(self, examples: np.ndarray, labels, enumerate_examples: bool):
"""Create a dataset object.
examples is a numpy array of the examples (or the information necessary to get them).
Only the first dimension matters for use in this abstract class.
labels is a numpy array of the labels. Each entry is a zero-indexed integer encoding
of the label.
"""
if examples.shape[0] != labels.shape[0]:
raise ValueError('Different number of examples ({}) and labels ({}).'.format(
                examples.shape[0], labels.shape[0]))
self._examples = examples
self._labels = labels if isinstance(labels, np.ndarray) else labels.numpy()
self._subsampled = False
self._enumerate_examples = enumerate_examples
def randomize_labels(self, seed: int, fraction: float) -> None:
"""Randomize the labels of the specified fraction of the dataset."""
num_to_randomize = np.ceil(len(self._labels) * fraction).astype(int)
randomized_labels = np.random.RandomState(seed=seed).randint(self.num_classes(), size=num_to_randomize)
examples_to_randomize = np.random.RandomState(seed=seed+1).permutation(len(self._labels))[:num_to_randomize]
self._labels[examples_to_randomize] = randomized_labels
def filter(self, mask: np.ndarray) -> None:
examples_to_retain = np.arange(len(self._labels))[mask == 1]
self._examples = self._examples[examples_to_retain]
self._labels = self._labels[examples_to_retain]
def subsample(self, seed: int, fraction: float) -> None:
"""Subsample the dataset."""
if self._subsampled:
raise ValueError('Cannot subsample more than once.')
self._subsampled = True
examples_to_retain = np.ceil(len(self._labels) * fraction).astype(int)
examples_to_retain = np.random.RandomState(seed=seed+1).permutation(len(self._labels))[:examples_to_retain]
self._examples = self._examples[examples_to_retain]
self._labels = self._labels[examples_to_retain]
def __len__(self):
return self._labels.size
def __getitem__(self, index):
"""If there is custom logic for example loading, this method should be overridden."""
output = (self._examples[index], self._labels[index])
return (index, output) if self._enumerate_examples else output
class ImageDataset(Dataset):
@abc.abstractmethod
def example_to_image(self, example: np.ndarray) -> Image: pass
def __init__(self, examples, labels, image_transforms=None, tensor_transforms=None,
joint_image_transforms=None, joint_tensor_transforms=None, enumerate_examples=False):
super(ImageDataset, self).__init__(examples, labels, enumerate_examples=enumerate_examples)
self._image_transforms = image_transforms or []
self._tensor_transforms = tensor_transforms or []
self._joint_image_transforms = joint_image_transforms or []
self._joint_tensor_transforms = joint_tensor_transforms or []
self._composed = None
def __getitem__(self, index):
if not self._composed:
self._composed = torchvision.transforms.Compose(
self._image_transforms + [torchvision.transforms.ToTensor()] + self._tensor_transforms)
example, label = self._examples[index], self._labels[index]
example = self.example_to_image(example)
for t in self._joint_image_transforms: example, label = t(example, label)
example = self._composed(example)
for t in self._joint_tensor_transforms: example, label = t(example, label)
return (index, (example, label)) if self._enumerate_examples else (example, label)
def blur(self, blur_factor: float) -> None:
"""Add a transformation that blurs the image by downsampling by blur_factor."""
def blur_transform(image):
size = list(image.size)
image = torchvision.transforms.Resize([int(s / blur_factor) for s in size])(image)
image = torchvision.transforms.Resize(size)(image)
return image
self._image_transforms.append(blur_transform)
def unsupervised_rotation(self, seed: int):
"""Switch the task to unsupervised rotation."""
self._labels = np.random.RandomState(seed=seed).randint(4, size=self._labels.size)
def rotate_transform(image, label):
return torchvision.transforms.RandomRotation(label*90)(image), label
self._joint_image_transforms.append(rotate_transform)
class ShuffleSampler(torch.utils.data.sampler.Sampler):
def __init__(self, num_examples):
self._num_examples = num_examples
self._seed = -1
def __iter__(self):
if self._seed == -1:
indices = list(range(self._num_examples))
elif self._seed is None:
indices = torch.randperm(self._num_examples).tolist()
else:
g = torch.Generator()
            g.manual_seed(self._seed)
indices = torch.randperm(self._num_examples, generator=g).tolist()
return iter(indices)
def __len__(self):
return self._num_examples
def shuffle_dataorder(self, seed: int):
self._seed = seed
class DistributedShuffleSampler(torch.utils.data.distributed.DistributedSampler):
def __init__(self, dataset):
super(DistributedShuffleSampler, self).__init__(
dataset, num_replicas=get_platform().world_size, rank=get_platform().rank)
self._seed = -1
def __iter__(self):
indices = torch.arange(len(self.dataset))
if self._seed != -1:
g = torch.Generator()
g.manual_seed(self._seed or np.random.randint(10e6))
perm = torch.randperm(len(indices), generator=g)
indices = indices[perm]
indices = indices[self.rank:self.total_size:self.num_replicas]
return iter(indices.tolist())
def shuffle_dataorder(self, seed: int):
self._seed = seed
class DataLoader(torch.utils.data.DataLoader):
"""A wrapper that makes it possible to access the custom shuffling logic."""
def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, pin_memory: bool = True, force_sequential: bool = False):
if get_platform().is_distributed and not force_sequential:
self._sampler = DistributedShuffleSampler(dataset)
else:
self._sampler = ShuffleSampler(len(dataset))
self._iterations_per_epoch = np.ceil(len(dataset) / batch_size).astype(int)
if get_platform().is_distributed and not force_sequential:
batch_size //= get_platform().world_size
num_workers //= get_platform().world_size
super(DataLoader, self).__init__(
dataset, batch_size, sampler=self._sampler, num_workers=num_workers,
pin_memory=pin_memory and get_platform().torch_device.type == 'cuda' and not force_sequential)
def shuffle(self, seed: int):
self._sampler.shuffle_dataorder(seed)
@property
def iterations_per_epoch(self):
return self._iterations_per_epoch
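# --- Hedged usage sketch (added; not part of the original framework) ---
# A minimal concrete ImageDataset built from random in-memory arrays. The class
# name, the dataset sizes, and the 28x28 grayscale shape are illustrative
# assumptions, not anything defined by the framework above.
class ToyImageDataset(ImageDataset):
    @staticmethod
    def num_train_examples() -> int: return 100
    @staticmethod
    def num_test_examples() -> int: return 20
    @staticmethod
    def num_classes() -> int: return 10
    @staticmethod
    def get_train_set(use_augmentation: bool) -> 'Dataset':
        rng = np.random.RandomState(0)
        examples = rng.randint(0, 256, (100, 28, 28)).astype(np.uint8)
        labels = rng.randint(0, 10, 100)
        return ToyImageDataset(examples, labels)
    @staticmethod
    def get_test_set() -> 'Dataset':
        rng = np.random.RandomState(1)
        examples = rng.randint(0, 256, (20, 28, 28)).astype(np.uint8)
        labels = rng.randint(0, 10, 20)
        return ToyImageDataset(examples, labels)
    def example_to_image(self, example: np.ndarray) -> Image:
        return Image.fromarray(example)
# e.g. loader = DataLoader(ToyImageDataset.get_train_set(False), batch_size=16, num_workers=0)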
| python |
import os
import time
from NMLearn.classifiers.tree.desicion_tree import classification_tree
from NMLearn.utilities.dataset_utils.mnist import load_mnist_data
from NMLearn.utilities.metrics import accuracy
##########
# config #
##########
# data parameters
DATA_PATH = "<Path to Dataset>"
# model parameters
MAX_FEATURES = 32
MAX_DEPTH = 7
OBJECTIVE_FCN = "gini"
TRAINING_ALGO = "CART"
################
# Load in Data #
################
# load in training data
X_train = load_mnist_data(os.path.join(DATA_PATH, 'train-images-idx3-ubyte.gz'))
Y_train = load_mnist_data(os.path.join(DATA_PATH, 'train-labels-idx1-ubyte.gz'))
# load in test data
X_test = load_mnist_data(os.path.join(DATA_PATH, 't10k-images-idx3-ubyte.gz'))
Y_test = load_mnist_data(os.path.join(DATA_PATH, 't10k-labels-idx1-ubyte.gz'))
#############
# Grow Tree #
#############
model = classification_tree(MAX_DEPTH, to_features_to_check=MAX_FEATURES, training_alogrithim=TRAINING_ALGO, obj_func=OBJECTIVE_FCN)
start = time.time()
model.fit(X_train, Y_train)
duration = time.time()-start
Y_train_prob = model.predict(X_train)
train_acc = accuracy(Y_train_prob, Y_train)
#########################
# Evaluate on test data #
#########################
Y_test_prob = model.predict(X_test)
test_acc = accuracy(Y_test_prob, Y_test)
print("Test Performance: {:.3f}".format(test_acc))
print("Train Performance: {:.3f}".format(train_acc))
| python |
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
from io import open
# Launch command
from os import path
import re
here = path.abspath(path.dirname(__file__))
project_homepage = "https://github.com/rbonghi/ros_jetson_stats"
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
requirements = f.read().splitlines()
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['ros_jetson_stats'],
package_dir={'': 'src'},
author_email="[email protected]",
description="ros_jetson_stats is a bridge from jetson-stats to your ROS controller",
license='MIT',
long_description=long_description,
long_description_content_type="text/markdown",
download_url=(project_homepage + "/archive/master.zip"),
project_urls={
"How To": (project_homepage + "/tree/master/docs"),
"Examples": (project_homepage + "/tree/master/examples"),
"Bug Reports": (project_homepage + "/issues"),
"Source": (project_homepage + "/tree/master")
},
install_requires=requirements,
)
setup(**setup_args)
| python |
# misc.py --- Miscellaneous utility functions
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, 2016 Florent Rougon
#
# This file is distributed under the terms of the DO WHAT THE FUCK YOU WANT TO
# PUBLIC LICENSE version 2, dated December 2004, by Sam Hocevar. You should
# have received a copy of this license along with this file. You can also find
# it at <http://www.wtfpl.net/>.
import os
import sys
import platform
import enum
import gettext
import locale
import textwrap
import traceback
from .constants import PROGNAME
def pythonVersionString():
if sys.version_info[3] == "final":
compl = ""
else:
compl = " " + sys.version_info[3]
return "{major}.{minor}.{micro}{compl}".format(
major=sys.version_info[0],
minor=sys.version_info[1],
micro=sys.version_info[2],
compl=compl)
def executableFileName(base):
"""Return the platform-dependent name of an executable."""
if platform.system() == "Windows":
return base + ".exe"
else:
return base
def isDescendantWidget(maybeParent, widget):
"""Return True if 'widget' is 'maybeParent' or a descendant of it.
Widget parenthood is tested for Tk in this function.
"""
if widget is maybeParent:
return True
else:
return any(( isDescendantWidget(w, widget)
for w in maybeParent.winfo_children() ))
# Based on an example from the 'enum' documentation
class OrderedEnum(enum.Enum):
"""Base class for enumerations whose members can be ordered.
Contrary to enum.IntEnum, this class maintains normal enum.Enum
invariants, such as members not being comparable to members of other
enumerations (nor of any other class, actually).
"""
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
def __eq__(self, other):
if self.__class__ is other.__class__:
return self.value == other.value
return NotImplemented
def __ne__(self, other):
if self.__class__ is other.__class__:
return self.value != other.value
return NotImplemented
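# --- Hedged illustration (added; not part of the original module) ---
# A minimal OrderedEnum subclass: members compare by value within the class,
# e.g. Severity.LOW < Severity.HIGH is True, while Severity.LOW < 2 raises
# TypeError because comparisons with other types return NotImplemented.
class Severity(OrderedEnum):
    LOW = 1
    MEDIUM = 2
    HIGH = 3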
def normalizeHeading(azimuth):
# x % y always has the sign of y
a = round(azimuth % 360.0)
return a if a else 360
class DecimalCoord(float):
def __str__(self):
# 8 decimal places, as recommended for latitudes and longitudes in
# the apt.dat v1000 spec
return locale.format("%.08f", self)
def __repr__(self):
return "{}.{}({!r})".format(__name__, type(self).__name__, float(self))
def floatRepr(self):
return repr(float(self))
def precisionRepr(self):
# Used when passing --lat or --lon options to make sure we don't
# lose any precision because of the __str__() above. 10 should
# be largely enough, otherwise there is nothing magical about
# this value.
return "{:.010f}".format(self)
def __add__(self, other):
if self.__class__ is other.__class__:
return DecimalCoord(float(self) + float(other))
else:
return NotImplemented
def __sub__(self, other):
if self.__class__ is other.__class__:
return DecimalCoord(float(self) - float(other))
else:
return NotImplemented
    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return DecimalCoord(float(self) * float(other))
        else:
            return NotImplemented
    def __truediv__(self, other):
        if isinstance(other, (int, float)):
            return DecimalCoord(float(self) / float(other))
        else:
            return NotImplemented
# Similar to processPosition() in src/Airports/dynamicloader.cxx of the
# FlightGear source code (version 3.7)
def mixedToDecimalCoords(s):
"""Convert from e.g., 'W122 22.994' to -122.38323333333334 (float).
The source format is used in FlightGear groundnet files. The first
number represents degrees and must be an integer. The second number
is written as a decimal number and represents minutes of angle.
"""
if not s:
raise ValueError(_("empty coordinate string"))
if s[0] in "NE":
sign = 1
elif s[0] in "SW":
sign = -1
else:
raise ValueError(_("unexpected first character in mixed-style "
"coordinate string: {char!r}").format(char=s[0]))
degree = int(s[1:s.index(' ', 1)])
minutes = float(s[s.index(' ', 1) + 1:])
return DecimalCoord(sign * (degree + minutes/60.0))
# ****************************************************************************
# Thin abstraction layer offering an API similar to that of pkg_resources. By
# changing the functions below, it would be trivial to switch to pkg_resources
# should the need arise (remove _localPath() and use the pkg_resources
# functions in the most straightforward way).
# ****************************************************************************
def _localPath(path):
return os.path.join(*([os.path.dirname(__file__)] + path.split('/')))
def resourceExists(path):
return os.path.exists(_localPath(path))
def resourcelistDir(path):
return os.listdir(_localPath(path))
def resourceIsDir(path):
return os.path.isdir(_localPath(path))
def binaryResourceStream(path):
# The returned stream is always in binary mode (yields bytes, not
# strings). It is a context manager (supports the 'with' statement).
return open(_localPath(path), mode="rb")
def textResourceStream(path, encoding='utf-8'):
# The return value is a context manager (supports the 'with' statement).
return open(_localPath(path), mode="r", encoding=encoding)
def textResourceString(path, encoding='utf-8'):
with textResourceStream(path, encoding=encoding) as f:
s = f.read()
return s
def resourceFilename(path):
return _localPath(path)
# **********************************************************************
# * Context-sensitive translation support *
# **********************************************************************
class TranslationHelper:
"""Class providing context-sensitive translations.
At the time of this writing, GNU gettext supports this, but not the
gettext module of the Python standard library.
"""
def __init__(self, config):
"""Constructor for TranslationHelper instances.
config -- a Config instance
"""
from .constants import MESSAGES, LOCALE_DIR
langCode = config.language.get()
if not langCode:
try:
langCode = gettext.translation(
MESSAGES, LOCALE_DIR).info()['language']
except OSError:
# There is no translation for the current locale, use English
langCode = "en"
try:
self.translator = gettext.translation(
MESSAGES, LOCALE_DIR, languages=[langCode])
except FileNotFoundError as e:
moResource = "data/locale/{}/LC_MESSAGES/{}.mo".format(langCode,
MESSAGES)
if not resourceExists(moResource):
msg = textwrap.dedent("""\
Error: unable to initialize the translation system. Your
installation is missing the file '{moFile}'. If you simply
cloned or downloaded {prg}'s Git repository, it is quite normal
that .mo files are missing (they must be generated from their
.po sources). Please refer to {prg}'s installation guide:
docs/INSTALL/INSTALL_en. It has specific instructions that
must be followed for a successful installation from the Git
repository.""").format(
moFile=resourceFilename(moResource), prg=PROGNAME)
l = [traceback.format_exc(), textwrap.fill(msg, width=78)]
print(*l, sep='\n', file=sys.stderr)
sys.exit(1)
else:
raise
def pgettext(self, context, msgid):
s = "{}\x04{}".format(context, msgid)
try:
transl = self.translator._catalog[s]
except KeyError:
if self.translator._fallback:
return self.translator._fallback.pgettext(context, msgid)
else:
return msgid
return transl
def ngettext(self, singular, plural, n):
return self.translator.ngettext(singular, plural, n)
def npgettext(self, context, singular, plural, n):
s = "{}\x04{}".format(context, singular)
pluralForm = self.translator.plural(n)
try:
transl = self.translator._catalog[(s, pluralForm)]
except KeyError:
            if self.translator._fallback:
return self.translator._fallback.npgettext(
context, singular, plural, n)
else:
return (singular if n == 1 else plural)
return transl
def gettext_noop(self, msgid):
return msgid
def N_(self, msgid): # short synonym of gettext_noop()
return msgid
def pgettext_noop(self, context, msgid):
return msgid
def npgettext_noop(self, context, singular, plural, n):
return singular
class Observable:
"""Class to which observers can be attached.
This class is similar to Tkinter variable classes such as StringVar
and IntVar, but accepts arbitrary Python types and is easier to
debug (exceptions raised in Tkinter variable observers are a pain to
debug because the tracebacks don't go beyond the <variable>.set()
calls---in other words, they don't cross the Tk barrier).
Performance should also be better with this class, since it doesn't
have to go through Python → Tk → Python layers. Of course, instances
of this class can't be used directly with Tkinter widgets as Tkinter
variables.
Except for implicit type conversions done by Tkinter, the syntax
used to manipulate a Tkinter StringVar or IntVar, and attach
observers to it, can be used unchanged here. The biggest difference
is that this class uses the values passed to set() as is instead of
automatically converting them as done with Tkinter methods. The
other difference is that callbacks written for this class can rely
on particular arguments being passed, which are not necessarily the
same for a Tkinter variable observer.
Apart from these differences, the semantics should be very close to
those provided by Tkinter variables. Most notably, a 'read' (resp.
'write') observer is called whenever the observable's get() (resp.
set()) method is called---whether the value is actually modified by
set() calls is irrelevant.
"""
def __init__(self, initValue=None):
self.value = initValue
self.readCallbacks = []
self.writeCallbacks = []
def get(self, runCallbacks=True):
value = self.value
if runCallbacks:
for cb in self.readCallbacks:
cb(value)
return value
def set(self, value, runCallbacks=True):
self.value = value
if runCallbacks:
for cb in self.writeCallbacks:
cb(value)
def trace(self, accessType, callback):
if accessType == "w":
self.writeCallbacks.append(callback)
elif accessType == "r":
self.readCallbacks.append(callback)
else:
raise ValueError("invalid access type for trace(): {accessType}"
.format(accessType=accessType))
class ProgressFeedbackHandler:
"""Simple class to interface with widgets indicating progress of a task."""
def __init__(self, text="", min=0.0, max=100.0, value=0.0):
self.setMinMax(min, max)
self.setTextAndValue(text, value)
def setMin(self, value):
self.min = float(value)
self.amplitude = self.max - self.min
def setMax(self, value):
self.max = float(value)
self.amplitude = self.max - self.min
def setMinMax(self, min, max):
self.min, self.max = float(min), float(max)
self.amplitude = self.max - self.min
def setText(self, text):
self.text = text
self.onUpdated()
def setValue(self, value):
self.value = float(value)
self.onUpdated()
def setTextAndValue(self, text, value):
self.text = text
self.value = float(value)
self.onUpdated()
def startPhase(self, text, min, max):
self.text = text
self.setMinMax(min, max)
self.setValue(min)
self.onUpdated()
def forceUpdate(self):
self.onUpdated()
def onUpdated(self):
"""No-op. To be overridden by subclasses."""
pass
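# --- Hedged usage sketch (added; not part of the original module) ---
# Ties together mixedToDecimalCoords() and Observable; the groundnet-style
# coordinate string and the write callback are illustrative.
def _demoMiscUsage():
    lon = mixedToDecimalCoords("W122 22.994")   # DecimalCoord(-122.38323333...)
    halved = lon / 2                            # still a DecimalCoord
    obs = Observable(initValue=None)
    obs.trace("w", lambda value: print("longitude set to", value))
    obs.set(lon)                                # runs the write callback
    return lon, halved, obs.get(runCallbacks=False)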
| python |
import os
import operator
import unittest
from ..utils.py3compat import execfile
from .testing import assert_point_in_collection
def mapcall(name, iterative):
return list(map(operator.methodcaller(name), iterative))
class TestExamples(unittest.TestCase):
from os.path import abspath, dirname, join
root_path = join(dirname(dirname(dirname(abspath(__file__)))),
'doc', 'source', 'examples')
def run_example(self, name):
self.ns = ns = {}
filename = os.path.join(self.root_path, name)
execfile(filename, ns)
self.plotter = plotter = ns['plotter']
self.config = plotter.config
self.ax = plotter.cax.ax
def assert_number_of_lines(self, num):
lines = self.ax.get_lines()
assert len(lines) == num
def test_simple(self):
self.run_example('simple.py')
self.assert_number_of_lines(2)
def test_two(self):
self.run_example('two.py')
self.assert_number_of_lines(4)
def test_config_inheritance(self):
self.run_example('config_inheritance.py')
ax = self.ax
lines = ax.get_lines()
colors = mapcall('get_color', lines)
widths = mapcall('get_linewidth', lines)
assert colors == ['blue'] + ['black'] * 3
assert widths == [5] + [1.0] * 3
def test_switching(self):
self.run_example('switching.py')
self.assert_number_of_lines(8)
def test_switching_uniq_boundary(self):
self.run_example('switching_uniq_boundary.py')
ax = self.ax
lines = ax.get_lines()
colors = mapcall('get_color', lines)
assert colors == ['b', 'k', 'k', 'g', 'r']
def test_switching_region_color(self):
from matplotlib.colors import colorConverter
from numpy.testing import assert_almost_equal
self.run_example('switching_region_color.py')
actual_colors = mapcall('get_facecolor', self.ax.collections)
desired_colors = [[colorConverter.to_rgba('gray')]] * 3
assert_almost_equal(actual_colors, desired_colors)
def test_positive_direction(self):
self.run_example('positive_direction.py')
ax = self.ax
lines = ax.get_lines()
colors = mapcall('get_color', lines)
assert colors[:2] == ['b', 'g']
assert set(colors) == set(['b', 'g'])
def test_boundary_labels(self):
self.run_example('boundary_labels.py')
ax = self.ax
leg = ax.get_legend()
labels = [text.get_text() for text in leg.texts]
assert labels == ['$x ^ 2$', '$x + 5$']
def test_annotate_regions(self):
self.run_example('annotate_regions.py')
from matplotlib import pyplot
pyplot.draw()
def test_divide_regions(self):
self.run_example('divide_regions.py')
from matplotlib import pyplot
pyplot.draw()
def test_explicit_regions(self):
self.run_example('explicit_regions.py')
(r0, r1) = self.plotter.regions
assert len(r0.cax.collections) == 1
assert len(r1.cax.collections) == 1
c0 = r0.cax.collections[0]
c1 = r1.cax.collections[0]
assert_point_in_collection(c0, 0 + 0.5, 0.5)
assert_point_in_collection(c1, 1 + 0.5, 0.5)
assert_point_in_collection(c0, 0 + 1.0, 1.0, negate=True)
assert_point_in_collection(c1, 1 + 1.0, 1.0)
| python |
import re
import json
import urllib.error
import urllib.parse
import urllib.request
from lib.l2p_tools import handle_url_except, clean_exit
class DMAFinder():
location = {
"latitude": None,
"longitude": None,
"DMA": None,
"city": None,
"active": False
}
DEFAULT_USER_AGENT = 'Mozilla/5.0'
def __init__(self, config):
self.mock_location = config["main"]["mock_location"]
self.zipcode = config["main"]["override_zipcode"]
# Check for user's location
        # Find the user's location via lat\long or zipcode if specified (lat\lon
# taking precedence if both are provided) otherwise use IP. Attempts to
# mirror the geolocation found at locast.org\dma. Also allows for a
# check that Locast reports the area as active.
if self.find_location():
print("Got location as {} - DMA {} - Lat\Lon {}\{}".format(self.location['city'],
self.location['DMA'],
self.location['latitude'],
self.location['longitude'])
)
else:
print("Could not acertain location. Exiting...")
clean_exit(1)
# Check that Locast reports this market is currently active and available.
if not self.location['active']:
print("Locast reports that this DMA\Market area is not currently active!")
clean_exit(1)
def set_location(self, geoRes):
self.location["latitude"] = str(geoRes['latitude'])
self.location["longitude"] = str(geoRes['longitude'])
self.location["DMA"] = str(geoRes['DMA'])
self.location["active"] = geoRes['active']
self.location["city"] = str(geoRes['name'])
def find_location(self):
'''
Mirror the geolocation options found at locast.org/dma since we can't
rely on browser geolocation. If the user provides override coords, or
override_zipcode, resolve location based on that data. Otherwise check
by external ip, (using ipinfo.io, as the site does).
Calls to Locast return JSON in the following format:
{
u'DMA': str (DMA Number),
u'large_url': str,
u'name': str,
u'longitude': lon,
u'latitude': lat,
u'active': bool,
u'announcements': list,
u'small_url': str
}
Note, lat/long is of the location given to the service, not the lat/lon
of the DMA
'''
zip_format = re.compile(r'^[0-9]{5}$')
# Check if the user provided override coords.
if self.mock_location:
return self.get_coord_location()
# Check if the user provided an override zipcode, and that it's valid.
elif self.zipcode and zip_format.match(self.zipcode):
return self.get_zip_location()
else:
# If no override zip, or not a valid ZIP, fallback to IP location.
return self.get_ip_location()
@handle_url_except
def get_zip_location(self):
print("Getting location via provided zipcode {}".format(self.zipcode))
# Get geolocation via Locast, based on user provided zipcode.
req = urllib.request.Request('https://api.locastnet.org/api/watch/dma/zip/{}'.format(self.zipcode))
req.add_header('User-agent', self.DEFAULT_USER_AGENT)
resp = urllib.request.urlopen(req)
geoRes = json.load(resp)
resp.close()
self.set_location(geoRes)
return True
@handle_url_except
def get_ip_location(self):
print("Getting location via IP Address.")
# Get geolocation via Locast. Mirror their website and use https://ipinfo.io/ip to get external IP.
ip_resp = urllib.request.urlopen('https://ipinfo.io/ip')
ip = ip_resp.read().strip()
ip_resp.close()
print("Got external IP {}.".format(ip.decode('utf-8')))
# Query Locast by IP, using a 'client_ip' header.
req = urllib.request.Request('https://api.locastnet.org/api/watch/dma/ip')
req.add_header('client_ip', ip)
req.add_header('User-agent', self.DEFAULT_USER_AGENT)
resp = urllib.request.urlopen(req)
geoRes = json.load(resp)
resp.close()
self.set_location(geoRes)
return True
@handle_url_except
def get_coord_location(self):
print("Getting location via provided lat\lon coordinates.")
# Get geolocation via Locast, using lat\lon coordinates.
lat = self.mock_location['latitude']
lon = self.mock_location['longitude']
req = urllib.request.Request('https://api.locastnet.org/api/watch/dma/{}/{}'.format(lat, lon))
req.add_header('Content-Type', 'application/json')
req.add_header('User-agent', self.DEFAULT_USER_AGENT)
resp = urllib.request.urlopen(req)
geoRes = json.load(resp)
resp.close()
self.set_location(geoRes)
return True
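# --- Hedged usage sketch (added; not part of the original module) ---
# Minimal config dict with the two keys read in __init__; the values are
# placeholders. Note that instantiating DMAFinder performs live lookups
# against the Locast API.
def example_dma_lookup():
    config = {"main": {"mock_location": None, "override_zipcode": "78701"}}
    return DMAFinder(config)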
| python |
from PyQt5 import QtCore as qtc
import cv2
import numpy as np
class DetectionsDrawer(qtc.QObject):
detections_drawn = qtc.pyqtSignal(np.ndarray)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dst_h = None
self.dst_w = None
@qtc.pyqtSlot(tuple)
def run(self, inference_output):
uid, ndarr, detections = inference_output
self.dst_h, self.dst_w, _ = ndarr.shape
color = (0, 0, 255)
for label, confidence, bbox in detections:
print(str(label) + ": " + str(confidence))
left, top, right, bottom = self._relative_to_abs_rect(bbox)
cv2.rectangle(ndarr, (left, top), (right, bottom), color, 1)
cv2.putText(ndarr, "{} [{:.0f}]".format(label, float(confidence)), (left, top - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
self.detections_drawn.emit(ndarr)
def _relative_to_abs_rect(self, bbox):
x, y, w, h = bbox
abs_x = x * self.dst_w
abs_y = y * self.dst_h
abs_w = w * self.dst_w
abs_h = h * self.dst_h
left = int(abs_x - (abs_w / 2))
top = int(abs_y - (abs_h / 2))
right = int(abs_x + (abs_w / 2))
bottom = int(abs_y + (abs_h / 2))
return left, top, right, bottom
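# --- Hedged illustration (added; not part of the original module) ---
# The same center-based conversion as _relative_to_abs_rect(), written as a free
# function with an explicit frame size; bbox is assumed to be YOLO-style fractional
# (center_x, center_y, width, height). For example,
# relative_to_abs((0.5, 0.5, 0.25, 0.5), 640, 480) returns (240, 120, 400, 360).
def relative_to_abs(bbox, frame_w, frame_h):
    x, y, w, h = bbox
    abs_x, abs_y = x * frame_w, y * frame_h
    abs_w, abs_h = w * frame_w, h * frame_h
    return (int(abs_x - abs_w / 2), int(abs_y - abs_h / 2),
            int(abs_x + abs_w / 2), int(abs_y + abs_h / 2))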
| python |
from __future__ import print_function
import sys
import numpy as np
from yggdrasil.interface.YggInterface import YggRpcServer
from yggdrasil.tools import sleep
def fibServer(args):
sleeptime = float(args[0])
print('Hello from Python rpcFibSrv: sleeptime = %f' % sleeptime)
    # Create server-side RPC connection using the model name
rpc = YggRpcServer("rpcFibSrv", "%d", "%d %d")
# Continue receiving requests until error occurs (the connection is closed
# by all clients that have connected).
while True:
print('rpcFibSrv(P): receiving...')
retval, rpc_in = rpc.rpcRecv()
if not retval:
print('rpcFibSrv(P): end of input')
break
# Compute fibonacci number
print('rpcFibSrv(P): <- input %d' % rpc_in[0], end='')
pprev = 0
prev = 1
result = 1
fib_no = 1
arg = rpc_in[0]
while fib_no < arg:
result = prev + pprev
pprev = prev
prev = result
fib_no = fib_no + 1
print(' ::: ->(%2d %2d)' % (arg, result))
# Sleep and then send response back
sleep(float(sleeptime))
flag = rpc.rpcSend(arg, np.int32(result))
if not flag:
raise RuntimeError('rpcFibSrv(P): ERROR sending')
print('Goodbye from Python rpcFibSrv')
if __name__ == '__main__':
fibServer(sys.argv[1:])
| python |
from bs4 import BeautifulSoup, SoupStrainer
import requests
import time
def extrai_html(url_pronta):
# PASSAR TAG PRINCIPAL
    # PASS THE MAIN TAG
    custom = SoupStrainer('div', {'class': 'item'})
header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
req = ''
while req == '':
try:
req = requests.get(url_pronta, headers=header)
break
except:
print("Connection refused by the server..")
print("Let me sleep for 5 seconds")
print("ZZzzzz...")
time.sleep(5)
print("Was a nice sleep, now let me continue...")
continue
response = req.text
html = BeautifulSoup(response, 'lxml', parse_only=custom)
return html
def extrai_html_artigo(url_pronta):
# PASSAR TAG PRINCIPAL
    # PASS THE MAIN TAG
    custom = SoupStrainer('article', {'id': 'materia_texto'})
header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
req = ''
while req == '':
try:
req = requests.get(url_pronta, headers=header)
break
except:
print("Connection refused by the server..")
print("Let me sleep for 5 seconds")
print("ZZzzzz...")
time.sleep(5)
print("Was a nice sleep, now let me continue...")
continue
response = req.text
html = BeautifulSoup(response, 'lxml', parse_only=custom)
return html
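# --- Hedged refactoring sketch (added; not part of the original script) ---
# The two functions above duplicate the same retry loop; the shared pattern could
# be factored out like this. The helper name and the shortened retry message are
# illustrative, not from the source.
def extrai_html_generico(url_pronta, custom):
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest"
    }
    while True:
        try:
            req = requests.get(url_pronta, headers=header)
            break
        except requests.exceptions.RequestException:
            print("Connection refused by the server, retrying in 5 seconds...")
            time.sleep(5)
    return BeautifulSoup(req.text, 'lxml', parse_only=custom)
# e.g. extrai_html_generico(url, SoupStrainer('div', {'class': 'item'}))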
| python |
import pprint
from uuid import uuid4
from twisted.internet.defer import Deferred, DeferredList, maybeDeferred
from twisted.web.resource import Resource
from twisted.internet import reactor
from twisted.web import server
from .base import BaseServer, LOGGER
from ..resources import InterfaceResource, ExposedResource
from ..aws import sdb_now
from ..evaluateboolean import evaluateBoolean
PRETTYPRINTER = pprint.PrettyPrinter(indent=4)
class InterfaceServer(BaseServer):
exposed_functions = []
exposed_function_resources = {}
def __init__(self,
aws_access_key_id,
aws_secret_access_key,
aws_sdb_reservation_domain,
aws_s3_reservation_cache_bucket=None,
aws_s3_http_cache_bucket=None,
aws_s3_storage_bucket=None,
aws_sdb_coordination_domain=None,
max_simultaneous_requests=50,
max_requests_per_host_per_second=1,
max_simultaneous_requests_per_host=5,
port=5000,
log_file='interfaceserver.log',
log_directory=None,
log_level="debug",
name=None,
time_offset=None):
        if name is None:
name = "AWSpider Interface Server UUID: %s" % self.uuid
resource = Resource()
interface_resource = InterfaceResource(self)
resource.putChild("interface", interface_resource)
self.function_resource = Resource()
resource.putChild("function", self.function_resource)
self.site_port = reactor.listenTCP(port, server.Site(resource))
BaseServer.__init__(
self,
aws_access_key_id,
aws_secret_access_key,
aws_s3_reservation_cache_bucket=aws_s3_reservation_cache_bucket,
aws_s3_http_cache_bucket=aws_s3_http_cache_bucket,
aws_sdb_reservation_domain=aws_sdb_reservation_domain,
aws_s3_storage_bucket=aws_s3_storage_bucket,
aws_sdb_coordination_domain=aws_sdb_coordination_domain,
max_simultaneous_requests=max_simultaneous_requests,
max_requests_per_host_per_second=max_requests_per_host_per_second,
max_simultaneous_requests_per_host=max_simultaneous_requests_per_host,
log_file=log_file,
log_directory=log_directory,
log_level=log_level,
name=name,
time_offset=time_offset,
port=port)
def start(self):
reactor.callWhenRunning(self._start)
return self.start_deferred
def _start(self):
deferreds = []
if self.time_offset is None:
deferreds.append(self.getTimeOffset())
d = DeferredList(deferreds, consumeErrors=True)
d.addCallback(self._startCallback)
def _startCallback(self, data):
for row in data:
if row[0] == False:
d = self.shutdown()
d.addCallback(self._startHandleError, row[1])
return d
d = BaseServer.start(self)
def shutdown(self):
deferreds = []
LOGGER.debug("%s stopping on main HTTP interface." % self.name)
d = self.site_port.stopListening()
if isinstance(d, Deferred):
deferreds.append(d)
if len(deferreds) > 0:
d = DeferredList(deferreds)
d.addCallback(self._shutdownCallback)
return d
else:
return self._shutdownCallback(None)
def _shutdownCallback(self, data):
return BaseServer.shutdown(self)
def makeCallable(self, func, interval=0, name=None, expose=False):
function_name = BaseServer.makeCallable(
self,
func,
interval=interval,
name=name,
expose=expose)
if expose:
self.exposed_functions.append(function_name)
er = ExposedResource(self, function_name)
function_name_parts = function_name.split("/")
if len(function_name_parts) > 1:
if function_name_parts[0] in self.exposed_function_resources:
r = self.exposed_function_resources[function_name_parts[0]]
else:
r = Resource()
self.exposed_function_resources[function_name_parts[0]] = r
self.function_resource.putChild(function_name_parts[0], r)
r.putChild(function_name_parts[1], er)
else:
self.function_resource.putChild(function_name_parts[0], er)
LOGGER.info("Function %s is now available via the HTTP interface." % function_name)
def createReservation(self, function_name, **kwargs):
if not isinstance(function_name, str):
for key in self.functions:
if self.functions[key]["function"] == function_name:
function_name = key
break
if function_name not in self.functions:
raise Exception("Function %s does not exist." % function_name)
function = self.functions[function_name]
filtered_kwargs = {}
for key in function["required_arguments"]:
if key in kwargs:
#filtered_kwargs[key] = convertToUTF8(kwargs[key])
filtered_kwargs[key] = kwargs[key]
else:
raise Exception("Required parameter '%s' not found. Required parameters are %s. Optional parameters are %s." % (key, function["required_arguments"], function["optional_arguments"]))
for key in function["optional_arguments"]:
if key in kwargs:
#filtered_kwargs[key] = convertToUTF8(kwargs[key])
filtered_kwargs[key] = kwargs[key]
if function["interval"] > 0:
reserved_arguments = {}
reserved_arguments["reservation_function_name"] = function_name
reserved_arguments["reservation_created"] = sdb_now(offset=self.time_offset)
reserved_arguments["reservation_next_request"] = reserved_arguments["reservation_created"]
reserved_arguments["reservation_error"] = "0"
arguments = {}
arguments.update(reserved_arguments)
arguments.update(filtered_kwargs)
uuid = uuid4().hex
LOGGER.debug("Creating reservation on SimpleDB for %s, %s." % (function_name, uuid))
a = self.sdb.putAttributes(self.aws_sdb_reservation_domain, uuid, arguments)
a.addCallback(self._createReservationCallback, function_name, uuid)
a.addErrback(self._createReservationErrback, function_name, uuid)
if "call_immediately" in kwargs and not evaluateBoolean(kwargs["call_immediately"]):
d = DeferredList([a], consumeErrors=True)
else:
LOGGER.debug("Calling %s immediately with arguments:\n%s" % (function_name, PRETTYPRINTER.pformat(filtered_kwargs)))
self.active_jobs[uuid] = True
b = self.callExposedFunction(function["function"], filtered_kwargs, function_name, uuid=uuid)
d = DeferredList([a,b], consumeErrors=True)
d.addCallback(self._createReservationCallback2, function_name, uuid)
d.addErrback(self._createReservationErrback2, function_name, uuid)
return d
else:
LOGGER.debug("Calling %s with arguments:\n%s" % (function_name, PRETTYPRINTER.pformat(filtered_kwargs)))
d = self.callExposedFunction(function["function"], filtered_kwargs, function_name)
return d
def _createReservationCallback(self, data, function_name, uuid):
LOGGER.error(data)
LOGGER.debug("Created reservation on SimpleDB for %s, %s." % (function_name, uuid))
return uuid
def _createReservationErrback(self, error, function_name, uuid):
LOGGER.error("Unable to create reservation on SimpleDB for %s:%s, %s.\n" % (function_name, uuid, error))
return error
def _createReservationCallback2(self, data, function_name, uuid):
for row in data:
if row[0] == False:
raise row[1]
if len(data) == 1:
return {data[0][1]:{}}
else:
return {data[0][1]:data[1][1]}
def _createReservationErrback2(self, error, function_name, uuid):
LOGGER.error("Unable to create reservation for %s:%s, %s.\n" % (function_name, uuid, error))
return error
def showReservation(self, uuid):
d = self.sdb.getAttributes(self.aws_sdb_reservation_domain, uuid)
return d
def executeReservation(self, uuid):
sql = "SELECT * FROM `%s` WHERE itemName() = '%s'" % (self.aws_sdb_reservation_domain, uuid)
LOGGER.debug("Querying SimpleDB, \"%s\"" % sql)
d = self.sdb.select(sql)
d.addCallback(self._executeReservationCallback)
d.addErrback(self._executeReservationErrback)
return d
def _executeReservationCallback(self, data):
if len(data) == 0:
raise Exception("Could not find reservation.")
uuid = data.keys()[0]
kwargs_raw = {}
reserved_arguments = {}
# Load attributes into dicts for use by the system or custom functions.
for key in data[uuid]:
if key in self.reserved_arguments:
reserved_arguments[key] = data[uuid][key][0]
else:
kwargs_raw[key] = data[uuid][key][0]
# Check to make sure the custom function is present.
function_name = reserved_arguments["reservation_function_name"]
if function_name not in self.functions:
raise Exception("Unable to process function %s for UUID: %s" % (function_name, uuid))
return
# Check for the presence of all required system attributes.
if "reservation_function_name" not in reserved_arguments:
self.deleteReservation(uuid)
raise Exception("Reservation %s does not have a function name." % uuid)
if "reservation_created" not in reserved_arguments:
self.deleteReservation(uuid, function_name=function_name)
raise Exception("Reservation %s, %s does not have a created time." % (function_name, uuid))
if "reservation_next_request" not in reserved_arguments:
self.deleteReservation(uuid, function_name=function_name)
raise Exception("Reservation %s, %s does not have a next request time." % (function_name, uuid))
if "reservation_error" not in reserved_arguments:
self.deleteReservation(uuid, function_name=function_name)
raise Exception("Reservation %s, %s does not have an error flag." % (function_name, uuid))
# Load custom function.
if function_name in self.functions:
exposed_function = self.functions[function_name]
else:
raise Exception("Could not find function %s." % function_name)
return
# Check for required / optional arguments.
kwargs = {}
for key in kwargs_raw:
if key in exposed_function["required_arguments"]:
kwargs[key] = kwargs_raw[key]
if key in exposed_function["optional_arguments"]:
kwargs[key] = kwargs_raw[key]
        has_required_arguments = True
        for key in exposed_function["required_arguments"]:
            if key not in kwargs:
                has_required_arguments = False
raise Exception("%s, %s does not have required argument %s." % (function_name, uuid, key))
LOGGER.debug("Executing function.\n%s" % function_name)
return self.callExposedFunction(exposed_function["function"], kwargs, function_name, uuid=uuid)
def _executeReservationErrback(self, error):
LOGGER.error("Unable to query SimpleDB.\n%s" % error)
| python |
# Generated by Django 3.0.6 on 2020-05-25 10:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sim', '0007_game_cost'),
]
operations = [
migrations.AddField(
model_name='game',
name='budget',
field=models.IntegerField(default=0),
),
]
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-27 18:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('magic_cards', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='card',
name='loyalty',
field=models.SmallIntegerField(blank=True, null=True),
),
]
| python |
from ursinanetworking import *
from easyursinanetworking import *
Server = UrsinaNetworkingServer("localhost", 25565)
Easy = EasyUrsinaNetworkingServer(Server)
Easy.create_replicated_variable("MyVariable", {"name" : "kevin"})
Easy.update_replicated_variable_by_name("MyVariablee", "name", "jean")
Easy.remove_replicated_variable_by_name("MyVariablee")
while True:
    Easy.process_net_events()
| python |
from PIL import Image, ImageDraw, ImageFont
from pkg_resources import resource_exists, resource_filename, cleanup_resources
def watermark_image(image, wtrmrk_path, corner=2):
'''Adds a watermark image to an instance of a PIL Image.
If the provided watermark image (wtrmrk_path) is
larger than the provided base image (image), then
the watermark image will be automatically resized to
roughly 1/8 the size of the base image.
Args:
image: An instance of a PIL Image. This is the base image.
wtrmrk_path: Path to the watermark image to use.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image
'''
padding = 2
wtrmrk_img = Image.open(wtrmrk_path)
#Need to perform size check in here rather than in options.py because this is
# the only place where we know the size of the image that the watermark is
# being placed onto
if wtrmrk_img.width > (image.width - padding * 2) or wtrmrk_img.height > (
image.height - padding * 2):
res = (int(image.width / 8.0), int(image.height / 8.0))
resize_in_place(wtrmrk_img, res)
pos = get_pos(corner, image.size, wtrmrk_img.size, padding)
was_P = image.mode == 'P'
was_L = image.mode == 'L'
# Fix PIL palette issue by converting palette images to RGBA
if image.mode not in ['RGB', 'RGBA']:
if image.format in ['JPG', 'JPEG']:
image = image.convert('RGB')
else:
image = image.convert('RGBA')
image.paste(wtrmrk_img.convert('RGBA'), pos, wtrmrk_img.convert('RGBA'))
if was_P:
image = image.convert('P', palette=Image.ADAPTIVE, colors=256)
elif was_L:
image = image.convert('L')
return image
def watermark_text(image, text, corner=2):
'''Adds a text watermark to an instance of a PIL Image.
The text will be sized so that the height of the text is
roughly 1/20th the height of the base image. The text will
be white with a thin black outline.
Args:
image: An instance of a PIL Image. This is the base image.
text: Text to use as a watermark.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image
'''
# Load Font
FONT_PATH = ''
if resource_exists(__name__, 'resources/fonts/SourceSansPro-Regular.ttf'):
FONT_PATH = resource_filename(
__name__, 'resources/fonts/SourceSansPro-Regular.ttf')
padding = 5
was_P = image.mode == 'P'
was_L = image.mode == 'L'
# Fix PIL palette issue by converting palette images to RGBA
if image.mode not in ['RGB', 'RGBA']:
if image.format in ['JPG', 'JPEG']:
image = image.convert('RGB')
else:
image = image.convert('RGBA')
# Get drawable image
img_draw = ImageDraw.Draw(image)
fontsize = 1 # starting font size
# portion of image width you want text height to be.
# default font size will have a height that is ~1/20
# the height of the base image.
img_fraction = 0.05
# attempt to use Aperture default font. If that fails, use ImageFont default
try:
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
was_over = False
inc = 2
while True:
if font.getsize(text)[1] > img_fraction * image.height:
if not was_over:
was_over = True
inc = -1
else:
if was_over:
break
# iterate until the text size is just larger than the criteria
fontsize += inc
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
fontsize -= 1
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
except:
# replace with log message
print('Failed to load Aperture font. Using default font instead.')
font = ImageFont.load_default() # Bad because default is suuuuper small
# get position of text
pos = get_pos(corner, image.size, font.getsize(text), padding)
# draw a thin black border
img_draw.text((pos[0] - 1, pos[1]), text, font=font, fill='black')
img_draw.text((pos[0] + 1, pos[1]), text, font=font, fill='black')
img_draw.text((pos[0], pos[1] - 1), text, font=font, fill='black')
img_draw.text((pos[0], pos[1] + 1), text, font=font, fill='black')
# draw the actual text
img_draw.text(pos, text, font=font, fill='white')
# Remove cached font file
cleanup_resources()
del img_draw
if was_P:
image = image.convert('P', palette=Image.ADAPTIVE, colors=256)
elif was_L:
image = image.convert('L')
return image
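# --- Hedged usage sketch (added; not part of the original module) ---
# Applies both helpers above to a photo; the file paths and caption text are
# placeholders, not values from the source.
def _example_watermark(photo_path='photo.jpg', logo_path='logo.png'):
    img = Image.open(photo_path)
    img = watermark_image(img, logo_path, corner=2)
    img = watermark_text(img, 'my caption', corner=2)
    return img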
# Internal method
def resize_in_place(image, res):
image.thumbnail(res)
# Internal method
def get_pos(corner, main_size, sub_size, padding):
if (corner == 0): #top left
position = (padding, padding)
elif (corner == 1): #top right
position = ((main_size[0] - sub_size[0] - padding), padding)
elif (corner == 3): #bottom left
position = (padding, (main_size[1] - sub_size[1] - padding))
else: #bottom right (default)
position = ((main_size[0] - sub_size[0] - padding),
(main_size[1] - sub_size[1] - padding))
    return position
| python |
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import json
import pprint
import requests
import sys
import urllib
import sqlalchemy
from sqlalchemy import *
import pymysql
from coffeeshop import CoffeeShop
from configparser import SafeConfigParser
pymysql.install_as_MySQLdb()
# This client code can run on Python 2.x or 3.x. Your imports can be
# simpler if you only need one of those.
try:
# For Python 3.0 and later
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.parse import urlencode
except ImportError:
# Fall back to Python 2's urllib2 and urllib
from urllib2 import HTTPError
from urllib import quote
from urllib import urlencode
# read config file for secrets
parser = SafeConfigParser()
parser.read('config.ini')
# wrapper function for parsing config file
def my_parser(section, option):
return str(parser.get(section, option).encode('ascii','ignore').decode('utf-8'))
# Yelp Fusion no longer uses OAuth as of December 7, 2017.
# You no longer need to provide Client ID to fetch Data
# It now uses private keys to authenticate requests (API Key)
# You can find it on
# https://www.yelp.com/developers/v3/manage_app
API_KEY = my_parser('coffeeshops', 'API_KEY')
# API constants, you shouldn't have to change these.
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
BUSINESS_PATH = '/v3/businesses/' # Business ID will come after slash.
# Defaults for our simple example.
DEFAULT_TERM = 'coffee'
DEFAULT_LOCATION = 'Austin, TX'
SEARCH_LIMIT = 27
# called in #3 and #6
def request(host, path, api_key, url_params=None):
"""Given your API_KEY, send a GET request to the API.
Args:
host (str): The domain host of the API.
path (str): The path of the API after the domain.
API_KEY (str): Your API Key.
url_params (dict): An optional set of query parameters in the request.
Returns:
dict: The JSON response from the request.
Raises:
HTTPError: An error occurs from the HTTP request.
"""
url_params = url_params or {}
url = '{0}{1}'.format(host, quote(path.encode('utf8')))
headers = {
'Authorization': 'Bearer %s' % api_key,
}
response = requests.request('GET', url, headers=headers, params=url_params)
return response.json()
#5
def get_business(business_id, coffeeshop):
"""Query the Business API by a business ID.
Args:
business_id (str): The ID of the business to query.
Returns:
dict: The JSON response from the request.
"""
global API_KEY
business_path = BUSINESS_PATH + business_id
response = request(API_HOST, business_path, API_KEY)
pprint.pprint(response, indent=2)
hours = "Hours Not Found"
if(('hours' in response)) :
hours = response["hours"]
location = "Location Not Found"
if(('location' in response)) :
location = response["location"]["display_address"]
latitude = "Latitude Not Found"
if(('coordinates' in response)) :
latitude = response["coordinates"]["latitude"]
longitude = "Longitude Not Found"
if(('coordinates' in response)) :
longitude = response["coordinates"]["longitude"]
contact = "No Contact Info"
if(('contact' in response)) :
contact = response["display_phone"]
coffeeshop.location = location
coffeeshop.latitude = latitude
coffeeshop.longitude = longitude
coffeeshop.hours = hours
coffeeshop.phone = contact
#4
def coffee_shop_results(response):
'''
    Parse the JSON object, iterate through the results, and create a CoffeeShop
    object for each coffee shop in the JSON object.
    Return the list of coffee shops.
'''
list_shops = []
for obj in response["businesses"] :
if(obj is not None) :
price = "Price Not Found"
if(('price' in obj)) :
price = obj["price"]
rating = "No Ratings"
if(('rating' in obj)) :
rating = obj["rating"]
img_url = "No Image Found"
if(('image_url' in obj)) :
img_url = obj["image_url"]
coffeeshop = CoffeeShop(obj["name"],
obj["id"],
"n/a",
price,
rating,
img_url,
"n/a")
get_business(coffeeshop.id, coffeeshop)
list_shops.append(coffeeshop)
return list_shops
#3
def search(api_key, term, location):
"""Query the Search API by a search term and location.
Args:
term (str): The search term passed to the API.
location (str): The search location passed to the API.
Returns:
dict: The JSON response from the request.
"""
url_params = {
'term': term.replace(' ', '+'),
'location': location.replace(' ', '+'),
'limit': SEARCH_LIMIT
}
return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)
#2
def query_api(term, location):
"""Queries the API by the input values from the user.
Args:
term (str): The search term to query.
location (str): The location of the business to query.
"""
response = search(API_KEY, term, location)
businesses = response.get('businesses')
if not businesses:
print(u'No businesses for {0} in {1} found.'.format(term, location))
return
coffee_shops = coffee_shop_results(response)
return coffee_shops
# get DB creds
user = my_parser('database', 'user')
pwd = my_parser('database', 'pwd')
host = my_parser('database', 'host')
db = my_parser('database', 'db')
uri = 'mysql://%s:%s@%s/%s' % (user, pwd, host, db)
#1
def main():
'''
Requests the coffeeshops by each city and stores them in our mySQL db.
'''
try:
db = create_engine(uri)
metadata = MetaData()
metadata.reflect(bind=db)
conn = db.connect()
select_st = select([metadata.tables['Cities']])
res = conn.execute(select_st)
for _row in res:
print (_row[1])
coffee_shops = query_api('coffee', _row[1])
for shop in coffee_shops :
ins = insert(metadata.tables['Shops']).values(
shop_name = bytes(shop.name, 'utf8'),
shop_address = bytes(shop.location, 'utf8'),
shop_contact = bytes(shop.phone, 'utf8'),
shop_price = bytes(shop.price, 'utf8'),
shop_hours = bytes(shop.hours, 'utf8'),
shop_rating = shop.rating,
shop_picture = bytes(shop.imageUrl, 'utf8'),
shop_latitude = shop.latitude,
shop_longitude = shop.longitude,
city_id = _row[0]
)
conn = db.connect()
conn.execute(ins)
except HTTPError as error:
sys.exit(
'Encountered HTTP error {0} on {1}:\n {2}\nAbort program.'.format(
error.code,
error.url,
error.read(),
)
)
if __name__ == '__main__':
main()
| python |
import deeplift
import numpy as np
def deeplift_zero_ref(X,score_func,batch_size=200,task_idx=0):
    # use an all-zeros reference
input_references = [np.array([0.0, 0.0, 0.0, 0.0])[None, None, None, :]]
# get deeplift scores
deeplift_scores = score_func(
task_idx=task_idx,
input_data_list=[X],
batch_size=batch_size,
progress_update=None,
input_references_list=input_references)
return deeplift_scores
def deeplift_gc_ref(X,score_func,batch_size=200,task_idx=0):
# use a 40% GC reference
input_references = [np.array([0.3, 0.2, 0.2, 0.3])[None, None, None, :]]
# get deeplift scores
deeplift_scores = score_func(
task_idx=task_idx,
input_data_list=[X],
batch_size=batch_size,
progress_update=None,
input_references_list=input_references)
return deeplift_scores
def deeplift_shuffled_ref(X,score_func,batch_size=200,task_idx=0,num_refs_per_seq=10):
deeplift_scores=score_func(
task_idx=task_idx,
input_data_sequences=X,
num_refs_per_seq=num_refs_per_seq,
batch_size=batch_size)
return deeplift_scores
def get_deeplift_scoring_function(model,target_layer_idx=-2,task_idx=0, num_refs_per_seq=10,reference="shuffled_ref",one_hot_func=None):
"""
Arguments:
model -- a string containing the path to the hdf5 exported model
target_layer_idx -- Layer in the model whose outputs will be interpreted. For classification models we \
interpret the logit (input to the sigmoid), which is the output of layer -2.
For regression models we intepret the model output, which is the output of layer -1.
reference -- one of 'shuffled_ref','gc_ref','zero_ref'
one_hot_func -- one hot function to use for encoding FASTA string inputs; if the inputs are already one-hot-encoded, use the default of None
Returns:
deepLIFT scoring function
"""
assert reference in ["shuffled_ref","gc_ref","zero_ref"]
from deeplift.conversion import kerasapi_conversion as kc
deeplift_model = kc.convert_model_from_saved_files(model,verbose=False)
#get the deeplift score with respect to the logit
score_func = deeplift_model.get_target_contribs_func(
find_scores_layer_idx=0,
target_layer_idx=target_layer_idx)
if reference=="shuffled_ref":
from deeplift.util import get_shuffle_seq_ref_function
from deeplift.dinuc_shuffle import dinuc_shuffle
score_func=get_shuffle_seq_ref_function(
score_computation_function=score_func,
shuffle_func=dinuc_shuffle,
one_hot_func=one_hot_func)
return score_func
def deeplift(score_func, X, batch_size=200,task_idx=0, num_refs_per_seq=10,reference="shuffled_ref",one_hot_func=None):
"""
Arguments:
score_func -- deepLIFT scoring function
X -- numpy array with shape (n_samples, 1, n_bases_in_sample,4) or list of FASTA sequences
batch_size -- number of samples to interpret at once
task_idx -- index indicating which task to perform interpretation on
reference -- one of 'shuffled_ref','gc_ref','zero_ref'
num_refs_per_seq -- integer indicating number of references to use for each input sequence if the reference is set to 'shuffled_ref';if 'zero_ref' or 'gc_ref' is used, this argument is ignored.
one_hot_func -- one hot function to use for encoding FASTA string inputs; if the inputs are already one-hot-encoded, use the default of None
Returns:
(num_task, num_samples, 1, num_bases, sequence_length) deeplift score array.
"""
assert reference in ["shuffled_ref","gc_ref","zero_ref"]
if one_hot_func==None:
#check that dataset has been one-hot-encoded
assert len(np.shape(X)) == 4 and np.shape(X)[1] == 1
if reference=="shuffled_ref":
deeplift_scores=deeplift_shuffled_ref(X,score_func,batch_size,task_idx,num_refs_per_seq)
elif reference=="gc_ref":
deeplift_scores=deeplift_gc_ref(X,score_func,batch_size,task_idx)
elif reference=="zero_ref":
deeplift_scores=deeplift_zero_ref(X,score_func,batch_size,task_idx)
else:
raise Exception("supported DeepLIFT references are 'shuffled_ref','gc_ref', 'zero_ref'")
return np.asarray(deeplift_scores)
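# Hedged usage sketch (the model path and input array are placeholders; assumes X is
# already one-hot encoded with shape (n_samples, 1, seq_len, 4)):
#
#   score_func = get_deeplift_scoring_function("model.hdf5", reference="shuffled_ref")
#   scores = deeplift(score_func, X, batch_size=200, task_idx=0,
#                     num_refs_per_seq=10, reference="shuffled_ref")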
| python |
from utils import utils
from enums.enums import MediusEnum, RtIdEnum, MediusChatMessageType
from medius.mediuspackets.chatfwdmessage import ChatFwdMessageSerializer
import logging
logger = logging.getLogger('robo.chat')
class ChatCommands:
def __init__(self):
pass
def process_chat(self, player, text):
self._set_agg_time(player, text)
def _set_agg_time(self, player, text):
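        # Chat syntax handled here (example): "!tagg 30" sets this player's TCP DME
        # aggregation time to 30 ms (stored in seconds); "!uagg 30" does the same for UDP.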
if "!tagg" in text or "!uagg" in text:
try:
text_split = text.split()
agg_time = int(text_split[1])
if text_split[0] == '!tagg':
player.set_dmetcp_aggtime(agg_time * 0.001)
resp_text = f'0TCP Agg set to {agg_time}ms. WARNING: Experimental mod'
else:
player.set_dmeudp_aggtime(agg_time * 0.001)
resp_text = f'0UDP Agg set to {agg_time}ms. WARNING: Experimental mod'
# Send the player a whisper
packet = [{'name': 'Server app'}, {'rtid': RtIdEnum.SERVER_APP}]
packet.append({'payload':ChatFwdMessageSerializer.build(utils.str_to_bytes("",MediusEnum.MESSAGEID_MAXLEN),
0, "SYSTEM", MediusChatMessageType.WHISPER, utils.str_to_bytes(resp_text, MediusEnum.CHATMESSAGE_MAXLEN))})
packet = utils.rtpacket_to_bytes(packet)
player.send_mls(packet)
except:
logger.exception('error')
| python |
# -*- coding: utf-8 -*-
# Python import
import sys
# Local import
import settings
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
#following from Python cookbook, #475186
def has_colors(stream):
if not hasattr(stream, "isatty") or not stream.isatty():
return False
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
return False
has_colors = has_colors(sys.stdout)
def printout(text, color=WHITE):
if has_colors:
seq = "\x1b[1;%dm" % (30+color) + text + "\x1b[0m"
return seq
else:
return text
class LocalPrinter:
"""
Print all outputs on standard output, with all the colors and stuff
"""
def __init__(self, verbosity):
self.verbosity = verbosity
def printErrorNoSetFound(self):
"""
Print 'ErrorNoSetFound' error message
"""
print printout(settings.strings['errorNoSetFound'], settings.colors['errors'])
def printIntro(self):
"""
Print the intro sentence, before testing starts
"""
print printout(settings.strings['intro'], settings.colors['intro'])
def printSetIntro(self, u):
"""
Print the set intro sentence, before the beginning of each test set
"""
if self.verbosity > 0:
print printout(u.__class__.__name__ + ': ' + u.__doc__, settings.colors['setIntro'])
def printTestOutput(self, data, doc):
"""
Print the output of a test
"""
if data['success']:
success = printout(settings.strings['testSuccess'], settings.colors['testSuccess'])
else:
success = printout(settings.strings['testFailure'], settings.colors['testFailure'])
output = settings.strings['testOutputFormat'].format(success=success, return_code=data['code'], elapsed=data['elapsed'], doc=doc)
if self.verbosity > 1:
print output
def printTestDirtyFailure(self, data):
"""
Print the output of a dirty failed test (aka Exception was thrown during test execution)
"""
output = printout(settings.strings['testDirtyFailure'], settings.colors['testDirtyFailure']) + str(data['exception'])
if self.verbosity > 1:
print output
def printSetResult(self, test_set, nb_tests, nb_ok, total_response_time):
"""
Print set results, after the end of each test set
"""
if self.verbosity > 0:
percent = int(100 * (float(nb_ok) / float(nb_tests)))
print printout(
settings.strings['setResult'].format(nb_tests_passed=nb_ok,
nb_tests_total=nb_tests,
percent=percent,
className=test_set.__class__.__name__),
settings.colors['setResult'])
def printTotalResult(self, nb_tests, nb_ok, total_response_time):
"""
Print total results, after the end of all test sets
"""
percent = int(100 * (float(nb_ok) / float(nb_tests)))
print printout(
settings.strings['totalResult'].format(nb_tests_passed=nb_ok,
nb_tests_total=nb_tests,
percent=percent),
settings.colors['totalResult'])
if percent == 100:
print printout(settings.strings['buildOk'], settings.colors['buildOk'])
else:
print printout(settings.strings['buildKo'], settings.colors['buildKo'])
| python |
# Copyright 2021 Beijing DP Technology Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for inferencing with Uni-Fold."""
from absl import logging
import json
import os
import numpy as np
import pickle
import time
from typing import Dict, Optional
from unifold.common import protein
from unifold.data.pipeline import DataPipeline
from unifold.model.features import FeatureDict
from unifold.model.model import RunModel
from unifold.relax.relax import AmberRelaxation
def generate_pkl_features_from_fasta(
fasta_path: str,
name: str,
output_dir: str,
data_pipeline: DataPipeline,
timings: Optional[Dict[str, float]] = None):
"""Predicts structure using Uni-Fold for the given sequence."""
if timings is None:
timings = {}
# Check output dir.
output_dir = os.path.join(output_dir, name)
# TODO: temp change for the feature generation, remember to fix this
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
if os.path.exists(os.path.join(output_dir, "timings.json")):
print(f"skip {fasta_path}")
return
msa_output_dir = os.path.join(output_dir, 'msas')
if not os.path.exists(msa_output_dir):
os.makedirs(msa_output_dir)
# Get features.
pt = time.time()
logging.info(f"processing file {fasta_path}...")
features = data_pipeline.process(
input_fasta_path=fasta_path,
msa_output_dir=msa_output_dir)
timings['data_pipeline'] = time.time() - pt
# Write out features as a pickled dictionary.
features_output_path = os.path.join(output_dir, 'features.pkl')
with open(features_output_path, 'wb') as f:
pickle.dump(features, f, protocol=4)
logging.info(f"process file {fasta_path} done.")
# Save timings.
timings_output_path = os.path.join(output_dir, 'timings.json')
with open(timings_output_path, 'w') as fp:
json.dump(timings, fp, indent=4)
return features
def predict_from_pkl(
features: FeatureDict,
name: str,
output_dir: str,
model_runners: Dict[str, RunModel],
amber_relaxer: Optional[AmberRelaxation],
random_seed: int,
benchmark: bool = False,
dump_pickle: bool = True,
timings: Optional[Dict[str, float]] = None):
"""Predicts structure using Uni-Fold for the given features."""
if not timings:
timings = {}
output_dir = os.path.join(output_dir, name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_pdbs = {}
plddts = {}
# Run the models.
for model_name, model_runner in model_runners.items():
logging.info(f"Running model {model_name} ...")
# Process features.
pt = time.time()
processed_features = model_runner.process_features(
features, random_seed=random_seed)
timings[f'process_features_{model_name}'] = time.time() - pt
# Run the prediction code.
pt = time.time()
prediction_result = model_runner.predict(processed_features)
t_diff = time.time() - pt
timings[f'predict_and_compile_{model_name}'] = t_diff
logging.info(f"Total JAX model {model_name} predict time (compilation "
f"included): {t_diff:.0f}.")
# If benchmarking, re-run to test JAX running time without compilation.
if benchmark:
pt = time.time()
model_runner.predict(processed_features)
timings[f'predict_benchmark_{model_name}'] = time.time() - pt
# Save the model outputs in pickle format.
if dump_pickle:
result_output_path = os.path.join(output_dir, f'result_{model_name}.pkl')
with open(result_output_path, 'wb') as fp:
pickle.dump(prediction_result, fp, protocol=4)
# Save residue-wise pLDDT.
plddt_out_path = os.path.join(output_dir, f'res_plddt_{model_name}.txt')
np.savetxt(plddt_out_path, prediction_result['plddt'])
# Get mean pLDDT confidence metric.
plddts[model_name] = np.mean(prediction_result['plddt'])
# Get and save unrelaxed protein.
unrelaxed_protein = protein.from_prediction(processed_features,
prediction_result)
unrelaxed_pdb_path = os.path.join(output_dir, f'unrelaxed_{model_name}.pdb')
unrelaxed_pdb_str = protein.to_pdb(unrelaxed_protein)
with open(unrelaxed_pdb_path, 'w') as fp:
fp.write(unrelaxed_pdb_str)
# Relax the prediction.
if amber_relaxer is not None:
# Run the relaxation.
pt = time.time()
relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein)
timings[f'relax_{model_name}'] = time.time() - pt
# Save the relaxed PDB.
output_pdbs[model_name] = relaxed_pdb_str
relaxed_output_path = os.path.join(output_dir, f'relaxed_{model_name}.pdb')
with open(relaxed_output_path, 'w') as fp:
fp.write(relaxed_pdb_str)
else:
output_pdbs[model_name] = unrelaxed_pdb_str
# Rank by pLDDT and write out PDBs in rank order.
ranked_order = []
for idx, (model_name, _) in enumerate(
sorted(plddts.items(), key=lambda x: x[1], reverse=True)):
ranked_order.append(model_name)
ranked_output_path = os.path.join(output_dir, f'ranked_{idx}.pdb')
with open(ranked_output_path, 'w') as fp:
fp.write(output_pdbs[model_name])
ranking_output_path = os.path.join(output_dir, 'ranking_debug.json')
with open(ranking_output_path, 'w') as fp:
json.dump({'plddts': plddts, 'order': ranked_order}, fp, indent=4)
logging.info(f"Final timings for {name}: {timings}")
timings_output_path = os.path.join(output_dir, 'timings.json')
with open(timings_output_path, 'w') as fp:
json.dump(timings, fp, indent=4)
return output_pdbs, plddts
def predict_from_fasta(
fasta_path: str,
name: str,
output_dir: str,
data_pipeline: DataPipeline,
model_runners: Dict[str, RunModel],
amber_relaxer: Optional[AmberRelaxation],
random_seed: int,
benchmark: bool = False,
dump_pickle: bool = True,
    timings: Optional[Dict[str, float]] = None):  # benchmark, dump_pickle and timings are forwarded to predict_from_pkl.
"""Predicts structure using Uni-Fold for the given fasta file: """
"""generates a features.pkl file and then calls predict_from_pkl."""
timings = {}
# generate feature dict
features = generate_pkl_features_from_fasta(
fasta_path=fasta_path,
name=name,
output_dir=output_dir,
data_pipeline=data_pipeline,
timings=timings)
output_pdbs, plddts = predict_from_pkl(
features=features,
name=name,
output_dir=output_dir,
model_runners=model_runners,
amber_relaxer=amber_relaxer,
random_seed=random_seed,
benchmark=benchmark,
dump_pickle=dump_pickle,
timings=timings)
return features, output_pdbs, plddts
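# Hedged usage sketch (construction of the DataPipeline and RunModel objects is omitted
# and assumed to follow the rest of the Uni-Fold codebase; paths are placeholders):
#
#   features, pdbs, plddts = predict_from_fasta(
#       fasta_path="example.fasta", name="example", output_dir="./predictions",
#       data_pipeline=data_pipeline, model_runners=model_runners,
#       amber_relaxer=None, random_seed=0)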
| python |
def disemvowel(string):
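    # e.g. disemvowel("Hello World") -> "Hll Wrld" (removes a, e, i, o, u in either case)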
return "".join(i for i in string if not (i.lower() in "aeiou")) | python |
"""Tests for Broadlink devices."""
from unittest.mock import patch
import broadlink.exceptions as blke
from openpeerpower.components.broadlink.const import DOMAIN
from openpeerpower.components.broadlink.device import get_domains
from openpeerpower.config_entries import ConfigEntryState
from openpeerpower.helpers.entity_registry import async_entries_for_device
from . import get_device
from tests.common import mock_device_registry, mock_registry
async def test_device_setup(opp):
"""Test a successful setup."""
device = get_device("Office")
with patch.object(
opp.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init:
mock_api, mock_entry = await device.setup_entry(opp)
assert mock_entry.state == ConfigEntryState.LOADED
assert mock_api.auth.call_count == 1
assert mock_api.get_fwversion.call_count == 1
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
assert mock_init.call_count == 0
async def test_device_setup_authentication_error(opp):
"""Test we handle an authentication error."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
with patch.object(
opp.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init:
mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
assert mock_entry.state == ConfigEntryState.SETUP_ERROR
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 1
assert mock_init.mock_calls[0][2]["context"]["source"] == "reauth"
assert mock_init.mock_calls[0][2]["data"] == {
"name": device.name,
**device.get_entry_data(),
}
async def test_device_setup_network_timeout(opp):
"""Test we handle a network timeout."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.NetworkTimeoutError()
with patch.object(
opp.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init:
mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
assert mock_entry.state is ConfigEntryState.SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_os_error(opp):
"""Test we handle an OS error."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = OSError()
with patch.object(
opp.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init:
mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
assert mock_entry.state is ConfigEntryState.SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_broadlink_exception(opp):
"""Test we handle a Broadlink exception."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.BroadlinkException()
with patch.object(
opp.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init:
mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
assert mock_entry.state is ConfigEntryState.SETUP_ERROR
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_update_network_timeout(opp):
"""Test we handle a network timeout in the update step."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.NetworkTimeoutError()
with patch.object(
opp.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init:
mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
assert mock_entry.state is ConfigEntryState.SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_api.check_sensors.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_update_authorization_error(opp):
"""Test we handle an authorization error in the update step."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = (
blke.AuthorizationError(),
{"temperature": 30},
)
with patch.object(
opp.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init:
mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
assert mock_entry.state is ConfigEntryState.LOADED
assert mock_api.auth.call_count == 2
assert mock_api.check_sensors.call_count == 2
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
assert mock_init.call_count == 0
async def test_device_setup_update_authentication_error(opp):
"""Test we handle an authentication error in the update step."""
device = get_device("Garage")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.AuthorizationError()
mock_api.auth.side_effect = (None, blke.AuthenticationError())
with patch.object(
opp.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init:
mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
assert mock_entry.state is ConfigEntryState.SETUP_RETRY
assert mock_api.auth.call_count == 2
assert mock_api.check_sensors.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 1
assert mock_init.mock_calls[0][2]["context"]["source"] == "reauth"
assert mock_init.mock_calls[0][2]["data"] == {
"name": device.name,
**device.get_entry_data(),
}
async def test_device_setup_update_broadlink_exception(opp):
"""Test we handle a Broadlink exception in the update step."""
device = get_device("Garage")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.BroadlinkException()
with patch.object(
opp.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(opp.config_entries.flow, "async_init") as mock_init:
mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
assert mock_entry.state is ConfigEntryState.SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_api.check_sensors.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_get_fwversion_broadlink_exception(opp):
"""Test we load the device even if we cannot read the firmware version."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.get_fwversion.side_effect = blke.BroadlinkException()
with patch.object(opp.config_entries, "async_forward_entry_setup") as mock_forward:
mock_api, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
assert mock_entry.state is ConfigEntryState.LOADED
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
async def test_device_setup_get_fwversion_os_error(opp):
"""Test we load the device even if we cannot read the firmware version."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.get_fwversion.side_effect = OSError()
with patch.object(opp.config_entries, "async_forward_entry_setup") as mock_forward:
_, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
assert mock_entry.state is ConfigEntryState.LOADED
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
async def test_device_setup_registry(opp):
"""Test we register the device and the entries correctly."""
device = get_device("Office")
device_registry = mock_device_registry(opp)
entity_registry = mock_registry(opp)
_, mock_entry = await device.setup_entry(opp)
await opp.async_block_till_done()
assert len(device_registry.devices) == 1
device_entry = device_registry.async_get_device({(DOMAIN, mock_entry.unique_id)})
assert device_entry.identifiers == {(DOMAIN, device.mac)}
assert device_entry.name == device.name
assert device_entry.model == device.model
assert device_entry.manufacturer == device.manufacturer
assert device_entry.sw_version == device.fwversion
for entry in async_entries_for_device(entity_registry, device_entry.id):
assert entry.original_name.startswith(device.name)
async def test_device_unload_works(opp):
"""Test we unload the device."""
device = get_device("Office")
with patch.object(opp.config_entries, "async_forward_entry_setup"):
mock_api, mock_entry = await device.setup_entry(opp)
with patch.object(
opp.config_entries, "async_forward_entry_unload", return_value=True
) as mock_forward:
await opp.config_entries.async_unload(mock_entry.entry_id)
assert mock_entry.state is ConfigEntryState.NOT_LOADED
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
async def test_device_unload_authentication_error(opp):
"""Test we unload a device that failed the authentication step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
with patch.object(opp.config_entries, "async_forward_entry_setup"), patch.object(
opp.config_entries.flow, "async_init"
):
_, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
with patch.object(
opp.config_entries, "async_forward_entry_unload", return_value=True
) as mock_forward:
await opp.config_entries.async_unload(mock_entry.entry_id)
assert mock_entry.state is ConfigEntryState.NOT_LOADED
assert mock_forward.call_count == 0
async def test_device_unload_update_failed(opp):
"""Test we unload a device that failed the update step."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.NetworkTimeoutError()
with patch.object(opp.config_entries, "async_forward_entry_setup"):
_, mock_entry = await device.setup_entry(opp, mock_api=mock_api)
with patch.object(
opp.config_entries, "async_forward_entry_unload", return_value=True
) as mock_forward:
await opp.config_entries.async_unload(mock_entry.entry_id)
assert mock_entry.state is ConfigEntryState.NOT_LOADED
assert mock_forward.call_count == 0
async def test_device_update_listener(opp):
"""Test we update device and entity registry when the entry is renamed."""
device = get_device("Office")
device_registry = mock_device_registry(opp)
entity_registry = mock_registry(opp)
mock_api, mock_entry = await device.setup_entry(opp)
await opp.async_block_till_done()
with patch(
"openpeerpower.components.broadlink.device.blk.gendevice", return_value=mock_api
):
opp.config_entries.async_update_entry(mock_entry, title="New Name")
await opp.async_block_till_done()
device_entry = device_registry.async_get_device({(DOMAIN, mock_entry.unique_id)})
assert device_entry.name == "New Name"
for entry in async_entries_for_device(entity_registry, device_entry.id):
assert entry.original_name.startswith("New Name")
| python |
import time
import numpy as np
import sys
sys.path.append('..//Drivers')
sys.path.append('..//PlotModules')
import math
import csv
import matplotlib.pyplot as plt
from waferscreen.inst_control.Keysight_USB_VNA import USBVNA
import pySmith  # Smith-chart plotting helper, assumed importable from the PlotModules path added above
#####
# Code which will take an S21 measurement with a Keysight USB VNA (P937XA) and plot it in log magnitude (LM) and on a Smith chart
# And then write the data to a file with (freq, s21A, s21B) where A and B are determined by the data_format
#####
outputfilename = "C:\\Users\\jac15\\Code\\VNA\\Data\\test_sweep" # leave extension off, added according to file type
# group delay removal settings
group_delay = 2.787 #nanoseconds
remove_group_delay = True #just removes phase delay
#output format settings
data_format = 'RI' # 'LM' or 'RI' # records this data type in file
output_format = 'TXT' # 'TXT' or 'CSV' or 'BOTH'
plotphase = 1
#User VNA settings
vna_address = "TCPIP0::687JC1::hislip0,4880::INSTR" #go into Keysight GUI, enable HiSlip Interface, find address in SCPI Parser I/O
fcenter = 6 #GHz
fspan = 4000 #MHz
num_freq_points = 201 #number of frequency points to measure at
sweeptype = 'lin' #lin or log in freq space
if_bw = 10 #Hz
ifbw_track = False #ifbw tracking, reduces IFBW at low freq to overcome 1/f noise
port_power = -40 #dBm
vna_avg = 1 #number of averages. if one, set to off
preset_vna = False #preset the VNA? Do if you don't know the state of the VNA ahead of time
##########################################################
####Code begins here######################################
##########################################################
#Set up Network Analyzer
vna = USBVNA(address=vna_address) #"PXI10::0-0.0::INSTR") #"PXI10::CHASSIS1::SLOT1::FUNC0::INSTR"
if preset_vna:
vna.preset()
vna.setup_thru()
vna.set_cal(calstate = 'OFF') # get raw S21 data
vna.set_freq_center(center = fcenter, span = fspan/1000.0)
vna.set_sweep(num_freq_points, type = sweeptype)
vna.set_avg(count = vna_avg)
vna.set_ifbw(if_bw,track = ifbw_track)
vna.set_power(port = 1, level = port_power, state = "ON")
time.sleep(1.0) #sleep for a second in case we've just over-powered the resonators
#Figure out frequency points for recording
fmin = fcenter - fspan/(2000.0)
fmax = fcenter + fspan/(2000.0)
if sweeptype == "lin":
freqs = np.linspace(fmin,fmax,num_freq_points)
elif sweeptype == 'log':
logfmin = np.log10(fmin)
logfmax = np.log10(fmax)
logfreqs = np.linspace(logfmin,logfmax,num_freq_points)
freqs = 10**logfreqs
#trigger a sweep to be done
vna.reset_sweep()
vna.trig_sweep()
#collect data according to data_format LM or RI
(s21Au,s21Bu) = vna.get_S21(format = 'RI')
print("Trace Acquired")
#put uncalibrated data in complex format
s21data = []
for i in range(0,len(freqs)):
s21data.append(s21Au[i] + 1j*s21Bu[i])
s21data = np.array(s21data)
#remove group delay if desired
if not remove_group_delay:
group_delay = 0.0
phase_delay = np.exp(-1j*freqs*2.0*math.pi*group_delay)
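# Note: freqs is in GHz and group_delay in ns, so their product is dimensionless
# (cycles) and the exponent above needs no additional unit conversion.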
#calculate the 'calibrated' S21 data by dividing by phase delay
s21R = []
s21I = []
for i in range(0, len(freqs)):
s21R.append(np.real(s21data[i]/phase_delay[i]))
s21I.append(np.imag(s21data[i]/phase_delay[i]))
s21R = np.array(s21R)
s21I = np.array(s21I)
#convert data from data_format to both LM for plotting
s21LM = []
s21PH = []
for i in range(0, len(freqs)):
s21LM.append(10*np.log10(s21R[i]**2 + s21I[i]**2))
s21PH.append(180.0/math.pi*math.atan2(s21I[i],s21R[i]))
s21LM = np.array(s21LM)
s21PH = np.array(s21PH)
vna.reset_sweep()
vna.close()
plot_freqs = []
for i in range(0,len(freqs)):
plot_freqs.append(freqs[i])
plot_freqs = np.array(plot_freqs)
fig1 = plt.figure(1)
ax11 = fig1.add_subplot(121)
ax11.set_xlabel("Freq. (GHz)")
if sweeptype == 'log':
ax11.set_xscale('log')
ax11.set_ylabel("S21 (dB)")
if plotphase:
ax11t = ax11.twinx()
ax11t.set_ylabel("S21 (deg)")
ax12 = pySmith.get_smith(fig1, 122)
#plot Log Magnitude and possibly Phase data
ax11.plot(plot_freqs,s21LM)
if plotphase == 1:
ax11t.plot(plot_freqs,s21PH,c='r')
#plot Smith Chart data
ax12.plot(s21R,s21I)
#Save the data
if output_format == "TXT" or output_format == "BOTH":
fout = open(outputfilename + '.txt', 'w')
for i in range(0,len(freqs)):
if data_format == 'LM':
out = str(freqs[i]) + " " + str(s21LM[i]) + " " + str(s21PH[i]) + "\n"
elif data_format == 'RI':
out = str(freqs[i]) + " " + str(s21R[i]) + " " + str(s21I[i]) + "\n"
else:
print('Data format not recognized!')
fout.write(out)
fout.close()
print('TXT file written')
if output_format == "CSV" or output_format == "BOTH":
with open(outputfilename + '.csv', 'w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for i in range(0,len(freqs)):
if data_format == 'LM':
csvwriter.writerow([freqs[i],s21LM[i],s21PH[i]])
elif data_format == 'RI':
csvwriter.writerow([freqs[i],s21R[i],s21I[i]])
else:
print('Data format not recognized!')
print('CSV file written')
if output_format not in ("TXT", "CSV", "BOTH"):
    print('Output file format not recognized!')
#show maximized plot
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show() | python |
from .Algorithm import PoblationalAlgorithm
from ..Agents.RealAgent import RealAgent
class EvolutionStrategie(PoblationalAlgorithm):
def __init__(self, function, ind_size, p_size, generations, selection_op,
mutation_op, recombination_op, marriage_size=2, agent_args={}, **kwargs):
self.ind_size = ind_size
self.generations = generations
self.marriage_size = marriage_size
self.agent_args = agent_args
super().__init__(function, p_size, **kwargs)
#Operators
self.selection_op = selection_op
self.mutation_op = mutation_op
self.recombination_op = recombination_op
def init_population(self, p_size):
population = []
for _ in range(p_size):
ind = RealAgent()
ind.init(self.ind_size, exogenous=True, **self.agent_args)
population.append(ind)
self.evaluate(population)
return population
def stop(self, population, k):
return self.generations <= k
def replace(self, population, children):
total = population + children
total.sort()
return total[:len(population)]
def grow(self, population, k):
children = []
for _ in range(len(population)):
parents = self.selection_op.apply(population, size=self.marriage_size)
ind = self.recombination_op.apply(parents)
ind = self.mutation_op.apply(ind)[0]
children.append(ind)
self.evaluate(children)
return self.replace(population, children)
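# Hedged usage sketch (the operator class names below are assumptions, not defined in
# this module; any selection/mutation/recombination operators with an `apply` method fit):
#
#   es = EvolutionStrategie(function=fitness, ind_size=10, p_size=20, generations=100,
#                           selection_op=RandomSelection(), mutation_op=GaussianMutation(),
#                           recombination_op=IntermediateRecombination(), marriage_size=2)
#
# Each generation draws `marriage_size` parents, recombines them into one child, mutates
# it, and keeps the best `p_size` agents out of parents plus children.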
| python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# # MIT License
#
# Copyright (c) 2020 Mike Simms
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
import re
import sys
valid_zfs_file_name = re.compile(r"^[\s\.\:\_\-\*\,a-zA-Z0-9]+") # Source https://unix.stackexchange.com/questions/23569/allowed-and-safe-characters-for-zfs-filesystem-in-freebsd
valid_fat_file_name = re.compile(r"^[\s\.\_\$\%\@\~\!\(\)\{\}\^\+\-\,\;\=\[\]\#\&a-zA-Z0-9]+") # Matches long FAT file names, source http://averstak.tripod.com/fatdox/names.htm
valid_ntfs_file_name = re.compile(r"^[\s\.\:\_\$\%\@\~\!\/\(\)\{\}\^\+\-\,\;\=\[\]\#\&a-zA-Z0-9]+")
valid_hfs_file_name = re.compile(r"^[\s\.\_\$\%\@\~\!\\\/\(\)\{\}\^\+\-\,\;\=\[\]\#\&a-zA-Z0-9]+")
def search_dir(dir, recurse, zfs, fat, ntfs, hfs):
for file_name in os.listdir(dir):
# Generate the complete path.
complete_file_name = os.path.join(dir, file_name)
# Check for validity.
if zfs:
matched = re.match(valid_zfs_file_name, file_name)
if matched is None or matched.group() != file_name:
print(complete_file_name + " is invalid for ZFS.")
if fat:
matched = re.match(valid_fat_file_name, file_name)
if matched is None or matched.group() != file_name:
print(file_name + " is invalid for FAT.")
if ntfs:
matched = re.match(valid_ntfs_file_name, file_name)
if matched is None or matched.group() != file_name:
print(complete_file_name + " is invalid for NTFS.")
if hfs:
matched = re.match(valid_hfs_file_name, file_name)
if matched is None or matched.group() != file_name:
print(complete_file_name + " is invalid for HFS.")
# Dir:
if recurse and os.path.isdir(complete_file_name):
search_dir(os.path.join(dir, file_name), recurse, zfs, fat, ntfs, hfs)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--dir", type=str, action="store", default=".", help="Directory in which to search", required=True)
parser.add_argument("--recurse", action="store_true", default=True, help="Perform the sync recursively", required=False)
parser.add_argument("--zfs", action="store_true", default=False, help="Print files containing invalid characters incompatible with the ZFS file system", required=False)
parser.add_argument("--fat", action="store_true", default=False, help="Print files containing invalid characters incompatible with the FAT file system", required=False)
parser.add_argument("--ntfs", action="store_true", default=False, help="Print files containing invalid characters incompatible with the NTFS file system", required=False)
parser.add_argument("--hfs", action="store_true", default=False, help="Print files containing invalid characters incompatible with the HFS file system", required=False)
try:
args = parser.parse_args()
except IOError as e:
parser.error(e)
sys.exit(1)
if args.zfs or args.fat or args.ntfs or args.hfs:
search_dir(args.dir, args.recurse, args.zfs, args.fat, args.ntfs, args.hfs)
else:
print("No file system formats were specified.")
if __name__ == "__main__":
main()
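# Hedged example invocation (the script name is a placeholder):
#   python check_file_names.py --dir /mnt/backup --recurse --ntfs --fat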
| python |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from gym.spaces import Discrete
from compiler_gym.spaces import Tuple
from tests.test_main import main
def test_equal():
assert Tuple([Discrete(2), Discrete(3)], name="test_tuple") == Tuple(
[Discrete(2), Discrete(3)], name="test_tuple"
)
def test_not_equal():
tuple_space = Tuple([Discrete(2), Discrete(3)], name="test_tuple")
assert tuple_space != Tuple([Discrete(3), Discrete(3)], name="test_tuple")
assert tuple_space != Tuple([Discrete(2)], name="test_tuple")
assert tuple_space != Tuple([Discrete(2), Discrete(3)], name="test_tuple_2")
assert tuple_space != "not_a_tuple"
if __name__ == "__main__":
main()
| python |
import os
import sys
from configobj import ConfigObj
import click
import requests
from kaos_cli.utils.helpers import run_cmd
from ..constants import KAOS_STATE_DIR, CONFIG_PATH, ENV_DICT
def pass_obj(obj_id):
def decorator(f):
def new_func(*args, **kwargs):
ctx = click.get_current_context()
obj = ctx.obj[obj_id]
            if obj is None:
raise RuntimeError('Managed to invoke callback without a '
'context object of type %r existing'
% obj_id)
return ctx.invoke(f, obj, *args, **kwargs)
return new_func
return decorator
def pass_config(fun):
def decorator(*args, **kwargs):
ctx = click.get_current_context()
state = ctx.obj['state']
config = state.config
return fun(config, *args, **kwargs)
return decorator
def build_env_check(func):
"""
Decorator for confirming the env vars are set.
- Checks if the KAOS_HOME is set and is valid.
- Checks if k8s cluster is setup and running for a local build.
"""
def wrapper(*args, **kwargs):
kaos_home_path = os.getenv("KAOS_HOME")
if not kaos_home_path:
click.echo("{} - Please set the KAOS_HOME environment variable to the source project directory".format(
click.style("Warning", bold=True, fg='yellow')))
sys.exit(1)
kaos_config_path = kaos_home_path + "/.git/config"
if not os.path.exists(kaos_config_path):
click.echo("{} - Please ensure that KAOS_HOME points to a valid directory containing kaos".format(
click.style("Warning", bold=True, fg='yellow')))
sys.exit(1)
line_list = [line.rstrip('\n') for line in open(kaos_config_path) if "KI-labs/kaos.git" in line]
if not line_list:
click.echo("{} - Please ensure that KAOS_HOME points to a valid directory containing kaos".format(
click.style("Warning", bold=True, fg='yellow')))
sys.exit(1)
provider = kwargs["cloud"]
if provider == "DOCKER":
# Docker Desktop is running WITH single-node kubernetes cluster
cmd = "kubectl get services --context docker-for-desktop"
exitcode, out, err = run_cmd(cmd)
error_codes = ["Unable to connect to the server",
"did you specify the right host or port?"]
if any([e in str(err) for e in error_codes]):
click.echo(
"{} - Docker Desktop with Kubernetes is currently {}\n\n"
"Please {} Docker Desktop and {} Kubernetes".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("disabled", bold=True, fg='red'),
click.style("start", bold=True, fg='green'),
click.style("enable", bold=True, fg='green')))
sys.exit(1)
# Docker Desktop context is set
cmd = "kubectl config current-context"
exitcode, out, err = run_cmd(cmd)
docker_contexts = ["docker-desktop", "docker-for-desktop"]
if out.decode("utf-8").rstrip() not in docker_contexts:
click.echo(
"{} - Cluster context {} set to Docker Desktop\n\n"
"Please run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("not", bold=True, fg='red'),
click.style("kubectl config use-context docker-desktop", bold=True, fg='green')))
sys.exit(1)
required_envs = list(filter(lambda e: not os.environ.get(e, None), ENV_DICT[provider]))
if required_envs:
click.echo("{} - Please set the following environment variables:".format(
click.style("Warning", bold=True, fg='yellow')))
for env in required_envs:
click.echo("- {}".format((click.style(env, bold=True, fg='red'))))
sys.exit(1)
func(*args, **kwargs)
return wrapper
def init_check(func):
"""
Decorator for confirming the KAOS_STATE_DIR is present (i.e. initialized correctly).
"""
def wrapper(*args, **kwargs):
if not os.path.exists(KAOS_STATE_DIR):
click.echo("{} - {} directory does not exist - first run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style(os.path.split(KAOS_STATE_DIR)[-1], bold=True, fg='red'),
click.style("kaos init", bold=True, fg='green')))
sys.exit(1)
if not os.path.exists(CONFIG_PATH):
click.echo("{} - {} does not exist - run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("./kaos/config", bold=True, fg='red'),
click.style("kaos init", bold=True, fg='green')))
sys.exit(1)
func(*args, **kwargs)
return wrapper
def workspace_check(func):
"""
Decorator for confirming <workspace> is defined in the CONFIG_PATH (i.e. kaos workspace set has been run).
"""
def wrapper(*args, **kwargs):
config = ConfigObj(CONFIG_PATH)
if 'pachyderm' not in config:
click.echo("{} - {} not defined - first run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("workspace", bold=True, fg='red'),
click.style("kaos workspace set", bold=True, fg='green')))
sys.exit(1)
# get active context
active_context = config['active']['environment']
# get base_url
base_url = config[active_context]['backend']['url']
token = config[active_context]['backend']['token']
current_workspace = config['pachyderm']['workspace']
# GET all workspaces: /workspace
r = requests.get(f"{base_url}/workspace", headers={"X-Token": token})
if r.status_code == 401:
click.echo("Unauthorized token")
sys.exit(1)
data = r.json()
workspaces_list = [v for v in data['names']]
if current_workspace not in workspaces_list:
click.echo("{} - Workspace {} has been {}. \n\n"
"Please ensure the kaos train/serve commands are run on an active workspace. \n\n"
"Check available workspaces with - {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style(current_workspace, bold=True, fg='green'),
click.style("deleted/killed", bold=True, fg='red'),
click.style("kaos workspace list", bold=True, fg='green')))
sys.exit(1)
func(*args, **kwargs)
return wrapper
def context_check(func):
"""
Decorator for confirming an active_context is defined in the CONFIG_PATH (i.e. kaos build set has been run).
"""
def wrapper(*args, **kwargs):
config = ConfigObj(CONFIG_PATH)
if 'active' not in config:
click.echo("{} - {} not defined - first run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("active context", bold=True, fg='red'),
click.style("kaos build set", bold=True, fg='green')))
sys.exit(1)
# get active context
active_context = config['active']['environment']
# GET all contexts
contexts = config['contexts']['environments']
def __validate_context(context, active_context):
return context == active_context
        if isinstance(contexts, list):
            active_context_exists = any(
                __validate_context(context, active_context) for context in contexts)
        elif isinstance(contexts, str):
            active_context_exists = __validate_context(contexts, active_context)
if not active_context_exists:
click.echo("{} - Active context/build {} has been {}. \n\n"
"Please ensure the kaos build set is done on an existing/available deployment. \n\n"
"Check available contexts with - {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style(active_context, bold=True, fg='green'),
click.style("destroyed", bold=True, fg='red'),
click.style("kaos build list", bold=True, fg='green')))
sys.exit(1)
func(*args, **kwargs)
return wrapper
def health_check(func):
"""
Decorator for confirming endpoint is running.
"""
def wrapper(*args, **kwargs):
config = ConfigObj(CONFIG_PATH)
# get active context
active_context = config['active']['environment']
# get base_url
base_url = config[active_context]['backend']['url']
try:
func(*args, **kwargs)
except (requests.exceptions.InvalidURL, requests.exceptions.InvalidSchema):
click.echo("{} - Please run {} with a valid URL - {} is invalid!".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("kaos init", bold=True, fg='green'),
click.style(base_url, bold=True, fg='red')), err=True)
sys.exit(1)
except requests.exceptions.ConnectionError:
click.echo("{} - Please ensure the endpoint is available - {} is unreachable!".format(
click.style("Warning", bold=True, fg='yellow'),
click.style(base_url, bold=True, fg='red')), err=True)
sys.exit(1)
except requests.exceptions.MissingSchema:
click.echo("{} - Missing endpoint! Please set with - {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("kaos init", bold=True, fg='green')), err=True)
sys.exit(1)
return wrapper
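# Hedged usage sketch (the command below is hypothetical): the decorators above are meant
# to wrap kaos CLI commands, e.g.
#
#   @click.command()
#   @init_check
#   @context_check
#   @health_check
#   @pass_config
#   def status(config):
#       ...
#
# Checks execute top-down when the command is invoked, so init_check runs before the
# decorators that read CONFIG_PATH.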
| python |
from ModelCreator import get_proportions_model
from ModelEvaluator import plot, show_images
from CustomDataProcessor import get_processed_data
import keras.models as models
import tensorflow as tf
import argparse
import os
import numpy as np
tf.config.experimental.list_physical_devices('GPU')
def train(directory, image_shape, proportions_path, bag_size, batch_size, filter1, kernel1, filter2, kernel2, epochs):
# Get proportions
proportions = np.loadtxt(proportions_path)
# Get data
data_train, labels_train = get_processed_data(directory, bag_size, proportions)
#Create model
model = get_proportions_model(image_shape + (1,), bag_size, filter1, kernel1, filter2, kernel2)
# Round data size to batch size
if len(data_train) % batch_size != 0:
data_train = data_train[0:len(data_train) - (len(data_train) % batch_size)]
labels_train = labels_train[0:len(labels_train) - (len(labels_train) % batch_size)]
labels_train = labels_train.reshape(labels_train.shape + (1,))
data_train = data_train.reshape(data_train.shape + (1,))
# Train the model
    history = model.fit(data_train, labels_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.1)
# Plot progression
plot(history.history["acc"], history.history["val_acc"], 'Model Accuracy', 'Accuracy', 'Epoch')
plot(history.history["loss"], history.history["val_loss"], 'Model Loss', 'Loss', 'Epoch')
# Get the single image prediction model
intermediate_layer_model = models.Model(inputs=model.input,outputs=model.get_layer('inter').output)
intermediate_output = intermediate_layer_model.predict(data_train)
# Predict single images and show result
show_images(data_train, labels_train, intermediate_output, 4, 5, bag_size)
def parse_tuple(value):
    return tuple(int(part.strip()) for part in value.split(','))
def is_valid_path(arg):
if not os.path.exists(arg):
raise argparse.ArgumentTypeError('File %s does not exist.' % arg)
else:
return arg
def is_valid_data_path(arg):
path = ''
if '/' in arg:
path = '/'.join(arg.split('/')[:-1])
else:
path = '\\'.join(arg.split('\\')[:-1])
if not os.path.exists(path):
raise argparse.ArgumentTypeError('File %s does not exist.' % path)
else:
return arg
parser = argparse.ArgumentParser(description='Trains a neural network to classify images based on a dataset of bag of those images along with their labels.')
parser.add_argument('-dir', dest='directory', help='path to the data directory, plus the shared initial name of the sub-directory names without the index. Defaults to "{current_dir}/data/tag_".', default=os.path.join(os.getcwd(), 'data', 'tag_'), type=is_valid_data_path)
parser.add_argument('-shape', dest='image_shape', help='width and height of one image. Defaults to (140, 140).', default=(140, 140), type=parse_tuple)
parser.add_argument('-prop', dest='proportions_path', help='path to the text file containing the proportion labels. Each line of the text file must contain on value. Defaults to "{current_dir}/data/labelproportions.txt".', default=os.path.join(os.getcwd(), 'data', 'labelproportions.txt'), type=is_valid_path)
parser.add_argument('-bag', dest='bag_size', help='Defaults to 100.', default=100, type=int)
parser.add_argument('-batch', dest='batch_size', help='Defaults to 1.', default=1, type=int)
parser.add_argument('-f1', dest='filter1', help='number of filters of the first convolutional layer. Defaults to 3.', default=3, type=int)
parser.add_argument('-k1', dest='kernel1', help='shape of filters of the first convolutional layer. Defaults to (50, 50).', default=(50, 50), type=parse_tuple)
parser.add_argument('-f2', dest='filter2', help='number of filters of the second convolutional layer. Defaults to 5.', default=5, type=int)
parser.add_argument('-k2', dest='kernel2', help='shape of filters of the second convolutional layer. Defaults to (10, 10).', default=(10,10), type=parse_tuple)
parser.add_argument('-epochs', dest='epochs', help='Defaults to 5.', default=5, type=int)
namespace = parser.parse_args()
train(namespace.directory, namespace.image_shape, namespace.proportions_path, namespace.bag_size, namespace.batch_size, namespace.filter1, namespace.kernel1, namespace.filter2, namespace.kernel2, namespace.epochs)
| python |
from pyalgotrade.barfeed import ibfeed
import datetime
class Parser(object):
def parse(self, filename):
slashIndex = filename.rfind('/')
if (slashIndex > -1):
filename = filename[slashIndex + 1:]
underscoreIndex = filename.rfind('_')
hyphenIndex = filename.rfind('-')
zinstrument = filename[0:underscoreIndex]
zStrikePrice = filename[underscoreIndex+1:hyphenIndex]
zDate = filename[hyphenIndex+2:hyphenIndex+10]
zID = filename[0:hyphenIndex+10]
optiontype = filename[hyphenIndex+1]
if (optiontype.lower() == "p"):
optiontype = "PUT"
elif (optiontype.lower() == "c"):
optiontype = "CALL"
else:
optiontype = str(None)
        # TODO: handle invalid dates gracefully
date = datetime.datetime.strptime(zDate, '%Y%m%d')
floatStrike = float(zStrikePrice[:len(zStrikePrice)-2] + '.' + zStrikePrice[len(zStrikePrice)-2:])
instrument = ibfeed.Instrument(zinstrument,floatStrike,optiontype,date,filename,zID)
return instrument
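# Hedged example of the expected filename layout (the name is illustrative only):
#   "ES_250000-C20201218" -> instrument "ES", strike 2500.00, option type "CALL",
#   expiry 2020-12-18; everything up to and including the date becomes the ID.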
| python |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: __init__.py
# Project: helpers
# Author: Brian Cherinka
# Created: Monday, 19th October 2020 5:49:35 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2020 Brian Cherinka
# Last Modified: Monday, 19th October 2020 5:49:35 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
| python |
from flask_bcrypt import generate_password_hash, check_password_hash
from sqlalchemy import Column, ForeignKey, Integer, String, Time, UniqueConstraint, text, Float, Index, Boolean, \
DateTime, CHAR
from sqlalchemy.dialects.postgresql import BIT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
metadata = Base.metadata
class Province(Base):
__tablename__ = 'province'
province_id = Column(Integer, primary_key=True, unique=True,
server_default=text("nextval('province_province_id_seq'::regclass)"))
province_name = Column(String(45), nullable=False, unique=True)
class Train(Base):
__tablename__ = 'train'
train_id = Column(Integer, primary_key=True, server_default=text("nextval('train_train_id_seq'::regclass)"))
train_name = Column(String(15), nullable=False)
available = Column(Boolean, nullable=False, server_default=text("true"))
class User(Base):
__tablename__ = 'users'
user_id = Column(Integer, primary_key=True, unique=True,
server_default=text("nextval('user_user_id_seq'::regclass)"))
username = Column(String(255), nullable=False, unique=True)
phone_number = Column(String(45), nullable=False)
real_name = Column(String(45), nullable=False)
email = Column(String(45), nullable=False)
password = Column(String(100), nullable=False)
id_card = Column(CHAR(18))
is_admin = Column(Boolean, nullable=False, server_default=text("false"))
def hash_password(self):
self.password = generate_password_hash(self.password).decode('utf8')
def check_password(self, password):
return check_password_hash(self.password, password)
def to_dict(self):
return {
'username': self.username,
'phone_number': self.phone_number,
'real_name': self.real_name,
'email': self.email,
'id_card': self.id_card,
'is_admin': self.is_admin
}
class City(Base):
__tablename__ = 'city'
city_id = Column(Integer, primary_key=True, server_default=text("nextval('city_city_id_seq'::regclass)"))
city_name = Column(String(32), nullable=False, unique=True)
province_id = Column(ForeignKey('province.province_id'), nullable=False)
province = relationship('Province')
class District(Base):
__tablename__ = 'district'
district_id = Column(Integer, primary_key=True, unique=True,
server_default=text("nextval('district_district_id_seq'::regclass)"))
district_name = Column(String(45), nullable=False)
city_id = Column(ForeignKey('city.city_id'), nullable=False)
city = relationship('City')
class Station(Base):
__tablename__ = 'station'
station_id = Column(Integer, primary_key=True, server_default=text("nextval('station_station_id_seq'::regclass)"))
station_name = Column(String(32), nullable=False, unique=True)
district_id = Column(ForeignKey('district.district_id'), nullable=False)
available = Column(Boolean, nullable=False, server_default=text("true"))
district = relationship('District')
class Interval(Base):
__tablename__ = 'interval'
__table_args__ = (
UniqueConstraint('train_id', 'dep_station', 'arv_station'),
)
interval_id = Column(Integer, primary_key=True,
server_default=text("nextval('interval_interval_id_seq'::regclass)"))
train_id = Column(ForeignKey('train.train_id'), nullable=False)
dep_station = Column(ForeignKey('station.station_id'), nullable=False)
arv_station = Column(ForeignKey('station.station_id'), nullable=False)
dep_datetime = Column(Time, nullable=False)
arv_datetime = Column(Time, nullable=False)
prev_id = Column(Integer)
next_id = Column(Integer)
available = Column(Boolean, nullable=False, server_default=text("true"))
station = relationship('Station', primaryjoin='Interval.arv_station == Station.station_id')
station1 = relationship('Station', primaryjoin='Interval.dep_station == Station.station_id')
train = relationship('Train')
class Price(Base):
__tablename__ = 'prices'
__table_args__ = (
Index('prices_interval_id_seat_type_id_uindex', 'interval_id', 'seat_type_id', unique=True),
)
price_id = Column(Integer, primary_key=True, server_default=text("nextval('prices_price_id_seq'::regclass)"))
interval_id = Column(ForeignKey('interval.interval_id'), nullable=False)
seat_type_id = Column(ForeignKey('seat_type.seat_type_id'), nullable=False)
price = Column(Float(53), nullable=False)
interval = relationship('Interval')
seat_type = relationship('SeatType')
class Seat(Base):
__tablename__ = 'seat'
__table_args__ = (
Index('seat_carriage_number_seat_number_interval_id_uindex', 'carriage_number', 'seat_number', 'train_id',
unique=True),
)
seat_id = Column(Integer, primary_key=True, unique=True,
server_default=text("nextval('seat_seat_id_seq'::regclass)"))
carriage_number = Column(Integer, nullable=False)
seat_number = Column(String(10), nullable=False)
seat_type_id = Column(ForeignKey('seat_type.seat_type_id'), nullable=False, index=True)
occupied = Column(BIT(40), nullable=False,
server_default=text("B'0000000000000000000000000000000000000000'::\"bit\""))
train_id = Column(ForeignKey('train.train_id'), nullable=False)
seat_type = relationship('SeatType')
train = relationship('Train')
class Ticket(Base):
__tablename__ = 'ticket'
__table_args__ = (
Index('ticket_first_interval_last_interval_seat_id_available_uindex', 'first_interval', 'last_interval',
'seat_id', 'available', unique=True),
)
ticket_id = Column(Integer, primary_key=True, unique=True,
server_default=text("nextval('ticket_ticket_id_seq'::regclass)"))
first_interval = Column(ForeignKey('interval.interval_id'), nullable=False)
last_interval = Column(ForeignKey('interval.interval_id'), nullable=False)
seat_id = Column(ForeignKey('seat.seat_id'), nullable=False)
available = Column(Boolean, nullable=False)
interval = relationship('Interval', primaryjoin='Ticket.first_interval == Interval.interval_id')
interval1 = relationship('Interval', primaryjoin='Ticket.last_interval == Interval.interval_id')
seat = relationship('Seat')
class Order(Base):
__tablename__ = 'orders'
__table_args__ = (
UniqueConstraint('order_timestamp', 'ticket_id', 'order_status'),
)
order_id = Column(Integer, primary_key=True, server_default=text("nextval('orders_order_id_seq'::regclass)"))
order_timestamp = Column(DateTime, nullable=False, server_default=text("now()"))
ticket_id = Column(ForeignKey('ticket.ticket_id'))
order_status = Column(String(16), nullable=False)
user_id = Column(ForeignKey('users.user_id'), nullable=False)
price = Column(Float(53))
ticket = relationship('Ticket')
user = relationship('User')
class SeatType(Base):
__tablename__ = 'seat_type'
seat_type_id = Column(Integer, primary_key=True, unique=True,
server_default=text("nextval('table_name_seat_type_id_seq'::regclass)"))
name = Column(String(16), nullable=False, unique=True)
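# Hedged usage sketch (the PostgreSQL DSN is a placeholder; the server_default sequences
# above assume the schema already exists in a PostgreSQL database):
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#   engine = create_engine("postgresql://user:password@localhost/railway")
#   Session = sessionmaker(bind=engine)
#   session = Session()
#   available_trains = session.query(Train).filter_by(available=True).all()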
| python |
"""Typical Queueing Theory Processes"""
from math import erf, exp, log, pi, sqrt
from nc_arrivals.arrival_distribution import ArrivalDistribution
from utils.exceptions import ParameterOutOfBounds
class DM1(ArrivalDistribution):
"""Corresponds to D/M/1 queue."""
def __init__(self, lamb: float, n=1) -> None:
self.lamb = lamb
self.n = n
def sigma(self, theta=0.0) -> float:
"""
:param theta: mgf parameter
:return: sigma(theta)
"""
return 0.0
def rho(self, theta: float) -> float:
"""
rho(theta)
:param theta: mgf parameter
"""
if theta <= 0:
raise ParameterOutOfBounds(f"theta = {theta} must be > 0")
if theta >= self.lamb:
raise ParameterOutOfBounds(
f"theta = {theta} must be < lambda = {self.lamb}")
return (self.n / theta) * log(self.lamb / (self.lamb - theta))
def is_discrete(self) -> bool:
return True
def average_rate(self) -> float:
return self.n / self.lamb
def __str__(self) -> str:
return f"D/M/1_lambda={self.lamb}_n={self.n}"
def to_value(self, number=1, show_n=False) -> str:
if show_n:
return "lambda{0}={1}_n{0}={2}".format(str(number), str(self.lamb),
str(self.n))
else:
return "lambda{0}={1}".format(str(number), str(self.lamb))
class DGamma1(ArrivalDistribution):
"""Corresponds to D/Gamma/1 queue."""
def __init__(self, alpha_shape: float, beta_rate: float, n=1) -> None:
self.alpha_shape = alpha_shape
self.beta_rate = beta_rate
self.n = n
def sigma(self, theta=0.0) -> float:
"""
:param theta: mgf parameter
:return: sigma(theta)
"""
return 0.0
def rho(self, theta: float) -> float:
"""
rho(theta)
:param theta: mgf parameter
"""
if theta <= 0:
raise ParameterOutOfBounds(f"theta = {theta} must be > 0")
if theta >= self.beta_rate:
raise ParameterOutOfBounds(
f"theta = {theta} must be < beta = {self.beta_rate}")
return (self.n * self.alpha_shape / theta) * log(
self.beta_rate / (self.beta_rate - theta))
def is_discrete(self) -> bool:
return True
def average_rate(self) -> float:
return self.n * self.alpha_shape / self.beta_rate
def __str__(self) -> str:
return f"D/Gamma/1_alpha={self.alpha_shape}_" \
f"beta={self.beta_rate}_n={self.n}"
def to_value(self, number=1, show_n=False) -> str:
if show_n:
return "alpha{0}={1}_beta{0}={2}_n{0}={3}".format(
str(number), str(self.alpha_shape), str(self.beta_rate),
str(self.n))
else:
return "alpha{0}={1}_beta{0}={2}".format(str(number),
str(self.alpha_shape),
str(self.beta_rate))
class MD1(ArrivalDistribution):
"""Corresponds to M/D/1 queue."""
def __init__(self, lamb: float, mu: float, n=1) -> None:
self.lamb = lamb
self.mu = mu
self.n = n
def sigma(self, theta=0.0) -> float:
return 0.0
def rho(self, theta: float) -> float:
if theta <= 0:
raise ParameterOutOfBounds(f"theta = {theta} must be > 0")
return (self.n / theta) * self.lamb * (exp(theta / self.mu) - 1)
def is_discrete(self) -> bool:
return False
def average_rate(self):
return self.n * self.lamb / self.mu
def __str__(self) -> str:
return f"M/D/1_lambda={self.lamb}_mu={self.mu}_n={self.n}"
def to_value(self, number=1, show_n=False) -> str:
if show_n:
return "lambda{0}={1}_mu{0}={2}_n{0}={3}".format(
str(number), str(self.lamb), str(self.mu), str(self.n))
else:
return "lambda{0}={1}_mu{0}={2}".format(str(number),
str(self.lamb),
str(self.mu))
class MM1(ArrivalDistribution):
"""Corresponds to M/M/1 queue."""
def __init__(self, lamb: float, mu: float, n=1) -> None:
self.lamb = lamb
self.mu = mu
self.n = n
def sigma(self, theta=0.0) -> float:
return 0.0
def rho(self, theta: float) -> float:
if theta <= 0:
raise ParameterOutOfBounds(f"theta = {theta} must be > 0")
if theta >= self.mu:
raise ParameterOutOfBounds(f"theta = {theta} must"
f"be < mu = {self.mu}")
return self.n * self.lamb / (self.mu - theta)
def is_discrete(self) -> bool:
return False
def average_rate(self):
return self.n * self.lamb / self.mu
def __str__(self) -> str:
return f"M/M/1_lambda={self.lamb}_mu={self.mu}_n={self.n}"
def to_value(self, number=1, show_n=False) -> str:
if show_n:
return "lambda{0}={1}_mu{0}={2}_n{0}={3}".format(
str(number), str(self.lamb), str(self.mu), str(self.n))
else:
return "lambda{0}={1}_mu{0}={2}".format(str(number),
str(self.lamb),
str(self.mu))
class DPoisson1(ArrivalDistribution):
"""Corresponds to D/Poisson/1 queue."""
def __init__(self, lamb: float, n=1) -> None:
self.lamb = lamb
self.n = n
def sigma(self, theta=0.0) -> float:
"""
:param theta: mgf parameter
:return: sigma(theta)
"""
return 0.0
def rho(self, theta: float) -> float:
"""
rho(theta)
:param theta: mgf parameter
"""
if theta <= 0:
raise ParameterOutOfBounds(f"theta = {theta} must be > 0")
return (self.n / theta) * self.lamb * (exp(theta) - 1)
def is_discrete(self) -> bool:
return True
def average_rate(self) -> float:
return self.n * self.lamb
def __str__(self) -> str:
return f"Poisson_lambda={self.lamb}_n={self.n}"
def to_value(self, number=1, show_n=False) -> str:
if show_n:
return "lambda{0}={1}_n{0}={2}".format(str(number), str(self.lamb),
str(self.n))
else:
return "lambda{0}={1}".format(str(number), str(self.lamb))
class DWeibull1(ArrivalDistribution):
"""Corresponds to D/Weibull/1 queue."""
def __init__(self, lamb: float, n=1) -> None:
self.lamb = lamb
self.n = n
def sigma(self, theta=0.0) -> float:
"""
:param theta: mgf parameter
:return: sigma(theta)
"""
return 0.0
def rho(self, theta: float) -> float:
"""
rho(theta)
:param theta: mgf parameter
"""
if theta <= 0:
raise ParameterOutOfBounds(f"theta = {theta} must be > 0")
sigma = self.lamb / sqrt(2)
error_part = erf(sigma * theta / sqrt(2)) + 1
return self.n * log(1 + sigma * theta * exp(0.5 * (sigma * theta)**2) *
sqrt(0.5 * pi) * error_part) / theta
def is_discrete(self) -> bool:
return True
def average_rate(self) -> float:
sigma = self.lamb / sqrt(2)
return self.n * sigma * sqrt(0.5 * pi)
def __str__(self) -> str:
return f"Weibull_lambda={self.lamb}_n={self.n}"
def to_value(self, number=1, show_n=False) -> str:
if show_n:
return "lambda{0}={1}_n{0}={2}".format(str(number), str(self.lamb),
str(self.n))
else:
return "lambda{0}={1}".format(str(number), str(self.lamb))
| python |
from .swear_handler import swear
from .error_handler import VKErrorHandler, DefaultErrorHandler
| python |
def prime2(a):
if a == 2: return True
if a < 2 or a % 2 == 0: return False
return not any(a % x == 0 for x in range(3, int(a**0.5) + 1, 2))
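# Examples (added for illustration): prime2(2) -> True, prime2(9) -> False,
# prime2(97) -> True; trial division only needs odd divisors up to sqrt(a).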
| python |
# -*- coding: utf-8 -*-
from datetime import datetime
import threading
import time
from logger import logger
LOCK_POOL_WORKERS = threading.RLock()
POOL_WORKERS = {}
def _register_new_worker(worker_id, host, port, datetime_now, ttl=600):
""" Нельзя использовать без блокировки LOCK_POOL_WORKERS """
worker = {
'id': worker_id,
'last_registration': datetime_now,
'last_task_done': None,
'ttl': ttl,
'status': 'free',
'host': host,
'port': port,
}
POOL_WORKERS[worker_id] = worker
return worker
def _update_last_registration_in_worker(worker_id, datetime_now):
""" Нельзя использовать без блокировки LOCK_POOL_WORKERS """
worker = POOL_WORKERS.get(worker_id)
if not worker:
return
worker['last_registration'] = datetime_now
return worker
def register_worker(command, client, ttl=600):
"""
Функция занимается регистрацией новых воркеров и
обновлением регастрационных данных старых воркеров.
"""
port = command['port']
datetime_now = datetime.now()
with LOCK_POOL_WORKERS:
if command['id'] not in POOL_WORKERS:
result = _register_new_worker(
command['id'], client[0], port, datetime_now, ttl)
else:
result = _update_last_registration_in_worker(
command['id'], datetime_now)
logger.info('worker "%s" registered', result)
return result
def _get_free_worker():
free_worker = None
with LOCK_POOL_WORKERS:
for worker in POOL_WORKERS.values():
if worker.get('status') == 'free':
worker['status'] = 'busy'
free_worker = worker
break
return free_worker
def get_free_worker(frequency=2):
while True:
worker = _get_free_worker()
logger.debug('free worker: %s', worker)
if worker:
break
time.sleep(frequency)
return worker
def set_status_worker(worker_id, status):
if worker_id not in POOL_WORKERS:
return
with LOCK_POOL_WORKERS:
worker = POOL_WORKERS[worker_id]
worker['status'] = status
logger.debug('set_status_worker: %s', worker)
return worker
def set_status_task_done_in_worker(worker_id):
if worker_id not in POOL_WORKERS:
return
with LOCK_POOL_WORKERS:
worker = POOL_WORKERS[worker_id]
worker['status'] = 'free'
worker['last_task_done'] = datetime.now()
logger.debug('set_status_task_done_in_worker: %s', worker)
return worker
def delete_worker_of_pool(worker_id):
with LOCK_POOL_WORKERS:
worker = POOL_WORKERS.pop(worker_id)
logger.info('delete worker: %s', worker)
return worker
def is_datetime_old(current_datetime, datetime_now, ttl):
if not current_datetime:
return True
time_to_last_registration = datetime_now - current_datetime
    if time_to_last_registration.total_seconds() > ttl:
return True
return False
def clean_pool_worker():
"""
Функция для чистки пула воркеров
Воркер считаем плохим (мёртвым), если время с последней регистрации
и время с последней решённой задачи превысило TTL
"""
datetime_now = datetime.now()
bad_worker_ids = []
with LOCK_POOL_WORKERS:
for worker_id in POOL_WORKERS:
worker = POOL_WORKERS[worker_id]
ttl = worker.get('ttl', 600)
last_registration = worker.get('last_registration')
last_task_done = worker.get('last_task_done')
registration_is_old = is_datetime_old(
last_registration, datetime_now, ttl)
last_task_done_is_old = is_datetime_old(
last_task_done, datetime_now, ttl)
if registration_is_old and last_task_done_is_old:
bad_worker_ids.append(worker.get('id'))
continue
for worker_id in bad_worker_ids:
POOL_WORKERS.pop(worker_id)
logger.debug('clean pool worker: %s', bad_worker_ids)
return bad_worker_ids
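# Illustrative usage sketch (added; not part of the original module); the command
# payload and client tuple below are hypothetical.
if __name__ == '__main__':
    register_worker({'id': 'w1', 'port': 9001}, ('127.0.0.1', 50000), ttl=60)
    worker = get_free_worker(frequency=1)          # blocks until a worker is free
    set_status_task_done_in_worker(worker['id'])   # mark the worker as free again
    clean_pool_worker()                            # drop workers whose TTL expired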
| python |
import re
import sys
fileName = sys.argv[1]
with open('./'+fileName+'.g', 'r') as rf:
with open('./'+fileName+'-format.g', 'w') as wf:
line = rf.readline()
while line:
infos = re.split(r'[\s]', line)
if infos[0] == 'v':
wf.write('v {} {}\n'.format(int(infos[1]) + 1, infos[2]))
if infos[0] == 'e':
wf.write('e {} {} {}\n'.format(int(infos[1]) +1, int(infos[2]) + 1, infos[3]))
line = rf.readline()
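# Example (added): an input line "v 0 10" is rewritten as "v 1 10" and
# "e 0 3 1" as "e 1 4 1", i.e. vertex ids are shifted from 0-based to 1-based.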
| python |
import numpy as np
import scipy.sparse as sp
## sc-pml and the nonuniform grid are both examples of diagonal scaling operators...we can symmetrize them both
def create_symmetrizer(Sxf, Syf, Szf, Sxb, Syb, Szb):
'''
input Sxf, Syf, etc. are the 3D arrays generated by create_sc_pml in pml.py
#usage should be symmetrized_A = Pl@A@Pr
'''
sxf = Sxf.flatten(order = 'F')
sxb = Sxb.flatten(order = 'F')
syf = Syf.flatten(order = 'F')
syb = Syb.flatten(order = 'F')
szf = Szf.flatten(order = 'F')
szb = Szb.flatten(order = 'F')
    numerator1 = np.sqrt(sxf * syb * szb)
    numerator2 = np.sqrt(sxb * syf * szb)
    numerator3 = np.sqrt(sxb * syb * szf)
    numerator = np.concatenate((numerator1, numerator2, numerator3), axis=0)
    M = len(numerator)
    denominator = 1 / numerator
    Pl = sp.spdiags(numerator, 0, M, M)
    Pr = sp.spdiags(denominator, 0, M, M)
return Pl, Pr
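# Illustrative usage sketch (added; not part of the original module). Following
# the docstring above, the symmetrized system matrix is Pl @ A @ Pr; the PML
# arrays here are placeholders with the expected 3D shape.
if __name__ == '__main__':
    shape = (4, 4, 4)
    ones = np.ones(shape)
    Pl, Pr = create_symmetrizer(ones, ones, ones, ones, ones, ones)
    A = sp.eye(int(3 * np.prod(shape)))   # stand-in for the discretized operator
    A_sym = Pl @ A @ Pr                   # symmetrized operator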
| python |
# -*- coding:UTF-8 -*-
# Author:Tiny Snow
# Date: Wed, 24 Feb 2021, 00:50
# Project Euler # 055 Lychrel numbers
#=================================================Solution
lychrel_numbers = 0
for n in range(1, 10000):
flag = True
str_n = str(n)
reverse_n = ''.join(reversed(str_n))
for _ in range(50):
str_n = str(int(str_n) + int(reverse_n))
reverse_n = ''.join(reversed(str_n))
if str_n == reverse_n:
flag = False
break
    if flag:
lychrel_numbers += 1
print(lychrel_numbers) | python |
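# Expected output (added note): 249 Lychrel candidates below ten thousand.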
"""Apps for cms"""
from django.apps import AppConfig
class CMSConfig(AppConfig):
"""AppConfig for cms"""
name = "cms"
def ready(self):
"""Application is ready"""
import cms.signals # pylint:disable=unused-import, unused-variable
| python |
import abc
import logging
from typing import Optional
from ..defaults import Defaults, Key
from ..errors import MenuConfigError
from ..helpers import Utils
logger = logging.getLogger(__name__)
class AbstractMenu(abc.ABC):
def __init__(self, **config):
self._config = config
self.validate__config()
@abc.abstractmethod
def validate__config(self) -> None:
pass # pragma: no cover
@abc.abstractmethod
def label(self) -> Optional[str]:
pass # pragma: no cover
@property
def config(self) -> dict:
return self._config
class LinkPage(AbstractMenu):
"""Creates a LinkPage Menu object from a dictionary with the following
attributes:
{
"type": "link-page",
"label": [str: None],
"links-to": [str: None],
}
"""
is_link_page: bool = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config[Key.LINKS_TO] = Utils.normalize_page_path(
path=self.config[Key.LINKS_TO]
)
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__}: label:{self.label} links_to:{self.links_to}>"
)
def validate__config(self) -> None:
try:
self.config[Key.LABEL]
except KeyError as error:
raise MenuConfigError(
f"Missing required key '{Key.LABEL}' "
f"for {self.__class__.__name__} in {Defaults.FILENAME_SITE_YAML}."
) from error
try:
self.config[Key.LINKS_TO]
except KeyError as error:
raise MenuConfigError(
f"Missing required key '{Key.LINKS_TO}' "
f"for {self.__class__.__name__} in {Defaults.FILENAME_SITE_YAML}."
) from error
@property
def label(self) -> str:
return self.config[Key.LABEL]
@property
def links_to(self) -> str:
return self.config[Key.LINKS_TO]
@property
def url(self) -> str:
return Utils.urlify(self.links_to)
class LinkURL(AbstractMenu):
"""Creates an LinkURL Menu object from a dictionary with the following
attributes:
{
"type": "link-url",
"label": [str: None],
"url": [str: None],
}
"""
is_link_url: bool = True
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: url:{self.url}>"
def validate__config(self) -> None:
try:
self.config[Key.LABEL]
except KeyError as error:
raise MenuConfigError(
f"Missing required key '{Key.LABEL}' "
f"for {self.__class__.__name__} in {Defaults.FILENAME_SITE_YAML}."
) from error
try:
self.config[Key.URL]
except KeyError as error:
raise MenuConfigError(
f"Missing required key '{Key.URL}' "
f"for {self.__class__.__name__} in {Defaults.FILENAME_SITE_YAML}."
) from error
@property
def label(self) -> str:
return self.config[Key.LABEL]
@property
def url(self) -> str:
return self.config[Key.URL]
class Spacer(AbstractMenu):
"""Creates an Spacer Menu object from a dictionary with the following
attributes:
{
"type": "spacer",
"label": [str?: None],
"size": [str?: None]
}
"""
is_spacer: bool = True
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: size:{self.size}>"
def validate__config(self) -> None:
if self.size is not None and self.size not in Defaults.VALID_SIZES:
raise MenuConfigError(
f"Unsupported value '{self.size}' for {Key.SIZE} for "
f"{self.__class__.__name__} in {Defaults.FILENAME_SITE_YAML}."
)
@property
def label(self) -> Optional[str]:
return self.config.get(Key.LABEL, None)
@property
    def size(self) -> Optional[str]:
return self.config.get(Key.SIZE, None)
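# Illustrative usage sketch (added; not part of the original module). It assumes
# Key.LABEL, Key.LINKS_TO and Key.URL map to the "label", "links-to" and "url"
# keys shown in the docstrings above.
if __name__ == "__main__":
    page = LinkPage(**{"type": "link-page", "label": "About", "links-to": "pages/about"})
    link = LinkURL(**{"type": "link-url", "label": "Source", "url": "https://example.org"})
    print(page.label, page.links_to, link.label, link.url)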
| python |
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
probes = (
('pEN1', 100423573, 100433412, 'Linx'),
('pEN2', 100622909, 100632521, 'Xite'),
('pLG1', 100456274, 100465704, 'Linx'),
('pLG10', 100641750, 100646253, 'Dxpas34'),
('pLG11', 100583328, 100588266, 'Chic1'),
('X3', 100512892, 100528952, 'Cdx4'),
('X4', 100557118, 100569724, 'Chic1')
)
dpath = os.path.expanduser('~/projects/ensemble_hic/data/nora2012/giorgetti2014/DNA_FISH_resume.xlsx')
from xlrd import open_workbook
wb = open_workbook(dpath)
sheet = wb.sheets()[0]
table = np.array([np.array(sheet.row_values(j))[1:13]
                  for j in [2, 3] + list(range(7, sheet.nrows))])
data = {'{}:{}'.format(x[0], x[1]): np.array([float(y) for y in x[2:] if len(y) > 0])
for x in table.T}
region_start = 100378306
X_highres = np.load("plot_data/samples_full.pickle", allow_pickle=True)
X_highres = np.array([x.variables['structures'] for x in X_highres])
X_highres = X_highres.reshape(-1,308,3) * 53
X_lowres = np.load("plot_data/samples_lowres.pickle", allow_pickle=True)
X_lowres = np.array([x.variables['structures'] for x in X_lowres])
X_lowres = X_lowres.reshape(-1, 62, 3) * (5 * 53 ** 3) ** 0.33333
X_null = np.load("plot_data/samples_prior.pickle", allow_pickle=True)
X_null = np.array([x.variables['structures'].reshape(-1, 308, 3)
for x in X_null])
X_null = X_null.reshape(-1, 308, 3) * 53
Xs_alber = []
for i in (100, 1000, 10000):
X_temp = np.load('plot_data/alber_ensemble_n{}.npy'.format(i))
Xs_alber.append(X_temp)
get_bead = lambda p, bead_size: int((np.mean(p[1:3]) - region_start) / bead_size)
combs = ((1,2), (1,6), (1,5), (5,6), (2,1), (0,3), (1,4))
mapping = (data['pEN2:pLG1'], data['pEN2:X4'], data['pEN2:X3'], data['X4:X3'],
data['pLG1:pEN2'], data['Dxpas34:pEN1'], data['pEN2:pLG11'])
def plot_distance_hists(ax, X, i, l1, l2, bead_size, ls):
ax.hist(np.linalg.norm(X[:,get_bead(probes[l1], bead_size)] -
X[:,get_bead(probes[l2], bead_size)],
axis=1),
bins=int(np.sqrt(len(X)) / 3.0), histtype='step',# label='model',
normed=True, color='black', lw=2, ls=ls)
def plot_FISH_hists(ax, i, l1, l2):
ax.hist(mapping[i-1],
bins=int(np.sqrt(len(mapping[i-1]))), histtype='step',
#label='FISH',
normed=True, color='gray', lw=2)
def plot_alber_distance_hists(ax, i, l1, l2):
from ensemble_hic.analysis_functions import calculate_KL_KDE_log
from scipy.linalg import norm
bead_size = 3000
h = lambda p, q: norm(np.sqrt(p) - np.sqrt(q)) / np.sqrt(2)
for j in range(len(Xs_alber)):
alber_ds = np.linalg.norm(Xs_alber[j][:,get_bead(probes[l1], bead_size)] -
Xs_alber[j][:,get_bead(probes[l2], bead_size)],
axis=1)
ax.hist(alber_ds,
bins=int(np.sqrt(len(alber_ds)) / 3.0), histtype='step',
normed=True,
#color=('blue', 'red', 'green')[j],
lw=2)
def plot_all_hists(axes, X, bead_size, ls):
for i, (l1, l2) in enumerate(combs):
plot_distance_hists(axes[i], X, i, l1, l2, bead_size, ls)
def plot_all_FISH_hists(axes):
for i, (l1, l2) in enumerate(combs):
plot_FISH_hists(axes[i], i, l1, l2)
def plot_all_hists_alber(axes):
for i, (l1, l2) in enumerate(combs):
plot_alber_distance_hists(axes[i], i, l1, l2)
fig, axes = plt.subplots(6, 3)
for i in range(3):
pairs = [(axes[2*i,j], axes[2*i+1,j]) for j in range(3)]
for ax1, ax2 in pairs:
ax1.get_shared_x_axes().join(ax1, ax2)
ax1.set_xticklabels([])
plot_all_hists_alber(axes[1::2].ravel())
plot_all_hists(axes[::2].ravel(), X_highres, 3000, ls='-')
plot_all_hists(axes[::2].ravel(), X_lowres, 15000, ls='--')
plot_all_hists(axes[::2].ravel(), X_null, 3000, ls=':')
plot_all_FISH_hists(axes[1::2].ravel())
plot_all_FISH_hists(axes[::2].ravel())
for i, (l1, l2) in enumerate(combs):
ax = axes[::2].ravel()[i]
ax.text(0.5, 0.8, '{} - {}'.format(probes[l1][0], probes[l2][0]),
transform=ax.transAxes)
for ax in axes.ravel():
ax.set_yticks(())
ax.set_xticks((0, 400, 800))
ax.set_xlim((0, 1200))
for x in ('left', 'top', 'right'):
ax.spines[x].set_visible(False)
for ax in axes[-2][1:]:
ax.set_visible(False)
for ax in axes[-1][1:]:
ax.set_visible(False)
l1 = axes[0,0].legend(labels=('ISD (high-res, $n=30$)',
'ISD (low-res, $n=30$)',
'ISD (high-res, prior only)',
'FISH'))
l2 = axes[1,0].legend(labels=(r'PGS ($n=2\times100$)',
r'PGS ($n=2\times1000$)',
r'PGS ($n=2\times10000$)'))
# handles1, labels1 = axes[0,0].get_legend_handles_labels()
# handles2, labels2 = axes[0,1].get_legend_handles_labels()
handles1 = l1.legendHandles
handles2 = l2.legendHandles
labels1 = l1.texts
labels2 = l2.texts
l1.set_visible(False)
l2.set_visible(False)
new_handles = [Line2D([], [], linewidth=3, ls='--' if i == 1 else '-',
c=h.get_edgecolor())
for i, h in enumerate(handles1 + handles2)]
new_handles[2].set_linestyle(':')
l3 = axes[-2,1].legend(frameon=False, handles=new_handles,
labels=[x.get_text() for x in labels1 + labels2])
axes[-2,1].set_visible(True)
axes[-2,1].spines['bottom'].set_visible(False)
axes[-2,1].set_xticks(())
| python |
features_dict = {
"Name":{
"Description":"String",
"Pre_Action":'''
''',
"Post_Action":'''
''',
"Equip":'''
''',
"Unequip":'''
'''
},
"Dual Wielding":{
"Description":"You can use this weapon in your Off Hand (if available) and attack for -1 AP but with no Techinques. ",
"Pre_Action":'''
weapon = input("Do you want to use your\n" + source.Equipment["Main Hand"] + "\n or your\n" + source.Equipment["Off Hand"])
''',
"Equip":'''
if slot == "Off Hand":
source.Equipment[slot][item]["AP"] -= 1
source.Equipment[slot][item]["Techniques] = {}
source.Pre_Action.update("Dual Wielding" = features_dict["Dual Wielding"]["Pre_Action"])
''',
"Unequip":'''
source.Pre_Action.pop("Dual Wielding")
'''
},
"Dueling":{
"Description":"You can perform Feint, Parry, Riposte, and Disarm for -1 AP/RP respectively. ",
"Pre_Action":'''
if action == "Feint" or "Disarm":
source.AP += 1
''',
"Pre_Reaction":'''
if reaction == "Parry" or "Riposte":
source.RP += 1
''',
"Equip":'''
source.Pre_Action.update(Dueling = features_dict["Dueling"]["Pre_Action"])
source.Pre_Reaction.update(Dueling = features_dict["Dueling"]["Pre_Reaction"])
''',
"Unequip":'''
source.Pre_Action.pop("Dueling")
source.Pre_Reaction.pop("Dueling")
'''
},
"Finesse":{
"Description":"You can Replace your Muscle skill with your Finesse Skill",
"Pre_Action":'''
if action == "Weapon Attack":
source.misc_bonus -= mods(source.Attributes["STR"])
source.misc_bonus -= source.Skills["Muscle"]
source.misc_bonus += mods(source.Attributes["DEX"])
source.misc_bonus += source.Skills["Finesse"]
''',
"Post_Action":'''
if action == "Weapon Attack":
source.misc_bonus -= mods(source.Attributes["DEX"])
source.misc_bonus -= source.Skills["Finesse"]
source.misc_bonus += mods(source.Attributes["STR"])
source.misc_bonus += source.Skills["Muscle"]
''',
"Equip":'''
source.Pre_Action.update(Finesse = features_dict["Finesse"]["Pre_Action"])
source.Post_Action.update(Finesse = features_dict["Finesse"]["Post_Action"])
''',
"Unequip":'''
source.Pre_Action.pop("Finesse")
            source.Post_Action.pop("Finesse")
'''
},
"Grappling":{
"Description":"You can perform Wrestle checks with this weapon against a target",
"Pre_Action":'''
''',
"Post_Action":'''
''',
"Equip":'''
''',
"Unequip":'''
'''
},
"Heavy":{
"Description":"You can use 2 techniques per attack",
"Pre_Action":'''
''',
"Post_Action":'''
''',
"Equip":'''
''',
"Unequip":'''
'''
},
"Light":{
"Description":"Doesn't damage Heavy armors Durability",
"Post_Roll":'''
if action == "Weapon Attack":
target_armor = target.Equipment["Armor"]
if target_armor["Type"] == "Heavy":
target.Equipment["Armor"][target_armor]["Durability"] += 1
''',
"Equip":'''
            source.Post_Roll.update(Light = features_dict["Light"]["Post_Roll"])
''',
"Unequip":'''
source.Post_Roll.pop("Light")
'''
},
"Thrown":{
"Description":"You can add 1 stage of momentum to your impact equation when you attack with this weapon at range.",
"Pre_Action":'''
range = distance(source,target)
if action == "Weapon Attack" and range > 1:
status(source,momentum,1)
''',
"Post_Action":'''
if action == "Weapon Attack" and range > 1:
status(source,momentum,-1)
''',
"Equip":'''
source.Pre_Action.update(Thrown = features_dict["Thrown"]["Pre_Action"])
source.Post_Action.update(Thrown = features_dict["Thrown"]["Post_Action"])
''',
"Unequip":'''
source.Pre_Action.pop("Thrown")
source.Post_Action.pop("Thrown")
'''
},
"Versatile":{
"Description":"You can use the weapon as a Piercing or Slashing weapon.",
"Pre_Action":'''
if action == "Weapon Attack":
choice = input("Do you want to use slashing or piercing?")
if choice == "slashing":
source.Equipment[weapon]["Type"] = "Slashing"
else:
source.Equipment[weapon]["Type"] = "Piercing"
''',
"Equip":'''
            source.Pre_Action.update(Versatile = features_dict["Versatile"]["Pre_Action"])
''',
"Unequip":'''
source.Pre_Action.pop("Versatile)
'''
},
}
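# Hedged sketch (added): the engine that consumes these snippets is not shown in
# this file. Since the "Equip"/"Unequip"/"Pre_Action" entries hold Python source,
# a consumer would presumably run them with exec() against a namespace providing
# names such as source, slot, item and action, e.g.:
#
#     exec(features_dict["Dueling"]["Equip"],
#          {"features_dict": features_dict, "source": source})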
| python |
import os
import subprocess
import pytest
from app.synspec import wrapper
def test_synspecwrapper_remove_spectrum(mocker):
syn = wrapper.SynspecWrapper(teff=20000, logg=4, wstart=4400, wend=4600)
mocker.patch("os.remove")
syn._remove_spectrum()
os.remove.assert_called_once()
def test_synspecwrapper_no_spectrum():
syn = wrapper.SynspecWrapper(teff=20000, logg=4, wstart=4400, wend=4401)
with pytest.raises(wrapper.NoSpectrumError):
syn.spectrum
def test_synspecwrapper_spectrum(mocker):
syn = wrapper.SynspecWrapper(teff=20000, logg=4, wstart=4400, wend=4401)
mock_spectrum_file = " 4400.000 3.508E+07\n 4400.010 3.507E+07\n"
test_spectrum = [
{"wavelength": 4400, "flux": 35080000},
{"wavelength": 4400.01, "flux": 35070000},
]
mocker.patch("builtins.open", mocker.mock_open(read_data=mock_spectrum_file))
returned_spectrum = syn.spectrum
assert returned_spectrum == test_spectrum # nosec
def test_synspecwrapper_calculate_spectrum(mocker):
syn = wrapper.SynspecWrapper(teff=20000, logg=4, wstart=4400, wend=4401)
mocker.patch("subprocess.call")
syn.calculate_spectrum()
subprocess.call.assert_called_once()
def test_synspec():
wstart, wend = 4000, 5000
syn = wrapper.SynspecWrapper(teff=20000, logg=4, wstart=wstart, wend=wend)
syn.calculate_spectrum()
assert syn.spectrum[0]["wavelength"] == pytest.approx(wstart) # nosec
assert syn.spectrum[-1]["wavelength"] == pytest.approx(wend) # nosec
| python |
# curl -i -X GET 'http://192.168.0.146:8000/v2/projects'
import requests
SERVER_IP = '192.168.0.146'
SERVER_PORT = '8000'
r = requests.get('http://'+SERVER_IP+':'+SERVER_PORT+'/v2/projects')
#print(r.status_code)
#print(r.headers['content-type'])
#print(r.encoding)
#print(r.text)
#print(type(r.json()))
ALL_PROJECT=[]
OPENED_PROJECT=[]
for i in r.json():
#print(i)
ALL_PROJECT.append([i['name'], i['project_id'],i['status']])
if i['status'] == 'opened':
OPENED_PROJECT.append([i['name'], i['project_id'], i['status']])
#print(PROJECT_LIST)
#for i in ALL_PROJECT:
# print(i)
for i in OPENED_PROJECT:
print(i)
MYPROJECT=OPENED_PROJECT[0][1]  # project_id is the second field of each entry
MYPROJECT='017a3d81-ad55-48f3-adc1-695fa58e9078'
REST_TAIL='/nodes'
nodes = requests.get('http://'+SERVER_IP+':'+SERVER_PORT+'/v2/projects/'+ MYPROJECT + REST_TAIL)
print('### Nodes')
for i in nodes.json():
print(i)
print(i['node_id'])
print(i['ports'])
REST_TAIL='/links'
links = requests.get('http://' + SERVER_IP + ':' + SERVER_PORT + '/v2/projects/' + MYPROJECT + REST_TAIL)
print('### Links')
for i in links.json():
print(i)
#create_links
ADAPTER_NBR1="0"
NODE_ID1='"5cc4a8f6-f4f2-4a0f-8d08-86d041601284"'
PORT_NBR1="0"
ADAPTER_NBR2="0"
NODE_ID2='"e8cfb52f-ee29-4c3b-b8be-f55dc6e1cea5"'
PORT_NBR2="0"
CreateLinkUrl='http://' + SERVER_IP + ':' + SERVER_PORT + '/v2/projects/' + MYPROJECT + REST_TAIL
data='{"nodes": [{"adapter_number": '+ ADAPTER_NBR1 +', "node_id": '+NODE_ID1+', "port_number": '+PORT_NBR1+'}, {"adapter_number": '+ADAPTER_NBR2+', "node_id": '+NODE_ID2+', "port_number": '+ PORT_NBR2+'}]}'
print(CreateLinkUrl)
CreateLinkRequest = requests.post(CreateLinkUrl, data)
print(CreateLinkRequest)
#linkReq=
#requests.get('http://' + SERVER_IP + ':' + SERVER_PORT + '/v2/projects/' + MYPROJECT + REST_TAIL+ ' -d' + ' {"nodes": [{"adapter_number": 0, "node_id": "f124dec0-830a-451e-a314-be50bbd58a00", "port_number": 0}, {"adapter_number": 0, "node_id": "83892a4d-aea0-4350-8b3e-d0af3713da74", "port_number": 0}]}'
# Working shell request
# curl -X POST "http://192.168.0.146:8000/v2/projects/017a3d81-ad55-48f3-adc1-695fa58e9078/links" -d '{"nodes": [{"adapter_number": 0, "node_id": "5cc4a8f6-f4f2-4a0f-8d08-86d041601284", "port_number": 0}, {"adapter_number": 0, "node_id": "e8cfb52f-ee29-4c3b-b8be-f55dc6e1cea5", "port_number": 0}]}'
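# An equivalent (added, illustrative) way to build the payload above without manual
# string concatenation is to let requests serialize a dict via its json= parameter:
#
#     payload = {"nodes": [
#         {"adapter_number": 0, "node_id": NODE_ID1.strip('"'), "port_number": 0},
#         {"adapter_number": 0, "node_id": NODE_ID2.strip('"'), "port_number": 0},
#     ]}
#     requests.post(CreateLinkUrl, json=payload)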
#TODO
# compare API version <> GNS3 version
#list all projects + name + id + status
#list all opened projects + name + id + status
#for a given project id
# ==> list of nodes all property
#for a given node id
# ==> list all properties
# ==> list connections
# ==> list interfaces
# if i['status'] == 'opened':
# print(i['project_id'])
# thisproject=i['project_id']
# for key, value in i:
# print(i['status']) | python |
import cx_Oracle
import log
import define_data_type as DTF
class Cache:
def __init__(self):
self._results = {}
def execute(self, conn, table, param, value):
sql_request = f"SELECT * FROM {table} WHERE {param}='{value}'"
try:
return self._results[sql_request]
except KeyError:
            with conn.cursor() as cursor:
                cursor.execute(sql_request)
                res = cursor.fetchall()
                self._results[sql_request] = res
                return res
def __connection() -> tuple:
return "SYS", cx_Oracle.connect(
"SYSDBA",
"",
"localhost:1521/xe",
encoding="UTF-8",
mode=cx_Oracle.SYSDBA,
)
def connection(*, commit=False):
def wrapper(func):
def wrapper_func(*args):
conn = None
try:
user_name, conn = __connection()
return func(conn, user_name, *args)
# except Exception as e:
# log.error(e)
finally:
if conn is not None:
if commit:
conn.commit()
conn.close()
return wrapper_func
return wrapper
@connection(commit=True)
def init_tables(conn, user_name):
sqls = [
f"""\
CREATE TABLE relationship_in_tables(
table1_name VARCHAR2(64),
column_from_table1 VARCHAR2(64),
table2_name VARCHAR2(64),
column_from_table2 VARCHAR2(64),
primary key(table1_name, column_from_table1, table2_name, column_from_table2)
)""",
f"""\
CREATE TABLE type_columns_in_tables(
table_name VARCHAR2(64),
column_name VARCHAR2(64),
column_type VARCHAR2(64),
primary key(table_name, column_name, column_type)
)""",
f"""\
CREATE TABLE enrichment_tables(
table_name VARCHAR2(64),
column_name VARCHAR2(64),
data_type VARCHAR2(64),
column_id INTEGER,
primary key(table_name, column_name)
)""",
]
with conn.cursor() as cursor:
for sql in sqls:
sql = sql.replace(" ", "")
try:
print(sql)
cursor.execute(sql)
except Exception as e:
print("error")
print(e)
else:
print("good")
@connection()
def get_relationship(conn, user_name):
retval = {}
with conn.cursor() as cursor:
for row in cursor.execute(
f"SELECT table1_name, column_from_table1, table2_name, column_from_table2 FROM relationship_in_tables"
):
try:
retval[row[0]].add((row[2], row[1], row[3]))
except KeyError:
                retval[row[0]] = {(row[2], row[1], row[3])}
try:
retval[row[2]].add((row[0], row[3], row[1]))
except KeyError:
                retval[row[2]] = {(row[0], row[3], row[1])}
return retval
# @connection()
# def insert_into_select_request_log(conn, user_name, table, param, value):
# if not IS_LOG:
# return
# conn.execute(
# "INSERT INTO select_request_log (table_name, column_name, column_value, request_time) "
# "VALUES($1, $2, $3, current_timestamp)",
# table, param, value
# )
@connection()
def get_info(conn, user_name, table_name, param_name, param_value):
tree = get_relationship()
info = {}
paths = {}
current_tables = [(table_name, param_name, param_value)]
cache = Cache()
while current_tables:
table, param, value = current_tables.pop(0)
if table not in paths:
paths[table] = set()
try:
            datas = cache.execute(conn, table, param, value)
# datas = conn.fetch(f"SELECT * FROM {table} WHERE {param}='{value}'")
except Exception as e:
continue
if not datas:
continue
try:
_ = info[table]
except Exception:
info[table] = set()
is_added = False
for data in datas:
if data not in info[table]:
is_added = True
info[table].add(data)
if not is_added:
continue
next_tables = tree.get(table)
if next_tables is None:
continue
for (next_table, prev_param, next_param) in next_tables:
if not (next_table in paths and table in paths[next_table]):
paths[table].add(next_table)
for data in datas:
current_tables.append([next_table, next_param, data[prev_param]])
print(f"Был пройден следующий путь начиная с {table_name}")
return info
@connection()
def get_tables(conn, user_name):
sql = (
"SELECT table_name, column_name, data_type "
f"FROM enrichment_tables "
"order by table_name"
)
tables = {}
with conn.cursor() as cursor:
for row in cursor.execute(sql):
try:
tables[row[0]].append([row[1], row[2]])
except KeyError:
tables[row[0]] = [[row[1], row[2]]]
return tables
@connection()
def analyze_relationship(
conn, user_name, tables: list, curr_table: str, curr_columns: list
):
    # walk over all tables and try to find relationships based on their contents
for name, columns in tables.items():
if name == curr_table:
continue
for column in columns:
for curr_column in curr_columns:
if curr_column[1] == column[1]:
similar_procent = analyze_two_columns(
curr_table, curr_column[0], name, column[0]
)
if similar_procent:
insert_relationship(
curr_table, curr_column[0], name, column[0], similar_procent
)
    # fetch all tables whose columns have a type similar to one in the table under analysis, e.g. a phone-number column
sql = (
"SELECT table_name, column_name, column_type "
f"FROM type_columns_in_tables "
f"WHERE column_type in (SELECT column_type FROM type_columns_in_tables WHERE table_name='{curr_table}')"
)
curr_columns = {}
columns_type = {}
with conn.cursor() as cursor:
for row in cursor.execute(sql):
if row[0] == curr_table:
curr_columns[row[2]] = row[1]
continue
try:
columns_type[row[2]].append([row[0], row[1]])
except KeyError:
columns_type[row[2]] = [[row[0], row[1]]]
for type_, column_name1 in curr_columns.items():
data = columns_type.get(type_)
if data is None:
continue
for table2, column_name2 in data:
insert_relationship(curr_table, column_name1, table2, column_name2)
@connection(commit=True)
def insert_relationship(
conn, user_name, table1, column1, table2, column2, similar_procent=0
):
sql = (
f"SELECT * FROM relationship_in_tables "
f"WHERE "
f"table1_name='{table1}' and column_from_table1='{column1}' and table2_name='{table2}' and column_from_table2='{column2}' "
"OR "
f"table1_name='{table2}' and column_from_table1='{column2}' and table2_name='{table1}' and column_from_table2='{column1}'"
)
with conn.cursor() as cursor:
for row in cursor.execute(sql):
return
with conn.cursor() as cursor:
sql = (
f"INSERT INTO relationship_in_tables (table1_name, column_from_table1, table2_name, column_from_table2) "
"VALUES(:1, :2, :3, :4)"
)
cursor.execute(sql, [table1, column1, table2, column2])
# print(table1, column1, table2, column2, f"[similar = {similar_procent*100}%]")
@connection()
def analyze_two_columns(conn, user_name, table1, column1, table2, column2):
sql_full = (
f"SELECT {table1}.{column1} AS col1, {table2}.{column2} AS col2 "
f"FROM {table1} "
f"FULL JOIN {table2} "
f"ON {table1}.{column1}={table2}.{column2}"
)
sql_inner = sql_full.replace("FULL JOIN", "INNER JOIN")
with conn.cursor() as cursor:
cursor.execute(sql_full)
res_full = cursor.fetchall()
cursor.execute(sql_inner)
res_inner = cursor.fetchall()
if len(res_full) > 0:
# print(res_full)
return len(res_inner) / len(res_full)
@connection()
def detect_column_type(conn, user_name, table):
types = {}
with conn.cursor() as cursor:
rows = cursor.execute(f"SELECT * FROM {table}")
col_names = [row[0] for row in cursor.description]
for row in rows:
for param_name, param_value in zip(col_names, row):
param_value = str(param_value)
for assumption in DTF.detect_type(param_value):
try:
types[param_name][assumption] += 1
except KeyError:
types[param_name] = {assumption: 1}
    for column, column_types in types.items():
        for type_name in column_types:
insert_type_columns_in_tables(table, column, type_name)
@connection(commit=True)
def insert_type_columns_in_tables(conn, user_name, table, column, type_name):
with conn.cursor() as cursor:
sql = (
"INSERT /*+ ignore_row_on_dupkey_index (type_columns_in_tables(table_name, column_name, column_type)) */ "
f"INTO type_columns_in_tables(table_name, column_name, column_type) VALUES(:1, :2, :3)"
)
cursor.execute(sql, [table, column, type_name])
@connection(commit=True)
def insert_data_in_table(conn, user_name, table, rows, columns=None):
if columns is None:
with conn.cursor() as cursor:
columns = [
x[0]
for x in cursor.execute(
f"SELECT column_name FROM enrichment_tables WHERE table_name='{table}' ORDER BY COLUMN_ID"
)
]
columns_str = ", ".join([str(x) for x in columns])
columns_num = ", ".join([f":{i+1}" for i, _ in enumerate(columns)])
with conn.cursor() as cursor:
for row in rows:
try:
cursor.execute(
f"INSERT INTO {table} ({columns_str}) values ({columns_num})", row
)
except Exception as e:
print(e)
@connection(commit=True)
def insert_info_about_table(conn, user_name, table, schema):
rows = [
(table, column_name, data_type, i)
for i, (column_name, data_type) in enumerate(schema)
]
content = ",\n".join(
[
f"\t{column_name} {data_type}"
for column_name, data_type in schema
]
)
with conn.cursor() as cursor:
cursor.execute(
f"SELECT table_name FROM enrichment_tables WHERE table_name='{table}'"
)
if not cursor.fetchone():
sql = f"CREATE TABLE {table}(\n{content}\n)"
log.debug(sql)
cursor.execute(sql)
cursor.executemany(
"INSERT INTO enrichment_tables (table_name, column_name, data_type, column_id) values (:1, :2, :3, :4)",
rows,
)
@connection(commit=True)
def delete_table(conn, user_name, table):
with conn.cursor() as cursor:
cursor.execute(
f"DELETE FROM enrichment_tables WHERE table_name='{table}'"
)
cursor.execute(
f"DROP TABLE {table}"
)
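# Illustrative usage sketch (added; not part of the original module). The table
# name, schema and lookup values below are hypothetical, and a reachable Oracle XE
# instance is assumed.
if __name__ == "__main__":
    init_tables()                                     # create the metadata tables
    insert_info_about_table("clients", [("phone", "VARCHAR2(32)"), ("name", "VARCHAR2(64)")])
    insert_data_in_table("clients", [("555-0100", "John Doe")])
    detect_column_type("clients")                     # record detected column types
    info = get_info("clients", "phone", "555-0100")   # breadth-first walk over related tables
    print(info)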
| python |
import requests
no = input("enter your no")
r = requests.get('https://get.geojs.io/')
ip_request = requests.get('https://get.geojs.io/v1/ip.json')
ipadd = ip_request.json()['ip']
url = 'https://get.geojs.io/v1/ip/geo/' + ipadd + '.json'
geo_request = requests.get(url)
geo_data = geo_request.json()
msg = f"latitude: {geo_data['latitude']} longitude : {geo_data['longitude']} city : {geo_data['city']}"
url1 = "https://www.fast2sms.com/dev/bulk"
query = {"authorization" : "your api key ",
"sender_id" : "FSTSMS",
"message" : msg,
"language" : "english",
"route" : "p",
"numbers" : no
}
headers = {
'cache-control' : "no-cache"
}
response = requests.request("GET", url1, headers=headers, params=query)
print(response.text)
| python |
# encoding: utf-8
from .usstock_interface import * | python |
from SimPEG import Survey, Utils, Problem, np, sp, mkvc
from simpegMT.Utils import rec2ndarr
import simpegMT
from scipy.constants import mu_0
import sys
from numpy.lib import recfunctions as recFunc
############
### Data ###
############
class DataMT(Survey.Data):
'''
Data class for MTdata
:param SimPEG survey object survey:
:param v vector with data
'''
def __init__(self, survey, v=None):
# Pass the variables to the "parent" method
Survey.Data.__init__(self, survey, v)
# # Import data
# @classmethod
# def fromEDIFiles():
# pass
def toRecArray(self,returnType='RealImag'):
'''
Function that returns a numpy.recarray for a SimpegMT impedance data object.
:param str returnType: Switches between returning a rec array where the impedance is split to real and imaginary ('RealImag') or is a complex ('Complex')
'''
# Define the record fields
dtRI = [('freq',float),('x',float),('y',float),('z',float),('zxxr',float),('zxxi',float),('zxyr',float),('zxyi',float),
('zyxr',float),('zyxi',float),('zyyr',float),('zyyi',float),('tzxr',float),('tzxi',float),('tzyr',float),('tzyi',float)]
dtCP = [('freq',float),('x',float),('y',float),('z',float),('zxx',complex),('zxy',complex),('zyx',complex),('zyy',complex),('tzx',complex),('tzy',complex)]
impList = ['zxxr','zxxi','zxyr','zxyi','zyxr','zyxi','zyyr','zyyi']
for src in self.survey.srcList:
# Temp array for all the receivers of the source.
            # Note: needs to be written more generally, using different rxTypes and not all the data at the locations
# Assume the same locs for all RX
locs = src.rxList[0].locs
if locs.shape[1] == 1:
locs = np.hstack((np.array([[0.0,0.0]]),locs))
elif locs.shape[1] == 2:
locs = np.hstack((np.array([[0.0]]),locs))
tArrRec = np.concatenate((src.freq*np.ones((locs.shape[0],1)),locs,np.nan*np.ones((locs.shape[0],12))),axis=1).view(dtRI)
# np.array([(src.freq,rx.locs[0,0],rx.locs[0,1],rx.locs[0,2],np.nan ,np.nan ,np.nan ,np.nan ,np.nan ,np.nan ,np.nan ,np.nan ) for rx in src.rxList],dtype=dtRI)
# Get the type and the value for the DataMT object as a list
typeList = [[rx.rxType.replace('z1d','zyx'),self[src,rx]] for rx in src.rxList]
# Insert the values to the temp array
for nr,(key,val) in enumerate(typeList):
tArrRec[key] = mkvc(val,2)
# Masked array
mArrRec = np.ma.MaskedArray(rec2ndarr(tArrRec),mask=np.isnan(rec2ndarr(tArrRec))).view(dtype=tArrRec.dtype)
# Unique freq and loc of the masked array
uniFLmarr = np.unique(mArrRec[['freq','x','y','z']]).copy()
try:
outTemp = recFunc.stack_arrays((outTemp,mArrRec))
#outTemp = np.concatenate((outTemp,dataBlock),axis=0)
except NameError as e:
outTemp = mArrRec
if 'RealImag' in returnType:
outArr = outTemp
elif 'Complex' in returnType:
# Add the real and imaginary to a complex number
outArr = np.empty(outTemp.shape,dtype=dtCP)
for comp in ['freq','x','y','z']:
outArr[comp] = outTemp[comp].copy()
for comp in ['zxx','zxy','zyx','zyy','tzx','tzy']:
outArr[comp] = outTemp[comp+'r'].copy() + 1j*outTemp[comp+'i'].copy()
else:
            raise NotImplementedError('{:s} is not implemented; returnType has to be RealImag or Complex.'.format(returnType))
# Return
return outArr
@classmethod
def fromRecArray(cls, recArray, srcType='primary'):
"""
Class method that reads in a numpy record array to MTdata object.
Only imports the impedance data.
"""
if srcType=='primary':
src = simpegMT.SurveyMT.srcMT_polxy_1Dprimary
elif srcType=='total':
            src = simpegMT.SurveyMT.srcMT_polxy_1DhomotD
else:
            raise NotImplementedError('{:s} is not a valid source type for MTdata'.format(srcType))
# Find all the frequencies in recArray
uniFreq = np.unique(recArray['freq'])
srcList = []
dataList = []
for freq in uniFreq:
# Initiate rxList
rxList = []
# Find that data for freq
dFreq = recArray[recArray['freq'] == freq].copy()
# Find the impedance rxTypes in the recArray.
rxTypes = [ comp for comp in recArray.dtype.names if (len(comp)==4 or len(comp)==3) and 'z' in comp]
for rxType in rxTypes:
# Find index of not nan values in rxType
notNaNind = ~np.isnan(dFreq[rxType])
if np.any(notNaNind): # Make sure that there is any data to add.
locs = rec2ndarr(dFreq[['x','y','z']][notNaNind].copy())
if dFreq[rxType].dtype.name in 'complex128':
rxList.append(simpegMT.SurveyMT.RxMT(locs,rxType+'r'))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(simpegMT.SurveyMT.RxMT(locs,rxType+'i'))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
else:
rxList.append(simpegMT.SurveyMT.RxMT(locs,rxType))
dataList.append(dFreq[rxType][notNaNind].copy())
srcList.append(src(rxList,freq))
# Make a survey
survey = simpegMT.SurveyMT.SurveyMT(srcList)
dataVec = np.hstack(dataList)
return cls(survey,dataVec) | python |
import itertools
from aoc_cqkh42 import BaseSolution
class Solution(BaseSolution):
def part_a(self):
return self.data.count('(') - self.data.count(')')
def part_b(self):
instructions = (1 if item == '(' else -1 for item in self.data)
return list(itertools.accumulate(instructions)).index(-1) + 1
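# Example (added): with data "()())", part_a() returns -1 and part_b() returns 5.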
| python |
from .core import core
from .task_parser import TaskParser, UnexpectedDayName
from .wrapper import GoogleTasksWrapper, NoSuchTaskList | python |
class PulldownButtonData(ButtonData):
"""
This class contains information necessary to construct a pulldown button in the Ribbon.
PulldownButtonData(name: str,text: str)
"""
@staticmethod
def __new__(self,name,text):
""" __new__(cls: type,name: str,text: str) """
pass
| python |
import numpy as np
import os
from pyspark.sql import SparkSession
import cluster_pack
from cluster_pack.spark import spark_config_builder
if __name__ == "__main__":
package_path, _ = cluster_pack.upload_env()
ssb = SparkSession.builder \
.appName("spark_app") \
.master("yarn") \
.config("spark.submit.deployMode", "client") \
.config("spark.driver.memory", "1g") \
.config("spark.executor.memory", "1g") \
.config("spark.executor.memoryOverhead", "1g") \
.config("spark.executor.cores", "1") \
.config("spark.acls.enable", "true") \
.config("spark.ui.view.acls", "*")
spark_config_builder.add_packaged_environment(ssb, package_path)
spark_config_builder.add_editable_requirements(ssb)
ss = ssb.getOrCreate()
# create 2 arrays with random ints range 0 to 100
a = np.random.random_integers(0, 100, 100)
b = np.random.random_integers(0, 100, 100)
# compute intersection of 2 arrays on the worker
def compute_intersection(x):
first, second = x
return np.intersect1d(first, second)
rdd = ss.sparkContext.parallelize([(a, b)], numSlices=1)
res = rdd.map(compute_intersection).collect()
print(f"intersection of arrays len={len(res)} res={res}")
| python |
from django.db import models
from django.db import migrations
import django.db.models.deletion
import swapper
class Migration(migrations.Migration):
dependencies = [
('imagestore_cms', '0001_initial'),
]
operations = [
migrations.AlterField(
            model_name='imagestorealbumptr',
name='album',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=swapper.get_model_name('imagestore', 'Album'),
verbose_name='Album'),
),
migrations.AlterField(
model_name='imagestorealbumcarousel',
name='album',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=swapper.get_model_name('imagestore', 'Album'),
verbose_name='Album'),
),
]
| python |
# pylint: disable=duplicate-code
"""
Authentication example
======================
.. Copyright:
Copyright Wirepas Ltd 2019 licensed under Apache License, Version 2.0
See file LICENSE for full license details.
"""
from utils import get_settings, setup_log
from connections import Connections
import json
from enum import Enum, auto
from wirepas_messaging.wnt.ws_api import AuthenticationMessages
class AuthenticationExample(object):
"""Main example class which is run"""
class State(Enum):
"""State enumeration class"""
START = auto()
LOGIN = auto() # Started on authentication_on_open
QUERY_USERS = auto()
CREATE_USER = auto()
QUERY_USERS_2 = auto()
UPDATE_USER = auto()
QUERY_USERS_3 = auto()
DELETE_USER = auto()
QUERY_USERS_4 = auto()
END = auto()
def __init__(self) -> None:
"""Initialization"""
self.return_code = -1
self.state = self.State(self.State.START.value + 1)
self.new_user = dict(
username="jdoeexample",
password="secret",
full_name="John Doe",
role=AuthenticationMessages.Role.OPERATOR.value,
updated_full_name="John J. Doe",
updated_password="secret2",
updated_role=AuthenticationMessages.Role.ADMIN.value,
)
self.settings = get_settings()
self.logger = setup_log("AuthenticationExample", self.settings.log_level)
self.client = Connections(
hostname=self.settings.hostname,
logger=self.logger,
authentication_on_open=self.authentication_on_open,
authentication_on_message=self.authentication_on_message,
authentication_on_error=self.authentication_on_error,
authentication_on_close=self.authentication_on_close,
)
self.authentication = AuthenticationMessages(
self.logger, self.settings.protocol_version
)
def send_request(self, websocket) -> None:
"""Send request
Args:
websocket (Websocket): communication socket
"""
if self.state.name.startswith(self.State.LOGIN.name):
websocket.send(
json.dumps(
self.authentication.message_login(
self.settings.username, self.settings.password
)
)
)
elif self.state.name.startswith(self.State.QUERY_USERS.name):
websocket.send(json.dumps(self.authentication.message_query_users()))
elif self.state.name.startswith(self.State.CREATE_USER.name):
websocket.send(
json.dumps(
self.authentication.message_create_user(
username=self.new_user["username"],
password=self.new_user["password"],
full_name=self.new_user["full_name"],
role=self.new_user["role"],
)
)
)
elif self.state.name.startswith(self.State.UPDATE_USER.name):
websocket.send(
json.dumps(
self.authentication.message_update_user(
username=self.new_user["username"],
new_password=self.new_user["updated_password"],
new_full_name=self.new_user["updated_full_name"],
new_role=self.new_user["updated_role"],
)
)
)
elif self.state.name.startswith(self.State.DELETE_USER.name):
websocket.send(
json.dumps(
self.authentication.message_delete_user(
username=self.new_user["username"]
)
)
)
def parse_response(self, message: str) -> bool:
"""Parse response
Args:
message (str): received message
Returns:
bool: True if response's request succeeded
"""
if self.state.name.startswith(self.State.LOGIN.name):
if not self.authentication.parse_login(json.loads(message)):
return False
elif self.state.name.startswith(self.State.QUERY_USERS.name):
if not self.authentication.parse_query_users(json.loads(message)):
return False
elif self.state.name.startswith(self.State.CREATE_USER.name):
if not self.authentication.parse_create_user(json.loads(message)):
return False
elif self.state.name.startswith(self.State.UPDATE_USER.name):
if not self.authentication.parse_update_user(json.loads(message)):
return False
elif self.state.name.startswith(self.State.DELETE_USER.name):
if not self.authentication.parse_delete_user(json.loads(message)):
return False
return True
def authentication_on_open(self, websocket) -> None:
"""Websocket callback when the authentication websocket has been opened
Args:
websocket (Websocket): communication socket
"""
self.logger.info("Socket open")
self.send_request(websocket)
def authentication_on_message(self, websocket, message: str) -> None:
"""Websocket callback when a new authentication message arrives
Args:
websocket (Websocket): communication socket
message (str): received message
"""
if not self.parse_response(message):
self.logger.error("Example run failed. Exiting.")
self.client.stop_authentication_thread()
else:
self.state = self.State(self.state.value + 1)
if self.state != self.State.END:
self.send_request(websocket)
else:
self.return_code = 0
self.client.stop_authentication_thread()
def authentication_on_error(self, websocket, error: str) -> None:
"""Websocket callback when an authentication socket error occurs
Args:
            websocket (Websocket): communication socket
error (str): error message
"""
if websocket.keep_running:
self.logger.error("Socket error: {0}".format(error))
def authentication_on_close(
self, _websocket, close_status_code: int = None, reason: str = None
) -> None:
"""Websocket callback when the authentication connection closes
Args:
_websocket (Websocket): communication socket
close_status_code (int): status code for close operation
reason (str): close reason
"""
self.logger.info("Authentication socket close")
def run(self) -> int:
"""Run method which starts and waits the communication thread(s)
Returns:
int: Process return code
"""
try:
self.client.start_authentication_thread().join()
except:
pass
return self.return_code
if __name__ == "__main__":
exit(AuthenticationExample().run())
| python |
#
# PySNMP MIB module FR-MFR-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FR-MFR-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:15:59 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint")
InterfaceIndex, ifIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
ModuleIdentity, Counter64, NotificationType, Gauge32, TimeTicks, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, transmission, Counter32, ObjectIdentity, Unsigned32, Integer32, iso, IpAddress, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Counter64", "NotificationType", "Gauge32", "TimeTicks", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "transmission", "Counter32", "ObjectIdentity", "Unsigned32", "Integer32", "iso", "IpAddress", "MibIdentifier")
RowStatus, TestAndIncr, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TestAndIncr", "DisplayString", "TextualConvention")
mfrMib = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 47))
mfrMib.setRevisions(('2000-11-30 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: mfrMib.setRevisionsDescriptions(('Published as RFC 3020.',))
if mibBuilder.loadTexts: mfrMib.setLastUpdated('200011300000Z')
if mibBuilder.loadTexts: mfrMib.setOrganization('IETF Frame Relay Service MIB (frnetmib) Working Group')
if mibBuilder.loadTexts: mfrMib.setContactInfo('WG Charter: http://www.ietf.org/html.charters/frnetmib-charter.html WG-email: [email protected] Subscribe: [email protected] Email Archive: ftp://ftp.ietf.org/ietf-mail-archive/frnetmib Chair: Andy Malis Vivace Networks Email: [email protected] WG editor: Prayson Pate Overture Networks Email: [email protected] Co-author: Bob Lynch Overture Networks EMail: [email protected] Co-author: Kenneth Rehbehn Megisto Systems, Inc. EMail: [email protected]')
if mibBuilder.loadTexts: mfrMib.setDescription('This is the MIB used to control and monitor the multilink frame relay (MFR) function described in FRF.16.')
class MfrBundleLinkState(TextualConvention, Integer32):
reference = 'FRF.16 Annex A'
description = 'The possible states for a bundle link, as defined in Annex A of FRF.16.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))
namedValues = NamedValues(("mfrBundleLinkStateAddSent", 1), ("mfrBundleLinkStateAddRx", 2), ("mfrBundleLinkStateAddAckRx", 3), ("mfrBundleLinkStateUp", 4), ("mfrBundleLinkStateIdlePending", 5), ("mfrBundleLinkStateIdle", 6), ("mfrBundleLinkStateDown", 7), ("mfrBundleLinkStateDownIdle", 8))
mfrMibScalarObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 47, 1))
mfrMibBundleObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 47, 2))
mfrMibBundleLinkObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 47, 3))
mfrMibTraps = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 47, 4))
mfrMibConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 47, 5))
mfrMibTrapsPrefix = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 47, 4, 0))
mfrMibGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 47, 5, 1))
mfrMibCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 47, 5, 2))
mfrBundleMaxNumBundles = MibScalar((1, 3, 6, 1, 2, 1, 10, 47, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleMaxNumBundles.setStatus('current')
if mibBuilder.loadTexts: mfrBundleMaxNumBundles.setDescription('This object is used to inform the manager of the maximum number of bundles supported by this device.')
mfrBundleNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 10, 47, 1, 2), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mfrBundleNextIndex.setReference('RFC 2494')
if mibBuilder.loadTexts: mfrBundleNextIndex.setStatus('current')
if mibBuilder.loadTexts: mfrBundleNextIndex.setDescription('This object is used to assist the manager in selecting a value for mfrBundleIndex during row creation in the mfrBundleTable. It can also be used to avoid race conditions with multiple managers trying to create rows in the table (see RFC 2494 [RFC2494] for one such alogrithm).')
mfrBundleTable = MibTable((1, 3, 6, 1, 2, 1, 10, 47, 2, 3), )
if mibBuilder.loadTexts: mfrBundleTable.setStatus('current')
if mibBuilder.loadTexts: mfrBundleTable.setDescription('The bundle configuration and status table. There is a one-to-one correspondence between a bundle and an interface represented in the ifTable. The following objects of the ifTable have specific meaning for an MFR bundle: ifAdminStatus - the bundle admin status ifOperStatus - the bundle operational status ifSpeed - the current bandwidth of the bundle ifInUcastPkts - the number of frames received on the bundle ifOutUcastPkts - the number of frames transmitted on the bundle ifInErrors - frame (not fragment) errors ifOutErrors - frame (not fragment) errors ')
mfrBundleEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1), ).setIndexNames((0, "FR-MFR-MIB", "mfrBundleIndex"))
if mibBuilder.loadTexts: mfrBundleEntry.setStatus('current')
if mibBuilder.loadTexts: mfrBundleEntry.setDescription('An entry in the bundle table.')
mfrBundleIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: mfrBundleIndex.setStatus('current')
if mibBuilder.loadTexts: mfrBundleIndex.setDescription('The index into the table. While this corresponds to an entry in the ifTable, the value of mfrBundleIndex need not match that of the ifIndex in the ifTable. A manager can use mfrBundleNextIndex to select a unique mfrBundleIndex for creating a new row.')
mfrBundleIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 2), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleIfIndex.setStatus('current')
if mibBuilder.loadTexts: mfrBundleIfIndex.setDescription('The value must match an entry in the interface table whose ifType must be set to frf16MfrBundle(163). For example: if the value of mfrBundleIfIndex is 10, then a corresponding entry should be present in the ifTable with an index of 10 and an ifType of 163.')
mfrBundleRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleRowStatus.setReference('RFC 1903')
if mibBuilder.loadTexts: mfrBundleRowStatus.setStatus('current')
if mibBuilder.loadTexts: mfrBundleRowStatus.setDescription('The mfrBundleRowStatus object allows create, change, and delete operations on bundle entries.')
mfrBundleNearEndName = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 4), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleNearEndName.setReference('FRF.16 section 3.4.1')
if mibBuilder.loadTexts: mfrBundleNearEndName.setStatus('current')
if mibBuilder.loadTexts: mfrBundleNearEndName.setDescription('The configured name of the bundle.')
mfrBundleFragmentation = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleFragmentation.setStatus('current')
if mibBuilder.loadTexts: mfrBundleFragmentation.setDescription('Controls whether the bundle performs/accepts fragmentation and re-assembly. The possible values are: enable(1) - Bundle links will fragment frames disable(2) - Bundle links will not fragment frames.')
mfrBundleMaxFragSize = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 8184)).clone(-1)).setUnits('Octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleMaxFragSize.setStatus('current')
if mibBuilder.loadTexts: mfrBundleMaxFragSize.setDescription('The maximum fragment size supported. Note that this is only valid if mfrBundleFragmentation is set to enable(1). Zero is not a valid fragment size. A bundle that does not support fragmentation must return this object with a value of -1.')
mfrBundleTimerHello = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 180)).clone(10)).setUnits('Seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleTimerHello.setReference('FRF.16 section 4.3.8.1')
if mibBuilder.loadTexts: mfrBundleTimerHello.setStatus('current')
if mibBuilder.loadTexts: mfrBundleTimerHello.setDescription('The configured MFR Hello Timer value.')
mfrBundleTimerAck = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(4)).setUnits('Seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleTimerAck.setReference('FRF.16 section 4.3.8.2')
if mibBuilder.loadTexts: mfrBundleTimerAck.setStatus('current')
if mibBuilder.loadTexts: mfrBundleTimerAck.setDescription('The configured MFR T_ACK value.')
mfrBundleCountMaxRetry = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5)).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleCountMaxRetry.setReference('FRF.16 section 4.3.8.3')
if mibBuilder.loadTexts: mfrBundleCountMaxRetry.setStatus('current')
if mibBuilder.loadTexts: mfrBundleCountMaxRetry.setDescription('The MFR N_MAX_RETRY value.')
mfrBundleActivationClass = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("mfrBundleActivationClassA", 1), ("mfrBundleActivationClassB", 2), ("mfrBundleActivationClassC", 3), ("mfrBundleActivationClassD", 4))).clone('mfrBundleActivationClassA')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleActivationClass.setReference('FRF.16 section 4.2.2.1')
if mibBuilder.loadTexts: mfrBundleActivationClass.setStatus('current')
if mibBuilder.loadTexts: mfrBundleActivationClass.setDescription('Controls the conditions under which the bundle is activated. The following settings are available: mfrBundleActivationClassA(1) - at least one must link up mfrBundleActivationClassB(2) - all links must be up mfrBundleActivationClassC(3) - a certain number must be up. Refer to mfrBundleThreshold for the required number. mfrBundleActivationClassD(4) - custom (implementation specific).')
mfrBundleThreshold = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647)).clone(-1)).setUnits('Bundle Links').setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleThreshold.setReference('FRF.16 section 4.2.2.1')
if mibBuilder.loadTexts: mfrBundleThreshold.setStatus('current')
if mibBuilder.loadTexts: mfrBundleThreshold.setDescription("Specifies the number of links that must be in operational 'up' state before the bundle will transition to an operational up/active state. If the number of operational 'up' links falls below this value, then the bundle will transition to an inactive state. Note - this is only valid when mfrBundleActivationClass is set to mfrBundleActivationClassC or, depending upon the implementation, to mfrBundleActivationClassD. A bundle that is not set to one of these must return this object with a value of -1.")
mfrBundleMaxDiffDelay = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647)).clone(-1)).setUnits('Milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleMaxDiffDelay.setStatus('current')
if mibBuilder.loadTexts: mfrBundleMaxDiffDelay.setDescription('The maximum delay difference between the bundle links. A value of -1 indicates that this object does not contain a valid value')
mfrBundleSeqNumSize = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("seqNumSize12bit", 1), ("seqNumSize24bit", 2))).clone('seqNumSize12bit')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleSeqNumSize.setReference('FRFTC/99-194')
if mibBuilder.loadTexts: mfrBundleSeqNumSize.setStatus('current')
if mibBuilder.loadTexts: mfrBundleSeqNumSize.setDescription('Controls whether the standard FRF.12 12-bit sequence number is used or the optional 24-bit sequence number.')
mfrBundleMaxBundleLinks = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setUnits('Bundle Links').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleMaxBundleLinks.setStatus('current')
if mibBuilder.loadTexts: mfrBundleMaxBundleLinks.setDescription('The maximum number of bundle links supported for this bundle.')
mfrBundleLinksConfigured = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setUnits('Bundle Links').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinksConfigured.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinksConfigured.setDescription('The number of links configured for the bundle.')
mfrBundleLinksActive = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('Bundle Links').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinksActive.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinksActive.setDescription('The number of links that are active.')
mfrBundleBandwidth = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 17), Integer32()).setUnits('Bits/Sec').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleBandwidth.setStatus('current')
if mibBuilder.loadTexts: mfrBundleBandwidth.setDescription('The amount of available bandwidth on the bundle')
mfrBundleFarEndName = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 18), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleFarEndName.setReference('FRF.16 section 3.4.1')
if mibBuilder.loadTexts: mfrBundleFarEndName.setStatus('current')
if mibBuilder.loadTexts: mfrBundleFarEndName.setDescription('Name of the bundle received from the far end.')
mfrBundleResequencingErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 3, 1, 19), Counter32()).setUnits('Error Events').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleResequencingErrors.setStatus('current')
if mibBuilder.loadTexts: mfrBundleResequencingErrors.setDescription('A count of the number of resequencing errors. Each event may correspond to multiple lost frames. Example: Say sequence number 56, 59 and 60 is received for DLCI 100. It is decided by some means that sequence 57 and 58 is lost. This counter should then be incremented by ONE, even though two frames were lost.')
mfrBundleIfIndexMappingTable = MibTable((1, 3, 6, 1, 2, 1, 10, 47, 2, 4), )
if mibBuilder.loadTexts: mfrBundleIfIndexMappingTable.setStatus('current')
if mibBuilder.loadTexts: mfrBundleIfIndexMappingTable.setDescription('A table mapping the values of ifIndex to the mfrBundleIndex. This is required in order to find the mfrBundleIndex given an ifIndex. The mapping of mfrBundleIndex to ifIndex is provided by the mfrBundleIfIndex entry in the mfrBundleTable.')
mfrBundleIfIndexMappingEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 47, 2, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: mfrBundleIfIndexMappingEntry.setStatus('current')
if mibBuilder.loadTexts: mfrBundleIfIndexMappingEntry.setDescription('Each row describes one ifIndex to mfrBundleIndex mapping.')
mfrBundleIfIndexMappingIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 2, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleIfIndexMappingIndex.setStatus('current')
if mibBuilder.loadTexts: mfrBundleIfIndexMappingIndex.setDescription('The mfrBundleIndex of the given ifIndex.')
mfrBundleLinkTable = MibTable((1, 3, 6, 1, 2, 1, 10, 47, 3, 1), )
if mibBuilder.loadTexts: mfrBundleLinkTable.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkTable.setDescription('The bundle link configuration and status table. There is a one-to-one correspondence between a bundle link and a physical interface represented in the ifTable. The ifIndex of the physical interface is used to index the bundle link table, and to create rows. The following objects of the ifTable have specific meaning for an MFR bundle link: ifAdminStatus - the bundle link admin status ifOperStatus - the bundle link operational status ifSpeed - the bandwidth of the bundle link interface ifInUcastPkts - the number of frames received on the bundle link ifOutUcastPkts - the number of frames transmitted on the bundle link ifInErrors - frame and fragment errors ifOutErrors - frame and fragment errors')
mfrBundleLinkEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: mfrBundleLinkEntry.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkEntry.setDescription('An entry in the bundle link table.')
mfrBundleLinkRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 1), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleLinkRowStatus.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkRowStatus.setDescription('The mfrBundleLinkRowStatus object allows create, change, and delete operations on mfrBundleLink entries. The create operation must fail if no physical interface is associated with the bundle link.')
mfrBundleLinkConfigBundleIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleLinkConfigBundleIndex.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkConfigBundleIndex.setDescription('The mfrBundleLinkConfigBundleIndex object allows the manager to control the bundle to which the bundle link is assigned. If no value were in this field, then the bundle would remain in NOT_READY rowStatus and be unable to go to active. With an appropriate mfrBundleIndex in this field, then we could put the mfrBundleLink row in NOT_IN_SERVICE or ACTIVE rowStatus.')
mfrBundleLinkNearEndName = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 3), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mfrBundleLinkNearEndName.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkNearEndName.setDescription('The configured bundle link name that is sent to the far end.')
mfrBundleLinkState = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 4), MfrBundleLinkState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkState.setReference('FRF.16 Annex A')
if mibBuilder.loadTexts: mfrBundleLinkState.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkState.setDescription('Current bundle link state as defined by the MFR protocol described in Annex A of FRF.16.')
mfrBundleLinkFarEndName = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 5), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkFarEndName.setReference('FRF.16 section 3.4.2')
if mibBuilder.loadTexts: mfrBundleLinkFarEndName.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkFarEndName.setDescription('Name of bundle link received from far end.')
mfrBundleLinkFarEndBundleName = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 6), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkFarEndBundleName.setReference('FRF.16 section 3.4.1')
if mibBuilder.loadTexts: mfrBundleLinkFarEndBundleName.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkFarEndBundleName.setDescription('Name of far end bundle for this link received from far end.')
mfrBundleLinkDelay = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkDelay.setReference('FRF.16 section 3.4.4')
if mibBuilder.loadTexts: mfrBundleLinkDelay.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkDelay.setDescription('Current round-trip delay for this bundle link. The value -1 is returned when an implementation does not support measurement of the bundle link delay.')
mfrBundleLinkFramesControlTx = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 8), Counter32()).setUnits('Frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkFramesControlTx.setReference('FRF.16 section 3.2')
if mibBuilder.loadTexts: mfrBundleLinkFramesControlTx.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkFramesControlTx.setDescription('Number of MFR control frames sent.')
mfrBundleLinkFramesControlRx = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 9), Counter32()).setUnits('Frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkFramesControlRx.setReference('FRF.16 section 3.2')
if mibBuilder.loadTexts: mfrBundleLinkFramesControlRx.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkFramesControlRx.setDescription('Number of valid MFR control frames received.')
mfrBundleLinkFramesControlInvalid = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 10), Counter32()).setUnits('Frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkFramesControlInvalid.setReference('FRF.16 section 3.2')
if mibBuilder.loadTexts: mfrBundleLinkFramesControlInvalid.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkFramesControlInvalid.setDescription('The number of invalid MFR control frames received.')
mfrBundleLinkTimerExpiredCount = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 11), Counter32()).setUnits('Timer Expiration Events').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkTimerExpiredCount.setReference('FRF.16 section 4.3.8.1 and 4.3.8.2')
if mibBuilder.loadTexts: mfrBundleLinkTimerExpiredCount.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkTimerExpiredCount.setDescription('Number of times the T_HELLO or T_ACK timers expired.')
mfrBundleLinkLoopbackSuspected = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 12), Counter32()).setUnits('Loopback Suspected Events').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkLoopbackSuspected.setReference('FRF.16 section 4.3.7')
if mibBuilder.loadTexts: mfrBundleLinkLoopbackSuspected.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkLoopbackSuspected.setDescription('The number of times a loopback has been suspected (based upon the use of magic numbers).')
mfrBundleLinkUnexpectedSequence = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 13), Counter32()).setUnits('Frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkUnexpectedSequence.setReference('FRF.16 section 4.2.3.2')
if mibBuilder.loadTexts: mfrBundleLinkUnexpectedSequence.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkUnexpectedSequence.setDescription('The number of data MFR frames discarded because the sequence number of the frame for a DLCI was less than (delayed frame) or equal to (duplicate frame) the one expected for that DLCI. Example: Say frames with sequence numbers 56, 58, 59 is received for DLCI 100. While waiting for sequence number 57 another frame with sequence number 58 arrives. Frame 58 is discarded and the counter is incremented.')
mfrBundleLinkMismatch = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 47, 3, 1, 1, 14), Counter32()).setUnits('Bundle Name Mismatch Events').setMaxAccess("readonly")
if mibBuilder.loadTexts: mfrBundleLinkMismatch.setReference('FRF.16 section 4.3.2.4')
if mibBuilder.loadTexts: mfrBundleLinkMismatch.setStatus('current')
if mibBuilder.loadTexts: mfrBundleLinkMismatch.setDescription('The number of times that the unit has been notified by the remote peer that the bundle name is inconsistent with other bundle links attached to the far-end bundle.')
mfrMibTrapBundleLinkMismatch = NotificationType((1, 3, 6, 1, 2, 1, 10, 47, 4, 0, 1)).setObjects(("FR-MFR-MIB", "mfrBundleNearEndName"), ("FR-MFR-MIB", "mfrBundleFarEndName"), ("FR-MFR-MIB", "mfrBundleLinkNearEndName"), ("FR-MFR-MIB", "mfrBundleLinkFarEndName"), ("FR-MFR-MIB", "mfrBundleLinkFarEndBundleName"))
if mibBuilder.loadTexts: mfrMibTrapBundleLinkMismatch.setStatus('current')
if mibBuilder.loadTexts: mfrMibTrapBundleLinkMismatch.setDescription('This trap indicates that a bundle link mismatch has been detected. The following objects are reported: mfrBundleNearEndName: configured name of near end bundle mfrBundleFarEndName: previously reported name of far end bundle mfrBundleLinkNearEndName: configured name of near end bundle mfrBundleLinkFarEndName: reported name of far end bundle mfrBundleLinkFarEndBundleName: currently reported name of far end bundle Note: that the configured items may have been configured automatically. Note: The mfrBundleLinkMismatch counter is incremented when the trap is sent.')
if mibBuilder.loadTexts: mfrMibTrapBundleLinkMismatch.setReference('FRF.16 section 4.3.2.4')
mfrMibCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 10, 47, 5, 2, 1)).setObjects(("FR-MFR-MIB", "mfrMibBundleGroup"), ("FR-MFR-MIB", "mfrMibBundleLinkGroup"), ("FR-MFR-MIB", "mfrMibTrapGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mfrMibCompliance = mfrMibCompliance.setStatus('current')
if mibBuilder.loadTexts: mfrMibCompliance.setDescription('The compliance statement for equipment that implements the FRF16 MIB. All of the current groups are mandatory, but a number of objects may be read-only if the implementation does not allow configuration.')
mfrMibBundleGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 47, 5, 1, 1)).setObjects(("FR-MFR-MIB", "mfrBundleMaxNumBundles"), ("FR-MFR-MIB", "mfrBundleNextIndex"), ("FR-MFR-MIB", "mfrBundleIfIndex"), ("FR-MFR-MIB", "mfrBundleRowStatus"), ("FR-MFR-MIB", "mfrBundleNearEndName"), ("FR-MFR-MIB", "mfrBundleFragmentation"), ("FR-MFR-MIB", "mfrBundleMaxFragSize"), ("FR-MFR-MIB", "mfrBundleTimerHello"), ("FR-MFR-MIB", "mfrBundleTimerAck"), ("FR-MFR-MIB", "mfrBundleCountMaxRetry"), ("FR-MFR-MIB", "mfrBundleActivationClass"), ("FR-MFR-MIB", "mfrBundleThreshold"), ("FR-MFR-MIB", "mfrBundleMaxDiffDelay"), ("FR-MFR-MIB", "mfrBundleMaxBundleLinks"), ("FR-MFR-MIB", "mfrBundleLinksConfigured"), ("FR-MFR-MIB", "mfrBundleLinksActive"), ("FR-MFR-MIB", "mfrBundleBandwidth"), ("FR-MFR-MIB", "mfrBundleSeqNumSize"), ("FR-MFR-MIB", "mfrBundleFarEndName"), ("FR-MFR-MIB", "mfrBundleResequencingErrors"), ("FR-MFR-MIB", "mfrBundleIfIndexMappingIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mfrMibBundleGroup = mfrMibBundleGroup.setStatus('current')
if mibBuilder.loadTexts: mfrMibBundleGroup.setDescription('Group of objects describing bundles.')
mfrMibBundleLinkGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 47, 5, 1, 2)).setObjects(("FR-MFR-MIB", "mfrBundleLinkRowStatus"), ("FR-MFR-MIB", "mfrBundleLinkConfigBundleIndex"), ("FR-MFR-MIB", "mfrBundleLinkNearEndName"), ("FR-MFR-MIB", "mfrBundleLinkState"), ("FR-MFR-MIB", "mfrBundleLinkFarEndName"), ("FR-MFR-MIB", "mfrBundleLinkFarEndBundleName"), ("FR-MFR-MIB", "mfrBundleLinkDelay"), ("FR-MFR-MIB", "mfrBundleLinkFramesControlTx"), ("FR-MFR-MIB", "mfrBundleLinkFramesControlRx"), ("FR-MFR-MIB", "mfrBundleLinkFramesControlInvalid"), ("FR-MFR-MIB", "mfrBundleLinkTimerExpiredCount"), ("FR-MFR-MIB", "mfrBundleLinkLoopbackSuspected"), ("FR-MFR-MIB", "mfrBundleLinkUnexpectedSequence"), ("FR-MFR-MIB", "mfrBundleLinkMismatch"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mfrMibBundleLinkGroup = mfrMibBundleLinkGroup.setStatus('current')
if mibBuilder.loadTexts: mfrMibBundleLinkGroup.setDescription('Group of objects describing bundle links.')
mfrMibTrapGroup = NotificationGroup((1, 3, 6, 1, 2, 1, 10, 47, 5, 1, 3)).setObjects(("FR-MFR-MIB", "mfrMibTrapBundleLinkMismatch"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mfrMibTrapGroup = mfrMibTrapGroup.setStatus('current')
if mibBuilder.loadTexts: mfrMibTrapGroup.setDescription('Group of objects describing notifications (traps).')
mibBuilder.exportSymbols("FR-MFR-MIB", mfrBundleMaxBundleLinks=mfrBundleMaxBundleLinks, mfrBundleLinkConfigBundleIndex=mfrBundleLinkConfigBundleIndex, mfrBundleLinkRowStatus=mfrBundleLinkRowStatus, mfrMibTrapGroup=mfrMibTrapGroup, mfrBundleLinkFarEndBundleName=mfrBundleLinkFarEndBundleName, mfrBundleLinkFramesControlTx=mfrBundleLinkFramesControlTx, mfrMibGroups=mfrMibGroups, mfrBundleIfIndexMappingEntry=mfrBundleIfIndexMappingEntry, mfrBundleTable=mfrBundleTable, mfrBundleLinkFramesControlRx=mfrBundleLinkFramesControlRx, mfrMibCompliances=mfrMibCompliances, mfrMibCompliance=mfrMibCompliance, mfrBundleThreshold=mfrBundleThreshold, mfrBundleLinkMismatch=mfrBundleLinkMismatch, mfrBundleLinkTimerExpiredCount=mfrBundleLinkTimerExpiredCount, mfrBundleTimerAck=mfrBundleTimerAck, mfrBundleLinkNearEndName=mfrBundleLinkNearEndName, mfrMib=mfrMib, mfrBundleFarEndName=mfrBundleFarEndName, mfrMibScalarObjects=mfrMibScalarObjects, mfrBundleActivationClass=mfrBundleActivationClass, mfrBundleMaxNumBundles=mfrBundleMaxNumBundles, mfrBundleResequencingErrors=mfrBundleResequencingErrors, mfrBundleMaxFragSize=mfrBundleMaxFragSize, mfrBundleIfIndexMappingTable=mfrBundleIfIndexMappingTable, mfrBundleLinkUnexpectedSequence=mfrBundleLinkUnexpectedSequence, mfrMibBundleGroup=mfrMibBundleGroup, mfrBundleRowStatus=mfrBundleRowStatus, mfrBundleLinkFarEndName=mfrBundleLinkFarEndName, mfrBundleIfIndex=mfrBundleIfIndex, mfrBundleLinksConfigured=mfrBundleLinksConfigured, mfrBundleNextIndex=mfrBundleNextIndex, mfrBundleSeqNumSize=mfrBundleSeqNumSize, mfrBundleNearEndName=mfrBundleNearEndName, mfrBundleBandwidth=mfrBundleBandwidth, mfrMibBundleLinkObjects=mfrMibBundleLinkObjects, mfrBundleFragmentation=mfrBundleFragmentation, mfrMibTraps=mfrMibTraps, mfrBundleTimerHello=mfrBundleTimerHello, mfrBundleLinkState=mfrBundleLinkState, mfrBundleLinkDelay=mfrBundleLinkDelay, mfrMibTrapBundleLinkMismatch=mfrMibTrapBundleLinkMismatch, mfrBundleLinkLoopbackSuspected=mfrBundleLinkLoopbackSuspected, mfrBundleLinkTable=mfrBundleLinkTable, mfrBundleIndex=mfrBundleIndex, PYSNMP_MODULE_ID=mfrMib, mfrBundleMaxDiffDelay=mfrBundleMaxDiffDelay, mfrBundleIfIndexMappingIndex=mfrBundleIfIndexMappingIndex, mfrMibTrapsPrefix=mfrMibTrapsPrefix, mfrMibBundleObjects=mfrMibBundleObjects, mfrBundleLinksActive=mfrBundleLinksActive, mfrBundleCountMaxRetry=mfrBundleCountMaxRetry, mfrBundleLinkEntry=mfrBundleLinkEntry, mfrBundleLinkFramesControlInvalid=mfrBundleLinkFramesControlInvalid, MfrBundleLinkState=MfrBundleLinkState, mfrMibBundleLinkGroup=mfrMibBundleLinkGroup, mfrBundleEntry=mfrBundleEntry, mfrMibConformance=mfrMibConformance)
| python |
import torch
import numpy as np
from utils import vocab, pos_vocab, ner_vocab, rel_vocab
class Example:
def __init__(self, input_dict):
self.id = input_dict['id']
self.passage = input_dict['d_words']
self.question = input_dict['q_words']
self.choice = input_dict['c_words']
self.d_pos = input_dict['d_pos']
self.d_ner = input_dict['d_ner']
self.q_pos = input_dict['q_pos']
assert len(self.q_pos) == len(self.question.split()), (self.q_pos, self.question)
assert len(self.d_pos) == len(self.passage.split())
self.features = np.stack([input_dict['in_q'], input_dict['in_c'], \
input_dict['lemma_in_q'], input_dict['lemma_in_c'], \
input_dict['tf']], 1)
assert len(self.features) == len(self.passage.split())
self.label = input_dict['label']
self.d_tensor = torch.LongTensor([vocab[w] for w in self.passage.split()])
self.q_tensor = torch.LongTensor([vocab[w] for w in self.question.split()])
self.c_tensor = torch.LongTensor([vocab[w] for w in self.choice.split()])
self.d_pos_tensor = torch.LongTensor([pos_vocab[w] for w in self.d_pos])
self.q_pos_tensor = torch.LongTensor([pos_vocab[w] for w in self.q_pos])
self.d_ner_tensor = torch.LongTensor([ner_vocab[w] for w in self.d_ner])
self.features = torch.from_numpy(self.features).type(torch.FloatTensor)
self.p_q_relation = torch.LongTensor([rel_vocab[r] for r in input_dict['p_q_relation']])
self.p_c_relation = torch.LongTensor([rel_vocab[r] for r in input_dict['p_c_relation']])
def __str__(self):
return 'Passage: %s\n Question: %s\n Answer: %s, Label: %d' % (self.passage, self.question, self.choice, self.label)
def _to_indices_and_mask(batch_tensor, need_mask=True):
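    # Added comment: pads a list of 1-D index tensors to the max length in the
    # batch; the returned mask is 0 over real tokens and 1 over padding positions.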
mx_len = max([t.size(0) for t in batch_tensor])
batch_size = len(batch_tensor)
indices = torch.LongTensor(batch_size, mx_len).fill_(0)
if need_mask:
mask = torch.ByteTensor(batch_size, mx_len).fill_(1)
for i, t in enumerate(batch_tensor):
indices[i, :len(t)].copy_(t)
if need_mask:
mask[i, :len(t)].fill_(0)
if need_mask:
return indices, mask
else:
return indices
def _to_feature_tensor(features):
mx_len = max([f.size(0) for f in features])
batch_size = len(features)
f_dim = features[0].size(1)
f_tensor = torch.FloatTensor(batch_size, mx_len, f_dim).fill_(0)
for i, f in enumerate(features):
f_tensor[i, :len(f), :].copy_(f)
return f_tensor
def batchify(batch_data):
p, p_mask = _to_indices_and_mask([ex.d_tensor for ex in batch_data])
p_pos = _to_indices_and_mask([ex.d_pos_tensor for ex in batch_data], need_mask=False)
p_ner = _to_indices_and_mask([ex.d_ner_tensor for ex in batch_data], need_mask=False)
p_q_relation = _to_indices_and_mask([ex.p_q_relation for ex in batch_data], need_mask=False)
p_c_relation = _to_indices_and_mask([ex.p_c_relation for ex in batch_data], need_mask=False)
q, q_mask = _to_indices_and_mask([ex.q_tensor for ex in batch_data])
q_pos = _to_indices_and_mask([ex.q_pos_tensor for ex in batch_data], need_mask=False)
choices = [ex.choice.split() for ex in batch_data]
c, c_mask = _to_indices_and_mask([ex.c_tensor for ex in batch_data])
f_tensor = _to_feature_tensor([ex.features for ex in batch_data])
y = torch.FloatTensor([ex.label for ex in batch_data])
return p, p_pos, p_ner, p_mask, q, q_pos, q_mask, c, c_mask, f_tensor, p_q_relation, p_c_relation, y
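# Usage sketch (added; assumes a list of Example objects named `examples`):
# batchify is designed to serve as the collate_fn of a torch DataLoader, e.g.
#   loader = torch.utils.data.DataLoader(examples, batch_size=32,
#                                        shuffle=True, collate_fn=batchify)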
| python |
import pygame
pygame.init()
SCREEN_WIDTH = 800
SCREEN_HEIGHT = int(SCREEN_WIDTH * 0.8)
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption('Shooter')
#set framerate
clock = pygame.time.Clock()
FPS = 60
#define player action variables
moving_left = False
moving_right = False
#define colours
BG = (144, 201, 120)
def draw_bg():
screen.fill(BG)
class Soldier(pygame.sprite.Sprite):
def __init__(self, char_type, x, y, scale, speed):
pygame.sprite.Sprite.__init__(self)
self.char_type = char_type
self.speed = speed
self.direction = 1
self.flip = False
img = pygame.image.load(f'img/{self.char_type}/Idle/0.png')
self.image = pygame.transform.scale(img, (int(img.get_width() * scale), int(img.get_height() * scale)))
self.rect = self.image.get_rect()
self.rect.center = (x, y)
def move(self, moving_left, moving_right):
#reset movement variables
dx = 0
dy = 0
#assign movement variables if moving left or right
if moving_left:
dx = -self.speed
self.flip = True
self.direction = -1
if moving_right:
dx = self.speed
self.flip = False
self.direction = 1
#update rectangle position
self.rect.x += dx
self.rect.y += dy
def draw(self):
screen.blit(pygame.transform.flip(self.image, self.flip, False), self.rect)
player = Soldier('player', 200, 200, 3, 5)
enemy = Soldier('enemy', 400, 200, 3, 5)
run = True
while run:
clock.tick(FPS)
draw_bg()
player.draw()
enemy.draw()
player.move(moving_left, moving_right)
for event in pygame.event.get():
#quit game
if event.type == pygame.QUIT:
run = False
#keyboard presses
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_a:
moving_left = True
if event.key == pygame.K_d:
moving_right = True
if event.key == pygame.K_ESCAPE:
run = False
#keyboard button released
if event.type == pygame.KEYUP:
if event.key == pygame.K_a:
moving_left = False
if event.key == pygame.K_d:
moving_right = False
pygame.display.update()
pygame.quit()
| python
# -*- coding: utf8 -*-
from django.contrib.auth import get_user_model
from django.core import mail
from django.test import TestCase
from rest_framework.authtoken.models import Token
from nopassword.models import LoginCode
class TestRestViews(TestCase):
def setUp(self):
self.user = get_user_model().objects.create(username='user', email='[email protected]')
def test_request_login_code(self):
response = self.client.post('/accounts-rest/login/', {
'username': self.user.username,
'next': '/private/',
})
self.assertEqual(response.status_code, 200)
login_code = LoginCode.objects.filter(user=self.user).first()
self.assertIsNotNone(login_code)
self.assertEqual(login_code.next, '/private/')
self.assertEqual(len(mail.outbox), 1)
self.assertIn(
'http://testserver/accounts/login/code/?user={}&code={}'.format(
login_code.user.pk,
login_code.code
),
mail.outbox[0].body,
)
def test_request_login_code_missing_username(self):
response = self.client.post('/accounts-rest/login/')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {
'username': ['This field is required.'],
})
def test_request_login_code_unknown_user(self):
response = self.client.post('/accounts-rest/login/', {
'username': 'unknown',
})
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {
'username': ['Please enter a correct userid. Note that it is case-sensitive.'],
})
def test_request_login_code_inactive_user(self):
self.user.is_active = False
self.user.save()
response = self.client.post('/accounts-rest/login/', {
'username': self.user.username,
})
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {
'username': ['This account is inactive.'],
})
def test_login(self):
login_code = LoginCode.objects.create(user=self.user, next='/private/')
response = self.client.post('/accounts-rest/login/code/', {
'user': login_code.user.pk,
'code': login_code.code,
})
self.assertEqual(response.status_code, 200)
self.assertFalse(LoginCode.objects.filter(pk=login_code.pk).exists())
token = Token.objects.filter(user=self.user).first()
self.assertIsNotNone(token)
self.assertEqual(response.data, {
'key': token.key,
'next': '/private/',
})
def test_login_missing_code(self):
response = self.client.post('/accounts-rest/login/code/')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {
'code': ['This field is required.'],
})
def test_login_unknown_code(self):
response = self.client.post('/accounts-rest/login/code/', {
'code': 'unknown',
})
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {
'__all__': ['Unable to log in with provided login code.'],
'user': ['This field is required.']
})
def test_login_inactive_user(self):
self.user.is_active = False
self.user.save()
login_code = LoginCode.objects.create(user=self.user)
response = self.client.post('/accounts-rest/login/code/', {
'code': login_code.code,
})
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {
'__all__': ['Unable to log in with provided login code.'],
'user': ['This field is required.']
})
def test_logout(self):
token = Token.objects.create(user=self.user, key='foobar')
response = self.client.post(
'/accounts-rest/logout/',
HTTP_AUTHORIZATION='Token {}'.format(token.key),
)
self.assertEqual(response.status_code, 200)
self.assertFalse(Token.objects.filter(user=self.user).exists())
def test_logout_unknown_token(self):
login_code = LoginCode.objects.create(user=self.user)
self.client.login(username=self.user.username, code=login_code.code)
response = self.client.post(
'/accounts-rest/logout/',
HTTP_AUTHORIZATION='Token unknown',
)
self.assertEqual(response.status_code, 200)
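# Added note: these tests assume django-nopassword's REST views are mounted under
# /accounts-rest/ and its regular views under /accounts/ (as the request paths above
# show); they are normally run with `python manage.py test` or pytest-django.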
| python |
# see https://www.codewars.com/kata/559a28007caad2ac4e000083/solutions/python
fibonacci_cache = {}
def fib(n):
if n in fibonacci_cache:
return fibonacci_cache[n]
    if n == 1:
        return 0
    if n == 2:
        return 1
    value = fib(n-1) + fib(n-2)
    fibonacci_cache[n] = value
    return value
def perimeter(n):
n_fib = [fib(i) for i in range(1, n+3)]
    return 4 * sum(n_fib)
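# Added note: since the sum of the first k Fibonacci numbers is fib(k+2) - 1,
# this is equivalent to perimeter(n) == 4 * (fib(n + 4) - 1) with the 1-indexed
# fib() defined above, e.g. perimeter(5) == 4 * (fib(9) - 1) == 4 * 20 == 80.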
from TestFunction import Test
test = Test(None)
test.assert_equals(perimeter(5), 80)
test.assert_equals(perimeter(7), 216)
test.assert_equals(perimeter(20), 114624)
test.assert_equals(perimeter(30), 14098308)
test.assert_equals(perimeter(100), 6002082144827584333104)
| python |
import warnings
import numpy as np
import scipy.sparse as sp
class Graph:
"""
A container to represent a graph. The data associated with the Graph is
stored in its attributes:
- `x`, for the node features;
- `a`, for the adjacency matrix;
- `e`, for the edge attributes;
- `y`, for the node or graph labels;
All of these default to `None` if you don't specify them in the constructor.
If you want to read all non-None attributes at once, you can call the
`numpy()` method, which will return all data in a tuple (with the order
defined above).
Graphs also have the following attributes that are computed automatically
from the data:
- `n_nodes`: number of nodes;
- `n_edges`: number of edges;
- `n_node_features`: size of the node features, if available;
- `n_edge_features`: size of the edge features, if available;
- `n_labels`: size of the labels, if available;
Any additional `kwargs` passed to the constructor will be automatically
assigned as instance attributes of the graph.
Data can be stored in Numpy arrays or Scipy sparse matrices, and labels can
also be scalars.
Spektral usually assumes that the different data matrices have specific
shapes, although this is not strictly enforced to allow more flexibility.
In general, node attributes should have shape `(n_nodes, n_node_features)` and the adjacency
matrix should have shape `(n_nodes, n_nodes)`.
Edge attributes can be stored in a dense format as arrays of shape
`(n_nodes, n_nodes, n_edge_features)` or in a sparse format as arrays of shape `(n_edges, n_edge_features)`
(so that you don't have to store all the zeros for missing edges). Most
components of Spektral will know how to deal with both situations
automatically.
Labels can refer to the entire graph (shape `(n_labels, )`) or to each
individual node (shape `(n_nodes, n_labels)`).
**Arguments**
- `x`: np.array, the node features (shape `(n_nodes, n_node_features)`);
- `a`: np.array or scipy.sparse matrix, the adjacency matrix (shape `(n_nodes, n_nodes)`);
- `e`: np.array, the edge features (shape `(n_nodes, n_nodes, n_edge_features)` or `(n_edges, n_edge_features)`);
- `y`: np.array, the node or graph labels (shape `(n_nodes, n_labels)` or `(n_labels, )`);
"""
def __init__(self, x=None, a=None, e=None, y=None, **kwargs):
if x is not None:
if not isinstance(x, np.ndarray):
raise ValueError(f"Unsupported type {type(x)} for x")
if len(x.shape) == 1:
x = x[:, None]
warnings.warn(f"x was automatically reshaped to {x.shape}")
if len(x.shape) != 2:
raise ValueError(
f"x must have shape (n_nodes, n_node_features), got "
f"rank {len(x.shape)}"
)
if a is not None:
if not (isinstance(a, np.ndarray) or sp.isspmatrix(a)):
raise ValueError(f"Unsupported type {type(a)} for a")
if len(a.shape) != 2:
raise ValueError(
f"a must have shape (n_nodes, n_nodes), got rank {len(a.shape)}"
)
if e is not None:
if not isinstance(e, np.ndarray):
raise ValueError(f"Unsupported type {type(e)} for e")
if len(e.shape) not in (2, 3):
raise ValueError(
f"e must have shape (n_edges, n_edge_features) or "
f"(n_nodes, n_nodes, n_edge_features), got rank {len(e.shape)}"
)
self.x = x
self.a = a
self.e = e
self.y = y
# Read extra kwargs
for k, v in kwargs.items():
self[k] = v
def numpy(self):
return tuple(ret for ret in [self.x, self.a, self.e, self.y] if ret is not None)
def get(self, *keys):
return tuple(self[key] for key in keys if self[key] is not None)
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key, None)
def __contains__(self, key):
return key in self.keys
def __repr__(self):
return "Graph(n_nodes={}, n_node_features={}, n_edge_features={}, n_labels={})".format(
self.n_nodes, self.n_node_features, self.n_edge_features, self.n_labels
)
@property
def n_nodes(self):
if self.x is not None:
return self.x.shape[-2]
elif self.a is not None:
return self.a.shape[-1]
else:
return None
@property
def n_edges(self):
if sp.issparse(self.a):
return self.a.nnz
elif isinstance(self.a, np.ndarray):
return np.count_nonzero(self.a)
else:
return None
@property
def n_node_features(self):
if self.x is not None:
return self.x.shape[-1]
else:
return None
@property
def n_edge_features(self):
if self.e is not None:
return self.e.shape[-1]
else:
return None
@property
def n_labels(self):
if self.y is not None:
shp = np.shape(self.y)
return 1 if len(shp) == 0 else shp[-1]
else:
return None
@property
def keys(self):
keys = [
key
for key in self.__dict__.keys()
if self[key] is not None and not key.startswith("__")
]
return keys
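# Usage sketch (added; not part of the original module). Shapes follow the
# conventions documented in the class docstring; the sizes below are arbitrary
# example values.
if __name__ == "__main__":
    x = np.random.rand(5, 3)                         # 5 nodes, 3 node features
    a = sp.random(5, 5, density=0.4, format="csr")   # sparse adjacency matrix
    y = np.array([1.0])                              # single graph-level label
    g = Graph(x=x, a=a, y=y)
    print(g)          # Graph(n_nodes=5, n_node_features=3, ...)
    print(g.n_edges)  # number of non-zero entries in `a`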
| python |
from __future__ import annotations
from typing import List, Tuple
def check_conflicts(path1: Path, path2: Path) -> bool:
"""
Checks if two paths have either an edge conflict or a vertex conflict
:param path1: The first path
:param path2: The second path
:return: True if paths are conflicting, False otherwise
"""
n = len(path1)
m = len(path2)
i = 1
while i < n and i < m:
# Vertex conflict
if path1[i] == path2[i]:
return True
# Edge conflict
if path1[i] == path2[i - 1] and path1[i - 1] == path2[i]:
return True
i += 1
while i < n:
if path1[i] == path2[-1]:
return True
i += 1
while i < m:
if path1[-1] == path2[i]:
return True
i += 1
return False
class Path:
__slots__ = 'path', 'identifier'
def __init__(self, path: List[Tuple[int, int]], identifier: int):
self.path = path
self.identifier: int = identifier
def __getitem__(self, item):
return self.path[item]
def __len__(self):
return len(self.path)
def __lt__(self, other: Path):
return self.identifier < other.identifier
def conflicts(self, other: Path):
"""
Checks if two paths have either an edge conflict or a vertex conflict
:param other: The other path to check conflicts with
:return: True if paths are conflicting, False otherwise
"""
n = len(self)
m = len(other)
i = 1
while i < n and i < m:
# Vertex conflict
if self[i] == other[i]:
return True
# Edge conflict
if self[i] == other[i - 1] and self[i - 1] == other[i]:
return True
i += 1
while i < n:
if self[i] == other[-1]:
return True
i += 1
while i < m:
if self[-1] == other[i]:
return True
i += 1
return False
def get_cost(self):
"""
Calculates the individual cost of a path
The cost of staying on the goal at the end of the path is subtracted.
:return: Cost
"""
cost = len(self)
last = self[-1]
i = 2
if i > len(self):
return cost
while self[-i] == last:
cost -= 1
i += 1
if i > len(self):
break
return cost
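# Minimal illustration (added; the coordinates are arbitrary): two paths that
# occupy the same vertex at timestep 1, and a path whose trailing waits on the
# goal are excluded from its cost.
if __name__ == "__main__":
    p1 = Path([(0, 0), (1, 0), (2, 0)], identifier=0)
    p2 = Path([(2, 0), (1, 0), (0, 0)], identifier=1)
    print(p1.conflicts(p2))   # True: both occupy (1, 0) at t=1
    p3 = Path([(0, 0), (0, 1), (0, 1), (0, 1)], identifier=2)
    print(p3.get_cost())      # 2: the two trailing waits on the goal are free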
| python |
from collections import defaultdict
from itertools import cycle, count
# Python 3 got rid of itertools.izip because zip now does it (but not in Python 2)
try: from itertools import izip
except: izip = zip
def spiral_directions():
dirs = cycle([(1,0), (0,-1), (-1,0), (0,1)]) # R, U, L, D, ...
    dists = (n >> 1 for n in count(2)) # 1, 1, 2, 2, 3, 3, 4, 4, ...
return izip(dists, dirs)
def distance_to_square(square):
square -= 1
x, y = 0, 0
for d in spiral_directions():
dist = min(d[0], square)
x += dist * d[1][0]
y += dist * d[1][1]
square -= dist
if square == 0:
return abs(x) + abs(y)
def first_square_over(threshold):
mem = defaultdict(int)
x, y, mem[0, 0] = 0, 0, 1
for d in spiral_directions():
for i in range(d[0]):
x += d[1][0]
y += d[1][1]
mem[x, y] = sum([mem[j, k] for j in range(x-1, x+2)
for k in range(y-1, y+2)])
if mem[x, y] > threshold:
return mem[x, y]
with open("day03.txt") as f:
data = int(f.readline())
print("2017 day 3 part 1: %d" % distance_to_square(data))
print("2017 day 3 part 2: %d" % first_square_over(data))
| python |
import requests
from .progressbar import SimpleProgressBar
def download(url, dst):
r = requests.get(
url,
stream=True,
)
bar = SimpleProgressBar(int(r.headers['Content-Length']))
with open(dst, 'wb') as f:
CHUNK_SIZE = 256 * 1024
for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
if not chunk:
break
f.write(chunk)
            bar.update_received(len(chunk))  # report the bytes actually written; the last chunk may be short
bar.done()
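# Example invocation (added; the URL and destination are placeholders):
#   download('https://example.com/archive.zip', '/tmp/archive.zip')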
| python |
from barcode import EAN13
from barcode.writer import ImageWriter
from io import BytesIO
# print to a file-like object:
rv = BytesIO()
EAN13(str(100000902922), writer=ImageWriter()).write(rv)
# or, of course, to an actual file:
with open('somefile.jpeg', 'wb') as f:
    EAN13('100000011111', writer=ImageWriter()).write(f)
| python
# -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-03 20:02:55
# @Last Modified by: YangZhou
# @Last Modified time: 2017-06-03 20:07:13
from ase import io
atoms=io.read('POSCAR')
mask = atoms.positions[:, 0] < atoms.positions[:, 0].max() - 5.17286
del atoms[mask]
atoms.cell[0,0]=5.17286
atoms.center(axis=0)
from aces.io.vasp import writevasp
writevasp(atoms,'POSCAR1')
| python
# Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base logic for hardware robots."""
import abc
import logging
import time
from typing import Iterable, Optional, Tuple
import numpy as np
from robel.components.robot.group_config import RobotGroupConfig
from robel.components.robot.robot import RobotComponent, RobotState
# Default tolerance for determining if the hardware has reached a state.
DEFAULT_ERROR_TOL = 1. * np.pi / 180
class HardwareRobotGroupConfig(RobotGroupConfig):
"""Stores group configuration for a HardwareRobotComponent."""
def __init__(self,
*args,
calib_scale: Optional[Iterable[float]] = None,
calib_offset: Optional[Iterable[float]] = None,
**kwargs):
"""Initializes a new configuration for a HardwareRobotComponent group.
Args:
            calib_scale: A scaling factor that is multiplied with state to
convert from component state space to hardware state space,
and divides control to convert from hardware control space to
component control space.
calib_offset: An offset that is added to state to convert from
component state space to hardware state space, and subtracted
from control to convert from hardware control space to
component control space.
"""
super().__init__(*args, **kwargs)
self.calib_scale = None
if calib_scale is not None:
self.calib_scale = np.array(calib_scale, dtype=np.float32)
self.calib_offset = None
if calib_offset is not None:
self.calib_offset = np.array(calib_offset, dtype=np.float32)
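# Added illustration (hedged; the numbers are made up): with calib_scale=[2.0]
# and calib_offset=[0.1], _calibrate_state maps a raw reading qpos=[0.5] to
# 0.5 * 2.0 + 0.1 = 1.1, and _decalibrate_qpos inverts it: (1.1 - 0.1) / 2.0 = 0.5.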
class HardwareRobotComponent(RobotComponent, metaclass=abc.ABCMeta):
"""Base component for hardware robots."""
def __init__(self, *args, **kwargs):
"""Initializes the component."""
super().__init__(*args, **kwargs)
self.reset_time()
@property
def is_hardware(self) -> bool:
"""Returns True if this is a hardware component."""
return True
@property
def time(self) -> float:
"""Returns the time (total sum of timesteps) since the last reset."""
return self._time
def reset_time(self):
"""Resets the timer for the component."""
self._last_reset_time = time.time()
self._time = 0
def _process_group(self, **config_kwargs) -> HardwareRobotGroupConfig:
"""Processes the configuration for a group."""
return HardwareRobotGroupConfig(self.sim_scene, **config_kwargs)
def _calibrate_state(self, state: RobotState,
group_config: HardwareRobotGroupConfig):
"""Converts the given state from hardware space to component space."""
# Calculate qpos' = qpos * scale + offset, and qvel' = qvel * scale.
if group_config.calib_scale is not None:
assert state.qpos.shape == group_config.calib_scale.shape
assert state.qvel.shape == group_config.calib_scale.shape
state.qpos *= group_config.calib_scale
state.qvel *= group_config.calib_scale
if group_config.calib_offset is not None:
assert state.qpos.shape == group_config.calib_offset.shape
# Only apply the offset to positions.
state.qpos += group_config.calib_offset
def _decalibrate_qpos(self, qpos: np.ndarray,
group_config: HardwareRobotGroupConfig) -> np.ndarray:
"""Converts the given position from component to hardware space."""
# Calculate qpos' = (qpos - offset) / scale.
if group_config.calib_offset is not None:
assert qpos.shape == group_config.calib_offset.shape
qpos = qpos - group_config.calib_offset
if group_config.calib_scale is not None:
assert qpos.shape == group_config.calib_scale.shape
qpos = qpos / group_config.calib_scale
return qpos
def _synchronize_timestep(self, minimum_sleep: float = 1e-4):
"""Waits for one timestep to elapse."""
# Block the thread such that we've waited at least `step_duration` time
# since the last call to `_synchronize_timestep`.
time_since_reset = time.time() - self._last_reset_time
elapsed_time = time_since_reset - self._time
remaining_step_time = self.sim_scene.step_duration - elapsed_time
if remaining_step_time > minimum_sleep:
time.sleep(remaining_step_time)
elif remaining_step_time < 0:
logging.warning('Exceeded timestep by %0.4fs', -remaining_step_time)
# Update the current time, relative to the last reset time.
self._time = time.time() - self._last_reset_time
def _wait_for_desired_states(
self,
desired_states: Iterable[Tuple[RobotGroupConfig, RobotState]],
error_tol: float = DEFAULT_ERROR_TOL,
timeout: float = 3.0,
poll_interval: float = 0.25,
initial_sleep: Optional[float] = 0.25,
last_diff_tol: Optional[float] = DEFAULT_ERROR_TOL,
last_diff_ticks: int = 2,
):
"""Polls the current state until it reaches the desired state.
Args:
desired_states: The desired states to wait for.
error_tol: The maximum position difference within which the desired
state is considered to have been reached.
timeout: The maximum amount of time to wait, in seconds.
poll_interval: The interval in seconds to poll the current state.
initial_sleep: The initial time to sleep before polling.
last_diff_tol: The maximum position difference between the current
state and the last state at which motion is considered to be
stopped, thus waiting will terminate early.
last_diff_ticks: The number of cycles where the last difference
tolerance check must pass for waiting to terminate early.
"""
# Define helper function to compare two state sets.
def all_states_close(states_a, states_b, tol):
all_close = True
for state_a, state_b in zip(states_a, states_b):
if not np.allclose(state_a.qpos, state_b.qpos, atol=tol):
all_close = False
break
return all_close
# Poll for the hardware move command to complete.
configs, desired_states = zip(*desired_states)
previous_states = None
ticks_until_termination = last_diff_ticks
start_time = time.time()
if initial_sleep is not None and initial_sleep > 0:
time.sleep(initial_sleep)
while True:
cur_states = self._get_group_states(configs)
# Terminate if the current states have reached the desired states.
if all_states_close(cur_states, desired_states, tol=error_tol):
return
# Terminate if the current state and previous state are the same.
# i.e. the robot is unable to move further.
if previous_states is not None and all_states_close(
cur_states, previous_states, tol=last_diff_tol):
if not ticks_until_termination:
logging.warning(
'Robot stopped motion; terminating wait early.')
return
ticks_until_termination -= 1
else:
ticks_until_termination = last_diff_ticks
if time.time() - start_time > timeout:
logging.warning('Reset timed out after %1.1fs', timeout)
return
previous_states = cur_states
time.sleep(poll_interval)
def _copy_to_simulation_state(
self, group_states: Iterable[Tuple[RobotGroupConfig, RobotState]]):
"""Copies the given states to the simulation."""
for config, state in group_states:
# Skip if this is a hardware-only group.
if config.qpos_indices is None:
continue
if state.qpos is not None:
self.sim_scene.data.qpos[config.qpos_indices] = state.qpos
if state.qvel is not None:
self.sim_scene.data.qvel[config.qvel_indices] = state.qvel
# Recalculate forward dynamics.
self.sim_scene.sim.forward()
self.sim_scene.renderer.refresh_window()
| python |
a = {
'x' : 1,
'y' : 2,
'z' : 3
}
b = {
'w' : 10,
'x' : 11,
'y' : 2
}
#find keys in common
print( a.keys() & b.keys() )
#find keys in a not in b, no + operator
print(a.keys() - b.keys() )
#find (key,value) pairs in common !!not values
print(a.items() & b.items() )
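# Added note: dict.keys() and dict.items() return set-like views, which is why
# & and - work above; dict.values() is not set-like, so convert explicitly,
# e.g. set(a.values()) & set(b.values()), if you need the same operation on values.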
c = {key:a[key] for key in a.keys() & b.keys() }
print(c)
| python
"""
A simple application that demonstrates importing functions.
"""
from library.services import delay_function
if __name__ == "__main__":
delay_function(10)
| python |
tup1 = ("aws",'azur',1988,2050,50,57)
tup2 = (1,2,3,4,5,6,7)
print(tuple(enumerate(tup1)),type(tup1),id(tup1),len(tup1))
print(tuple(enumerate(tup2)),type(tup2),id(tup2),len(tup2))
print(tup1[3:])
print(tup1[-3])
print(tup2[:4])
print(tup2[0:])
#del(tup1[0]) # tuple objects do not support item deletion
tup = (1,2,[1,2])
print(tuple(enumerate(tup)),type(tup))
| python |
from .database import *
from acq4.util import DataManager
from acq4.pyqtgraph.widgets.ProgressDialog import ProgressDialog
import acq4.util.debug as debug
from acq4.Manager import logExc, logMsg
class AnalysisDatabase(SqliteDatabase):
"""Defines the structure for DBs used for analysis. Essential features are:
- a table of control parameters "DbParameters"
these are just key: value pairs used by the database to store configuration variables
- a table defining relationships between tables "TableRelationships"
lets you declare "table1.column1 refers to table2.rowid"
    - a table assigning ownership of data tables to analysis modules
this ensures that analysis modules do not accidentally access tables belonging to another module.
- Directories created by data manager can be added automatically to DB
one table for each type of directory (Day, Cell, Protocol, etc)
- Automatic creation of views that join together directory hierarchies
- Automatic storage/retrieval of directory and file handles
"""
MetaTypes = {
'directory': 'int', # reference to a record in a directory table
'file': 'text', #
}
Version = '1'
def __init__(self, dbFile, dataModel, baseDir=None):
create = False
self.tableConfigCache = None
self.columnConfigCache = advancedTypes.CaselessDict()
self.setDataModel(dataModel)
self._baseDir = None
if not os.path.exists(dbFile):
create = True
if baseDir is None:
raise Exception("Must specify a base directory when creating a database.")
#self.db = SqliteDatabase(dbFile)
if not create:
## load DB and check version before initializing
db = SqliteDatabase(dbFile)
if not db.hasTable('DbParameters'):
raise Exception("Invalid analysis database -- no DbParameters table.")
recs = db.select('DbParameters', ['Value'], where={'Param': 'DB Version'})
db.close()
if len(recs) == 0:
version = None
else:
version = recs[0]['Value']
if version != AnalysisDatabase.Version:
self._convertDB(dbFile, version)
SqliteDatabase.__init__(self, dbFile)
self.file = dbFile
if create:
self.initializeDb()
self.setBaseDir(baseDir)
self.setCtrlParam('DB Version', AnalysisDatabase.Version)
self.setCtrlParam('Description', '')
def setDataModel(self, dm):
self._dataModel = dm
def dataModel(self):
return self._dataModel
def _convertDB(self, dbFile, version):
        ## Convert database dbFile from version to the latest version
newFileName = dbFile+"version_upgrade"
if os.path.exists(newFileName):
raise Exception("A .version_upgrade for %s already exists. Please delete or rename it" %dbFile)
if version is None:
prog = ProgressDialog("Converting database...")
from AnalysisDatabase_ver0 import AnalysisDatabase as AnalysisDatabaseOld
oldDb = AnalysisDatabaseOld(dbFile)
newDb = AnalysisDatabase(newFileName, self.dataModel(), oldDb.baseDir())
dirTypes = ['Day', 'Experiment', 'Slice', 'Cell', 'Site', 'Protocol', 'ProtocolSequence']
print oldDb.listTables()
for table in dirTypes:
if not oldDb.hasTable(table):
continue
for rec in oldDb.select(table):
dh = oldDb.baseDir()[rec['Dir']]
try:
newDb.addDir(dh)
except:
print "Can't add directory %s from old DB:" % dh.name()
debug.printExc()
total = len(oldDb.select('Photostim_events')) + len(oldDb.select('Photostim_sites'))
n=0
for table in ['Photostim_events', 'Photostim_sites', 'Photostim_events2', 'Photostim_sites2']:
if prog.wasCanceled():
break
if not oldDb.hasTable(table):
continue
schema = oldDb.tableSchema(table)
## SourceDir -> ProtocolSequenceDir type='directory:ProtocolSequence'
del schema['SourceDir']
schema['ProtocolSequenceDir'] = 'directory:ProtocolSequence'
## add column ProtocolDir
schema['ProtocolDir'] = 'directory:Protocol'
## SourceFile -> ? type='file'
if 'SourceFile' in schema:
schema['SourceFile'] = 'file'
owner = oldDb.tableOwner(table)
newDb.createTable(table, schema, owner=owner)
records = oldDb.select(table)
for r in records:
if prog.wasCanceled():
break
## SourceFile -> convert to filehandle
r['SourceFile']= oldDb.getDir('ProtocolSequence', r['SourceDir'])[r['SourceFile']]
del r['SourceDir']
## ProtocolDir, ProtocolSequenceDir -> dirHandles
#r['ProtocolSequenceDir'] = oldDb.getDir('ProtocolSequence', r['SourceDir'])
r['ProtocolDir'] = r['SourceFile'].parent()
r['ProtocolSequenceDir'] = self.dataModel().getParent(r['ProtocolDir'], 'ProtocolSequence')
n+=1
prog.setValue(n/total)
newDb.insert(table, records)
oldDb.close()
newDb.close()
if not prog.wasCanceled():
os.rename(dbFile, dbFile+'version_upgrade_backup')
os.rename(newFileName, dbFile)
else:
raise Exception("Don't know how to convert from version %s" % str(version))
#params = self.select('DbParameters')
#self.removeTable('DbParameters')
#self.initializeDb()
#for rec in params:
#self.setCtrlParam(rec['Param'], rec['Value'])
### update all dir tables
#for dirType in dirTypes:
#if not self.hasTable(dirType):
#continue
#newName = self.dirTableName(dirType)
#self.insert('TableConfig', Table=newName, DirType=dirType)
#ts = self.tableSchema(dirType)
#link = self.select('TableRelationships', ['Column', 'Table2'], sql='where Table1="%s"' % dirType)[0]
#linkedType = link['Table2']
#ts[linkedType] = ('directory:%s' % linkedType)
#del ts[link['Column']]
#self.createTable(newName, ts.items())
#records = self.select(dirType)
#for rec in records:
#rec[linkedType] = rec[link['Column']]
### TODO: need to convert integers to handles here..
#del rec[link['Column']]
#self.insert(newName, records)
#self.removeTable(dirType)
##for link in self.select('TableRelationships'):
##self.linkTables(link['Table1'], link['Column'], link['Table2'])
#self.removeTable('TableRelationships')
def initializeDb(self):
SqliteDatabase.createTable(self, 'DbParameters', [('Param', 'text', 'unique'), ('Value', 'text')])
## Table1.Column refers to Table2.ROWID
## obsolete--use TableConfig now.
#self.createTable("TableRelationships", ['"Table1" text', '"Column" text', '"Table2" text'])
## Stores meta information about tables:
## Owner - prevents table name collisions, allows users of the DB to be
## (nearly) assured exclusive access to a table. (I say 'nearly'
## because this is a voluntary restriction--each DB user must check
## for table ownership before accessing the table.)
## DirType - If this is a directory table, then the directory type is stored
## here. Otherwise, the field is blank.
SqliteDatabase.createTable(self, 'TableConfig', [('Table', 'text', 'unique on conflict abort'), ('Owner', 'text'), ('DirType', 'text')])
self('create index "TableConfig_byOwner" on "TableConfig" ("Owner")')
self('create index "TableConfig_byTable" on "TableConfig" ("Table")')
## stores column arguments used when creating tables
## This is similar to the information returned by tableSchema(), but
## contains extra information and data types not supported by SqliteDatabase
fields = ['Table', 'Column', 'Type', 'Link', 'Constraints']
SqliteDatabase.createTable(self, 'ColumnConfig', [(field, 'text') for field in fields])
self('create index "ColumnConfig_byTable" on "ColumnConfig" ("Table")')
self('create index "ColumnConfig_byTableColumn" on "ColumnConfig" ("Table", "Column")')
def baseDir(self):
"""Return a dirHandle for the base directory used for all file names in the database."""
if self._baseDir is None:
dirName = self.ctrlParam('BaseDirectory')
self._baseDir = DataManager.getHandle(dirName)
return self._baseDir
def setBaseDir(self, baseDir):
"""Sets the base dir which prefixes all file names in the database. Must be a DirHandle."""
self.setCtrlParam('BaseDirectory', baseDir.name())
self._baseDir = baseDir
def ctrlParam(self, param):
res = SqliteDatabase.select(self, 'DbParameters', ['Value'], sql="where Param='%s'"%param)
if len(res) == 0:
return None
else:
return res[0]['Value']
def setCtrlParam(self, param, value):
self.replace('DbParameters', {'Param': param, 'Value': value})
def createTable(self, table, columns, sql="", owner=None, dirType=None):
"""
        Extends SqliteDatabase.createTable to allow more descriptive column specifications.
- Columns are specified as either a tuple (name, type, constraints, link)
or a dict {'name': name, ...}
- The added 'link' column parameter should be the name of a table, indicating
that this column refers to the rowids of the linked table.
- Two new column type specifications:
directory:DirType - the column will be an integer referencing a row from
the DirType (Protocol, Cell, etc) directory table.
Directory handles stored in this column will be
automatically converted to/from their row ID.
This type implies link=DirTypeTable
file - the column will be a text file name relative to the DB base directory.
File/DirHandles will be automatically converted to/from their
text value.
example:
columnConfig = [
('Column1', 'directory:Protocol'),
('Column2', 'file', 'unique'),
dict(Name='Column3', Type='int', Link='LinkedTable')
]
db.createTable("TableName", columnConfig)
"""
## translate directory / file columns into int / text
## build records for insertion to ColumnConfig
columns = parseColumnDefs(columns, keyOrder=['Type', 'Constraints', 'Link'])
records = []
colTuples = []
for name, col in columns.iteritems():
rec = {'Column': name, 'Table': table, 'Link': None, 'Constraints': None}
rec.update(col)
typ = rec['Type']
typ, link = self.interpretColumnType(typ)
if link is not None:
rec['Link'] = link
tup = (rec['Column'], typ)
if rec['Constraints'] is not None:
tup = tup + (rec['Constraints'],)
colTuples.append(tup)
records.append(rec)
ret = SqliteDatabase.createTable(self, table, colTuples, sql)
self.insert('ColumnConfig', records)
tableRec = dict(Table=table, Owner=owner, DirType=dirType)
self.insert('TableConfig', tableRec)
self.tableConfigCache = None
return ret
def interpretColumnType(self, typ):
## returns: (Sqlite type, Link)
link = None
if typ.startswith('directory'):
            # slice off the literal prefix; lstrip() would strip a *set* of characters, not the prefix
            link = self.dirTableName(typ[len('directory:'):])
typ = 'int'
elif typ == 'file':
typ = 'text'
return typ, link
def addColumn(self, table, colName, colType, constraints=None):
"""
Add a new column to a table.
"""
typ, link = self.interpretColumnType(colType)
SqliteDatabase.addColumn(self, table, colName, typ, constraints)
self.insert('ColumnConfig', {'Column': colName, 'Table': table, 'Type': colType, 'Link': link})
if table in self.columnConfigCache:
del self.columnConfigCache[table]
def checkTable(self, table, owner, columns, create=False, ignoreUnknownColumns=False, addUnknownColumns=False, indexes=None):
"""
Checks to be sure that a table has been created with the correct fields and ownership.
This should generally be run before attempting to access a table.
If the table does not exist and create==True, then the table will be created with the
given columns and owner.
If ignoreUnknownColumns==True, then any columns in the data
that are not also in the table will be ignored. (Note: in this case, an insert may fail
unless ignoreUnknownColumns=True is also specified when calling insert())
If addUnknownColumns==True, then any columns in the data
that are not also in the table will be created in the table.
If indexes is supplied and create==True, then the specified indexes will be created
if they do not already exist by calling db.createIndex(table, index) once for each item in indexes.
"""
columns = parseColumnDefs(columns, keyOrder=['Type', 'Constraints', 'Link'])
## Make sure target table exists and has correct columns, links to input file
with self.transaction():
if not self.hasTable(table):
if create:
## create table
self.createTable(table, columns, owner=owner)
else:
raise Exception("Table %s does not exist." % table)
else:
## check table for ownership
if self.tableOwner(table) != owner:
raise Exception("Table %s is not owned by %s." % (table, owner))
## check table for correct columns
ts = self.tableSchema(table)
config = self.getColumnConfig(table)
for colName, col in columns.iteritems():
colType = col['Type']
if colName not in ts: ## <-- this is a case-insensitive operation
if ignoreUnknownColumns:
continue
elif addUnknownColumns:
self.addColumn(table, colName, colType)
ts = self.tableSchema(table) ## re-read schema and column config
config = self.getColumnConfig(table)
else:
raise Exception("Table has different data structure: Missing column %s" % colName)
specType = ts[colName]
if specType.lower() != colType.lower(): ## type names are case-insensitive too
## requested column type does not match schema; check for directory / file types
if (colType == 'file' or colType.startswith('directory')):
if (colName in config and config[colName].get('Type',None) == colType):
continue
raise Exception("Table has different data structure: Column '%s' type is %s, should be %s" % (colName, specType, colType))
if create is True and indexes is not None:
for index in indexes:
self.createIndex(table, index, ifNotExist=True)
return True
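    ## Minimal usage sketch for checkTable (table, owner and column names here are hypothetical,
    ## not taken from the surrounding code):
    ##   columns = [('Dir', 'directory:Protocol'), ('value', 'real')]
    ##   db.checkTable('MyAnalysis', owner='MyModule', columns=columns,
    ##                 create=True, addUnknownColumns=True, indexes=['Dir'])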
def createDirTable(self, dirHandle):
"""Creates a new table for storing directories similar to dirHandle"""
with self.transaction():
## Ask manager what columns we think should go with this directory
columns = acq4.Manager.getManager().suggestedDirFields(dirHandle).keys()
## Add in any other columns present
#for k in dirHandle.info(): ## Let's leave it to the user to add these if they want
#if k not in columns:
#columns.append(k)
columns = [(k, 'text') for k in columns]
columns = [('Dir', 'file')] + columns
tableName = self.dirTableName(dirHandle)
if self.hasTable(tableName):
raise Exception('Can not add directory table "%s"; table already exists.' % tableName)
## Link this table to its parent
parent = dirHandle.parent()
if parent.isManaged() and parent is not self.baseDir():
pType = self.dataModel().dirType(parent)
colName = pType + "Dir"
columns = [(colName, 'directory:'+pType)] + columns
#self.linkTables(tableName, colName, pName)
dirType = self.dataModel().dirType(dirHandle)
self.createTable(tableName, columns, dirType=dirType)
return tableName
def addDir(self, handle):
"""Create a record based on a DirHandle and its meta-info."""
info = handle.info().deepcopy()
for k in info: ## replace tuple keys with strings
if isinstance(k, tuple):
n = "_".join(k)
info[n] = info[k]
del info[k]
with self.transaction():
table = self.dirTableName(handle)
if not self.hasTable(table):
self.createDirTable(handle)
## make sure dir is not already in DB.
## if it is, just return the row ID
rid = self.getDirRowID(handle)
if rid is not None:
return table, rid
## find all directory columns, make sure linked directories are present in DB
conf = self.getColumnConfig(table)
for colName, col in conf.iteritems():
if col['Type'].startswith('directory'):
#pTable = col['Link']
                    pType = col['Type'][len('directory:'):]  ## take everything after the 'directory:' prefix
parent = self.dataModel().getParent(handle, pType)
if parent is not None:
self.addDir(parent)
info[colName] = parent
else:
info[colName] = None
info['Dir'] = handle
self.insert(table, info, ignoreExtraColumns=True)
return table, self.lastInsertRow()
def createView(self, viewName, tables):
"""Create a view that joins the tables listed."""
# db('create view "sites" as select * from photostim_sites inner join DirTable_Protocol on photostim_sites.ProtocolDir=DirTable_Protocol.rowid inner join DirTable_Cell on DirTable_Protocol.CellDir=DirTable_Cell.rowid')
with self.transaction():
sel = self.makeJoinStatement(tables)
cmd = 'create view "%s" as select * from %s' % (viewName, sel)
#for i in range(1,len(tables)): ## figure out how to join each table one at a time
#nextTable = tables[i]
#cols = None
#for joinTable in tables[:i]:
#cols = self.findJoinColumns(nextTable, joinTable)
#if cols is not None:
#break
#if cols is None:
#raise Exception("Could not find criteria to join table '%s' to any of '%s'" % (joinTable, str(tables[:i])) )
#cmd += ' inner join "%s" on "%s"."%s"="%s"."%s"' % (nextTable, nextTable, cols[0], joinTable, cols[1])
self(cmd)
## Create column config records for this view
colNames = self.tableSchema(viewName).keys()
colDesc = []
colIndex = 0
for table in tables:
cols = self.getColumnConfig(table)
for col, config in cols.iteritems():
config = config.copy()
config['Column'] = colNames[colIndex]
config['Table'] = viewName
colDesc.append(config)
colIndex += 1
self.insert('ColumnConfig', colDesc)
def makeJoinStatement(self, tables):
        ### construct an expression that joins multiple tables automatically
cmd = '"%s"' % tables[0]
for i in range(1,len(tables)): ## figure out how to join each table one at a time
nextTable = tables[i]
cols = None
for joinTable in tables[:i]:
cols = self.findJoinColumns(nextTable, joinTable)
if cols is not None:
break
if cols is None:
raise Exception("Could not find criteria to join table '%s' to any of '%s'" % (joinTable, str(tables[:i])) )
cmd += ' inner join "%s" on "%s"."%s"="%s"."%s"' % (nextTable, nextTable, cols[0], joinTable, cols[1])
return cmd
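    ## For illustration (table and column names are hypothetical): given a 'Sites' table whose
    ## 'ProtocolDir' column links to 'DirTable_Protocol', makeJoinStatement(['Sites',
    ## 'DirTable_Protocol']) would produce something like
    ##   "Sites" inner join "DirTable_Protocol" on "DirTable_Protocol"."rowid"="Sites"."protocoldir"
    ## which createView() above wraps in a 'create view ... as select * from ...' statement.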
def findJoinColumns(self, t1, t2):
"""Return the column names that can be used to join two tables.
If no relationships are found, return None.
"""
def strlower(x): # convert strings to lower, everything else stays the same
if isinstance(x, basestring):
return x.lower()
return x
links1 = [(strlower(x['Column']), strlower(x['Link'])) for x in self.getColumnConfig(t1).values()]
links2 = [(strlower(x['Column']), strlower(x['Link'])) for x in self.getColumnConfig(t2).values()]
        for col, link in links1: ## t1 explicitly links to t2.rowid
if link == t2.lower():
return col, 'rowid'
for col, link in links2: ## t2 explicitly links to t1.rowid
if link == t1.lower():
return 'rowid', col
for col1, link1 in links1: ## t1 and t2 both link to the same table.rowid
for col2, link2 in links2:
if link1 is not None and link1 == link2:
return col1, col2
return None ## no links found
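    ## Illustration only (hypothetical tables): if a 'Photostim' table has a column linked to
    ## 'DirTable_Protocol', then
    ##   self.findJoinColumns('Photostim', 'DirTable_Protocol') -> ('protocoldir', 'rowid')
    ## (column names come back lowercased); if neither table links to the other but both link
    ## to a common third table, the two linking columns are returned instead.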
#def linkTables(self, table1, col, table2):
#"""Declare a key relationship between two tables. Values in table1.column are ROWIDs from table 2"""
##self.insert('TableRelationships', Table1=table1, Column=col, Table2=table2)
#self.insert('TableConfig', Table=table1, Column=col, Key='link', Value=table2)
#if table1 in self.columnConfigCache:
#del self.columnConfigCache[table1]
#def listTableLinks(self, table):
#"""
#List all declared relationships for table.
#returns {columnName: linkedTable, ...}
#"""
#links = self.select('TableConfig', ['Column', 'Value'], sql="where \"Table\"='%s' and Key='link'" % table)
#return dict([(link['Column'], link['Value']) for link in links])
def getColumnConfig(self, table):
"""Return the column config records for table.
        Records are returned as {columnName: {'Type': t, 'Constraints': c, 'Link': l}, ...}
(Note this is not the same as tableSchema)
"""
if table not in self.columnConfigCache:
if not self.hasTable('ColumnConfig'):
return {}
recs = SqliteDatabase.select(self, 'ColumnConfig', ['Column', 'Type', 'Constraints', 'Link'], sql="where lower(\"Table\")=lower('%s') order by rowid" % table)
if len(recs) == 0:
return {}
self.columnConfigCache[table] = collections.OrderedDict([(r['Column'], r) for r in recs])
return self.columnConfigCache[table]
def getTableConfig(self, table):
if self.tableConfigCache is None:
recs = SqliteDatabase.select(self, 'TableConfig')
self.tableConfigCache = advancedTypes.CaselessDict()
for rec in recs:
self.tableConfigCache[rec['Table']] = rec
#recs = self.select('TableConfig', sql="where \"Table\"='%s'" % table)
if table not in self.tableConfigCache:
raise Exception('No config record for table "%s"' % table)
return self.tableConfigCache[table]
def getDirRowID(self, dirHandle):
table = self.dirTableName(dirHandle)
if not self.hasTable(table):
return None
name = dirHandle.name(relativeTo=self.baseDir())
name1 = name.replace('/', '\\')
name2 = name.replace('\\', '/')
rec = self.select(table, ['rowid'], sql="where Dir='%s' or Dir='%s'" % (name1, name2))
if len(rec) < 1:
return None
#print rec[0]
return rec[0]['rowid']
def getDir(self, table, rowid):
## Return a DirHandle given table, rowid
res = self.select(table, ['Dir'], sql='where rowid=%d'%rowid)
if len(res) < 1:
raise Exception('rowid %d does not exist in %s' % (rowid, table))
#logMsg('rowid %d does not exist in %s' % (rowid, table), msgType='error') ### This needs to be caught further up in Photostim or somewhere, not here -- really this shouldn't be caught at all since it means something is wrong with the db
#return None
#print res
#return self.baseDir()[res[0]['Dir']]
return res[0]['Dir']
def dirTableName(self, dh):
"""Return the name of the directory table that should hold dh.
dh may be either a directory handle OR the string result of self.dataModel().dirType(dh)
"""
if isinstance(dh, DataManager.DirHandle):
typeName = self.dataModel().dirType(dh)
elif isinstance(dh, basestring):
typeName = dh
else:
raise TypeError(type(dh))
return "DirTable_" + typeName
#def dirTypeName(self, dh):
#info = dh.info()
#type = info.get('dirType', None)
#if type is None:
#if 'protocol' in info:
#if 'sequenceParams' in info:
#type = 'ProtocolSequence'
#else:
#type = 'Protocol' ## an individual protocol run, NOT a single run from within a sequence
#else:
#try:
#if self.dirTypeName(dh.parent()) == 'ProtocolSequence':
#type = 'Protocol'
#else:
#raise Exception()
#except:
#raise Exception("Can't determine type for dir %s" % dh.name())
#return type
def listTablesOwned(self, owner):
res = self.select('TableConfig', ['Table'], sql="where Owner='%s'" % owner)
return [x['Table'] for x in res]
## deprecated--use createTable() with owner specified instead.
#def takeOwnership(self, table, owner):
#self.insert("DataTableOwners", {'Table': table, "Owner": owner})
def tableOwner(self, table):
#res = self.select("DataTableOwners", ["Owner"], sql='where "Table"=\'%s\'' % table)
res = self.select('TableConfig', ['Owner'], sql="where \"Table\"='%s'" % table)
if len(res) == 0:
return None
return res[0]['Owner']
def describeData(self, data):
"""Given a dict or record array, return a table description suitable for creating / checking tables."""
columns = collections.OrderedDict()
if isinstance(data, list): ## list of dicts is ok
data = data[0]
if isinstance(data, np.ndarray):
for i in xrange(len(data.dtype)):
name = data.dtype.names[i]
typ = data.dtype[i].kind
if typ == 'i':
typ = 'int'
elif typ == 'f':
typ = 'real'
elif typ == 'S':
typ = 'text'
else:
if typ == 'O': ## check to see if this is a pointer to a string
allStr = 0
allHandle = 0
for i in xrange(len(data)):
val = data[i][name]
if val is None or isinstance(val, basestring):
allStr += 1
elif val is None or isinstance(val, DataManager.FileHandle):
allHandle += 1
if allStr == len(data):
typ = 'text'
elif allHandle == len(data):
typ = 'file'
else:
typ = 'blob'
columns[name] = typ
elif isinstance(data, dict):
for name, v in data.iteritems():
if functions.isFloat(v):
typ = 'real'
elif functions.isInt(v):
typ = 'int'
elif isinstance(v, basestring):
typ = 'text'
elif isinstance(v, DataManager.FileHandle):
typ = 'file'
else:
typ = 'blob'
columns[name] = typ
else:
raise Exception("Can not describe data of type '%s'" % type(data))
return columns
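    ## Sketch of the mapping above (the handle and values are hypothetical): a dict such as
    ##   {'source': someFileHandle, 'score': 1.5, 'n': 3, 'note': 'ok'}
    ## is described as {'source': 'file', 'score': 'real', 'n': 'int', 'note': 'text'};
    ## record arrays are described the same way using the dtype of each field.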
def select(self, table, columns='*', where=None, sql='', toDict=True, toArray=False, distinct=False, limit=None, offset=None):
"""Extends select to convert directory/file columns back into Dir/FileHandles. If the file doesn't exist, you will still get a handle, but it may not be the correct type."""
prof = debug.Profiler("AnalysisDatabase.select()", disabled=True)
data = SqliteDatabase.select(self, table, columns, where=where, sql=sql, distinct=distinct, limit=limit, offset=offset, toDict=True, toArray=False)
data = TableData(data)
prof.mark("got data from SQliteDatabase")
config = self.getColumnConfig(table)
## convert file/dir handles
for column, conf in config.iteritems():
if column not in data.columnNames():
continue
if conf.get('Type', '').startswith('directory'):
rids = set([d[column] for d in data])
linkTable = conf['Link']
handles = dict([(rid, self.getDir(linkTable, rid)) for rid in rids if rid is not None])
handles[None] = None
data[column] = map(handles.get, data[column])
elif conf.get('Type', None) == 'file':
def getHandle(name):
if name is None:
return None
else:
if os.sep == '/':
sep = '\\'
else:
sep = '/'
name = name.replace(sep, os.sep) ## make sure file handles have an operating-system-appropriate separator (/ for Unix, \ for Windows)
return self.baseDir()[name]
data[column] = map(getHandle, data[column])
prof.mark("converted file/dir handles")
ret = data.originalData()
if toArray:
ret = data.toArray()
prof.mark("converted data to array")
prof.finish()
return ret
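    ## Usage sketch (table and column names are hypothetical): rows selected from a table with a
    ## 'directory:Protocol' column come back with DirHandles instead of rowids, and 'file'
    ## columns come back as FileHandles resolved against baseDir():
    ##   rows = db.select('Events', ['ProtocolDir', 'trace'])
    ##   rows[0]['ProtocolDir']   ## -> DirHandle
    ##   rows[0]['trace']         ## -> FileHandle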
def _prepareData(self, table, data, ignoreUnknownColumns=False, batch=False):
"""
Extends SqliteDatabase._prepareData():
- converts DirHandles to the correct rowid for any linked columns
(and automatically adds directories to their tables if needed)
- converts filehandles to a string file name relative to the DB base dir.
"""
#if batch is False:
#raise Exception("AnalysisDatabase only implements batch mode.")
#links = self.listTableLinks(table)
config = self.getColumnConfig(table)
data = TableData(data).copy() ## have to copy here since we might be changing some values
dataCols = set(data.columnNames())
for colName, colConf in config.iteritems():
if colName not in dataCols:
continue
if colConf.get('Type', '').startswith('directory'):
## Make sure all directories are present in the DB
handles = data[colName]
linkTable = colConf['Link']
if linkTable is None:
raise Exception('Column "%s" is type "%s" but is not linked to any table.' % (colName, colConf['Type']))
rowids = {None: None}
for dh in set(handles):
if dh is None:
continue
dirTable, rid = self.addDir(dh)
if dirTable != linkTable:
linkType = self.getTableConfig(linkTable)['DirType']
dirType = self.getTableConfig(dirTable)['DirType']
raise Exception("Trying to use directory '%s' (type='%s') for column %s.%s, but this column is for directories of type '%s'." % (dh.name(), dirType, table, colName, linkType))
rowids[dh] = rid
## convert dirhandles to rowids
data[colName] = map(rowids.get, handles)
elif colConf.get('Type', None) == 'file':
## convert filehandles to strings
files = []
for f in data[colName]:
if f is None:
files.append(None)
else:
try:
files.append(f.name(relativeTo=self.baseDir()))
except:
print "f:", f
raise
data[colName] = files
newData = SqliteDatabase._prepareData(self, table, data, ignoreUnknownColumns, batch)
return newData
| python |
# -*- coding: utf-8 -*-
"""
Created Aug 11, 2020
author: Mark Panas
"""
def OpenAirBeam2(filename):
import numpy as np
import pandas as pd
with open(filename) as fp:
out = fp.readlines()
#print(out[0].rstrip().split(','))
if out[0].rstrip().split(',')[0] != "":
#print("Data format = 1")
bad_rows = []
element_names = []
for i in range(len(out)):
try:
float(out[i].rstrip().split(',')[3])
except(ValueError):
#print("Line %i:" % (i),out[i].rstrip().split(','))
if out[i].rstrip().split(',')[0] == "sensor:model":
bad_rows.append(i)
if out[i].rstrip().split(',')[0].split('-')[0] == 'AirBeam2':
element_names.append(out[i].rstrip().split(',')[0].split('-')[1])
#print(element_names)
d_pm = {}
col_names = out[2].rstrip().split(',')
for i in range(len(bad_rows)):
if i == 0:
skip_rows_start = np.asarray([bad_rows[i],bad_rows[i]+1, bad_rows[i]+2])
skip_rows_rest = np.arange(bad_rows[i+1],len(out))
skip_rows_all = np.concatenate((skip_rows_start, skip_rows_rest))
d_pm[element_names[i]] = pd.read_csv(filename, header=None, names=col_names, skiprows=skip_rows_all)
elif i != len(bad_rows)-1:
skip_rows_start = np.arange(0,bad_rows[i]+1)
skip_rows_mid = np.asarray([bad_rows[i],bad_rows[i]+1, bad_rows[i]+2])
skip_rows_rest = np.arange(bad_rows[i+1],len(out))
skip_rows_all = np.concatenate((skip_rows_start, skip_rows_mid, skip_rows_rest))
d_pm[element_names[i]] = pd.read_csv(filename, header=None, names=col_names, skiprows=skip_rows_all)
else:
d_pm[element_names[i]] = pd.read_csv(filename, header=None, names=col_names, skiprows=np.arange(0,bad_rows[i]+3))
data_format = 1
col_names = element_names
else:
col_names = ['F', 'PM1', 'PM10', 'PM2.5', 'RH']
all_col_names = ['Timestamp', 'Latitude', 'Longitude', 'F', 'PM1', 'PM10', 'PM2.5', 'RH']
d_pm = pd.read_csv(filename, names=all_col_names, skiprows=9, usecols=range(2,10))
data_format = 2
# Arrays of different values may be different lengths
# Find the smallest length
column_lengths = []
for i in range(len(col_names)):
if data_format == 1: column_lengths.append(d_pm[col_names[i]]["Value"].shape)
if data_format == 2: column_lengths.append(d_pm[col_names[i]].dropna().shape)
min_length = min(column_lengths)[0]
# Consolidate the lat long data into one average array
    lats = np.empty((min_length, len(col_names)))
    longs = np.empty((min_length, len(col_names)))
for i in range(len(col_names)):
if data_format == 1:
lats[:,i] = d_pm[col_names[i]]['geo:lat'][0:min_length]
longs[:,i] = d_pm[col_names[i]]['geo:long'][0:min_length]
if data_format == 2:
lats[:,i] = d_pm['Latitude'][d_pm[col_names[i]].dropna()[0:min_length].index]
longs[:,i] = d_pm['Longitude'][d_pm[col_names[i]].dropna()[0:min_length].index]
lats = np.mean(lats, axis=1)
longs = np.mean(longs, axis=1)
# Generate arrays for absolute time and relative time
if data_format == 1:
d_pm['datetime'] = pd.DataFrame()
for i in range(len(col_names)):
d_pm['datetime'][col_names[i]] = pd.to_datetime(d_pm[col_names[i]]['Timestamp'],format="%Y-%m-%dT%H:%M:%S.%f-0400")
if i == 0:
min_time = np.min(d_pm['datetime'][col_names[i]])
                max_time = np.max(d_pm['datetime'][col_names[i]])
else:
if d_pm['datetime'][col_names[i]].min() < min_time:
min_time = np.min(d_pm['datetime'][col_names[i]])
if d_pm['datetime'][col_names[i]].max() > max_time:
max_time = np.max(d_pm['datetime'][col_names[i]])
if data_format == 2:
d_pm['datetime'] = pd.to_datetime(d_pm['Timestamp'],format="%Y-%m-%dT%H:%M:%S.%f")
min_time = np.min(d_pm['datetime'])
max_time = np.max(d_pm['datetime'])
datetimes = np.asarray(pd.date_range(min_time, max_time, min_length).to_series(), dtype=np.datetime64)
t_end = float((max_time - min_time) // pd.Timedelta('1ms'))/1000
rel_time = np.linspace(0,t_end, min_length)
# Copy the measurement values into numpy arrays
if data_format == 1:
temp = np.asarray(d_pm["F"]["Value"][:min_length])
pm1 = np.asarray(d_pm["PM1"]["Value"][:min_length])
pm10 = np.asarray(d_pm["PM10"]["Value"][:min_length])
pm2 = np.asarray(d_pm["PM2.5"]["Value"][:min_length])
rh = np.asarray(d_pm["RH"]["Value"][:min_length])
if data_format == 2:
temp = np.asarray(d_pm["F"].dropna()[:min_length])
pm1 = np.asarray(d_pm["PM1"].dropna()[:min_length])
pm10 = np.asarray(d_pm["PM10"].dropna()[:min_length])
pm2 = np.asarray(d_pm["PM2.5"].dropna()[:min_length])
rh = np.asarray(d_pm["RH"].dropna()[:min_length])
return datetimes, rel_time, temp, pm1, pm10, pm2, rh, lats, longs
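# Usage sketch (the file name below is hypothetical): OpenAirBeam2 handles both AirBeam2 CSV
# layouts and returns equal-length arrays truncated to the shortest measurement channel.
#   datetimes, rel_time, temp, pm1, pm10, pm2, rh, lats, longs = OpenAirBeam2("session.csv")
#   # rel_time is seconds since the first timestamp; lats/longs are averaged across channels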
def OpenAeroqual(filename):
import pandas as pd
import numpy as np
df = pd.read_csv(filename, header=0, skipinitialspace=True)
df['datetime'] = pd.to_datetime(df['Date Time'],format="%d %b %Y %H:%M")
td = (df['datetime'] - df['datetime'][0])// pd.Timedelta('1ms')/1000
abs_time = np.asarray(df['datetime'], dtype=np.datetime64)
rel_time = np.asarray(td)
if any(df.columns == 'CO2(ppm)'):
vmr = np.asarray(df['CO2(ppm)'])
else:
vmr = np.asarray(df['O3(ppm)'])
return abs_time, rel_time, vmr
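# Usage sketch (file name is hypothetical): OpenAeroqual reads an Aeroqual CSV export and returns
# absolute timestamps, elapsed seconds, and the CO2(ppm) or O3(ppm) column, whichever is present.
#   abs_time, rel_time, vmr = OpenAeroqual("aeroqual_export.csv")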
def PointLabels(x, y, n, plot_index=False):
import matplotlib.pyplot as plt
import numpy as np
xy_locs = list(zip(x[::n], y[::n]))
if plot_index == True:
x = np.arange(0, x.shape[0])
xy_labels = list(zip(x[::n], y[::n]))
else:
xy_labels = xy_locs
for i in range(len(xy_locs)):
plt.annotate('(%s, %s)' % xy_labels[i], xy=xy_locs[i], textcoords='data')
def factorization(n):
from math import gcd
factors = []
def get_factor(n):
x_fixed = 2
cycle_size = 2
x = 2
factor = 1
while factor == 1:
for count in range(cycle_size):
if factor > 1: break
x = (x * x + 1) % n
factor = gcd(x - x_fixed, n)
cycle_size *= 2
x_fixed = x
return factor
while n > 1:
next = get_factor(n)
factors.append(next)
n //= next
return factors
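# Illustration: get_factor() is a Pollard-rho style search, so the pieces returned are only
# guaranteed to multiply back to n; they are not necessarily prime.
#   factorization(84)   # -> [3, 4, 7] with this implementation (note the composite factor 4)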
def SaveAirbeam2(filename, pm_datetimes, pm_rel_time, pm1, pm2, pm10, pm_temp, pm_rh):
import pandas as pd
d = {"datetimes":pm_datetimes,"rel_time":pm_rel_time, "pm1":pm1, "pm2.5":pm2, "pm10":pm10, "pm_temp":pm_temp, "pm_rh":pm_rh}
pd.DataFrame(d).to_csv(filename)
def SaveAeroqual(filename, datetimes, rel_time, vmr):
import pandas as pd
d = {"datetimes":datetimes,"rel_time":rel_time, "vmr":vmr}
pd.DataFrame(d).to_csv(filename) | python |
import requests
import os
import json
import logging
from logging.handlers import TimedRotatingFileHandler
import time
from kafka import KafkaProducer
import psycopg2
import datetime
import pytz
from psycopg2.extras import Json
from psycopg2.sql import SQL, Literal, Identifier
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
# Daily rotating logs
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
handler = TimedRotatingFileHandler('snt.log',
when='midnight',
backupCount=10)
handler.setFormatter(formatter)
logger = logging.getLogger('snt_logger')
#logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
bearer_token = os.environ.get("BEARER_TOKEN")
http = requests.Session()
# We want to account for timeouts. The Twitter API says there should be 20s
# heartbeat messages as per
# https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/integrate/handling-disconnections
# We will set our timeout limit to 30s which should be able to account
# for the heartbeats (which are newline characters - \n)
DEFAULT_TIMEOUT = 30 # seconds
class TimeoutHTTPAdapter(HTTPAdapter):
def __init__(self, *args, **kwargs):
self.timeout = DEFAULT_TIMEOUT
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
del kwargs["timeout"]
super().__init__(*args, **kwargs)
def send(self, request, **kwargs):
timeout = kwargs.get("timeout")
if timeout is None:
kwargs["timeout"] = self.timeout
return super().send(request, **kwargs)
retry_strategy = Retry(
total=10,
backoff_factor=2,
status_forcelist=[429, 500, 502, 503, 504],
allowed_methods=["HEAD", "GET", "OPTIONS"]
)
http.mount("https://", TimeoutHTTPAdapter(max_retries=retry_strategy))
http.mount("http://", TimeoutHTTPAdapter(max_retries=retry_strategy))
producer = KafkaProducer(
bootstrap_servers='localhost:9092'
)
def bearer_oauth(r):
"""
Method required by bearer token authentication.
"""
r.headers["Authorization"] = f"Bearer {bearer_token}"
r.headers["User-Agent"] = "S-n-Tweet Alpha"
return r
def get_rules():
logger.info('starting get_rules()')
response = http.get(
"https://api.twitter.com/2/tweets/search/stream/rules", auth=bearer_oauth
)
if response.status_code != 200:
err = "Cannot get rules (HTTP {}): {}".format(response.status_code, response.text)
logger.error(err)
raise Exception(
err
)
rule_response = response.json()
logger.info('done get_rules()')
logger.info(f'got rules: {rule_response}')
return rule_response
def delete_all_rules(rules):
logger.info('starting delete_all_rules()')
    if rules is None or "data" not in rules:
        logger.info('no existing rules found')
        return None
ids = list(map(lambda rule: rule["id"], rules["data"]))
payload = {"delete": {"ids": ids}}
response = http.post(
"https://api.twitter.com/2/tweets/search/stream/rules",
auth=bearer_oauth,
json=payload
)
if response.status_code != 200:
err = "Cannot delete rules (HTTP {}): {}".format(
response.status_code, response.text
)
logger.error(err)
raise Exception(
err
)
logger.info('done delete_all_rules()')
#print(json.dumps(response.json()))
def set_rules(delete):
# You can adjust the rules if needed
logger.info('starting set_rules()')
rules = [
{"value": "TSLA"},
#{"value": "MSFT"},
#{"value": "GOOG"},
#{"value": "GME"},
#{"value": "BTC"},
#{"value": "#ElectionsCanada"},
#{"value": "AAPL"},
#{"value": "AMZN"},
]
payload = {"add": rules}
response = http.post(
"https://api.twitter.com/2/tweets/search/stream/rules",
auth=bearer_oauth,
json=payload,
)
logger.info(f'set rules: {json.dumps(response.json())}')
try:
j = response.json()
# Example response
#
# {
# "data": [
# {
# "value": "TSLA",
# "id": "1429130887095017481"
# },
# {
# "value": "GOOG",
# "id": "1429130887095017480"
# },
# {
# "value": "MSFT",
# "id": "1429130887095017482"
# }
# ],
# "meta": {
# "sent": "2021-08-20T20:21:29.534Z",
# "summary": {
# "created": 3,
# "not_created": 0,
# "valid": 3,
# "invalid": 0
# }
# }
# }
senttime = datetime.datetime.strptime(j['meta']['sent'], '%Y-%m-%dT%H:%M:%S.%fZ')
summary_created = j['meta']['summary']['created']
summary_not_created = j['meta']['summary']['not_created']
summary_valid = j['meta']['summary']['valid']
summary_invalid = j['meta']['summary']['invalid']
with psycopg2.connect("host=100.100.100.42 dbname=datascience user=roman") as pg_con:
with pg_con.cursor() as cursor:
for rule in j['data']:
match_value = rule['value']
match_id = rule['id']
sql = """
insert into snt.rules
(match_id, match_value, sent_time, summary_created, summary_not_created, summary_valid, summary_invalid)
values
(%s, %s, %s, %s, %s, %s, %s);
"""
cursor.execute(
sql,
(match_id, match_value, str(senttime), summary_created, summary_not_created, summary_valid, summary_invalid)
)
pg_con.commit()
except Exception as e:
logger.error(e)
raise e
if response.status_code != 201:
err = "Cannot add rules (HTTP {}): {}".format(response.status_code, response.text)
logger.error(err)
raise Exception(
err
)
logger.info('done setting rules')
def get_stream(set):
logger.info('starting get_stream()')
response = http.get(
"https://api.twitter.com/2/tweets/search/stream", auth=bearer_oauth, stream=True,
)
logger.info(f'get_stream response: {response.status_code}')
if response.status_code != 200:
err = "Cannot get stream (HTTP {}): {}".format(
response.status_code, response.text
)
logger.error(err)
raise Exception(err)
local_timezone = pytz.timezone('America/Edmonton')
utc_timezone = pytz.timezone("UTC")
for response_line in response.iter_lines():
try:
if response_line:
producer.send(
'tweets',
response_line,
timestamp_ms=int(datetime.datetime.utcnow().timestamp() * 1000)
)
except Exception as e:
logger.error(e)
raise e
def main():
rules = get_rules()
delete = delete_all_rules(rules)
set = set_rules(delete)
get_stream(set)
if __name__ == "__main__":
main()
| python |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from tinctest.models.scenario import ScenarioTestCase
from mpp.gpdb.tests.storage.aoco_compression import GenerateSqls
class AOCOCompressionTestCase(ScenarioTestCase):
"""
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
@classmethod
def setUpClass(cls):
gensql = GenerateSqls()
gensql.generate_sqls()
def test_aoco_large_block(self):
'''
@data_provider test_types_large
'''
test_list1 = []
test_list1.append("mpp.gpdb.tests.storage.aoco_compression.test_runsqls.%s" % self.test_data[1][0])
self.test_case_scenario.append(test_list1)
def test_validation(self):
'''
        Check catalog and checkmirrorintegrity
        note: Separating this out to not run as part of every test
'''
test_list1 = []
test_list1.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.run_validation")
self.test_case_scenario.append(test_list1)
@tinctest.dataProvider('test_types_large')
def test_data_provider():
data = {'test_01_3_co_create_storage_directive_large':['co_create_storage_directive_large_2G_zlib'],
'test_01_4_co_create_storage_directive_large':['co_create_storage_directive_large_2G_quick_rle'],
'test_01_6_co_create_storage_directive_large':['co_create_storage_directive_large_2G_zlib_2'],
'test_02_3_co_create_column_reference_default_large':['co_create_column_reference_default_large_2G_zlib'],
'test_02_4_co_create_column_reference_default_large':['co_create_column_reference_default_large_2G_quick_rle'],
'test_02_6_co_create_column_reference_default_large':['co_create_column_reference_default_large_2G_zlib_2'],
'test_03_3_co_create_column_reference_column_large':['co_create_column_reference_column_large_2G_zlib'],
'test_03_4_co_create_column_reference_column_large':['co_create_column_reference_column_large_2G_quick_rle'],
'test_03_6_co_create_column_reference_column_large':['co_create_column_reference_column_large_2G_zlib_2'],
'test_04_3_ao_create_with_row_large':['ao_create_with_row_large_2G_zlib'],
'test_04_4_ao_create_with_row_large':['ao_create_with_row_large_2G_quick_rle'],
}
return data
| python |
from PIL import Image
import math
import sys
import eleksdrawpy as xy
def create_paths(im):
f = (255 * 255 * 3) ** 0.5
paths = []
w, h = im.size
for m in [-2, -1, 0, 1, 2]:
for radius in range(0, w, 8):
path = []
for a in range(1800):
a = math.radians(a / 10.0)
x = w / 2 + int(math.cos(a) * radius)
y = h - int(math.sin(a) * radius)
if x < 0 or x >= w:
continue
if y < 0 or y >= h:
continue
r, g, b = im.getpixel((x, y))
p = (r * r + g * g + b * b) ** 0.5
p = 1 - (p / f)
p = p ** 2
if p < 0.05:
if len(path) > 1:
paths.append(path)
path = []
else:
x = w / 2 + math.cos(a) * (radius + m * p)
y = h - math.sin(a) * (radius + m * p)
path.append((x, y))
if len(path) > 1:
paths.append(path)
return paths
def main():
im = Image.open(sys.argv[1])
paths = create_paths(im)
drawing = xy.Drawing(paths).rotate_and_scale_to_fit(315, 380, step=90)
drawing = drawing.sort_paths()
drawing = drawing.join_paths(tolerance = 0.1)
im = drawing.render()
im.write_to_png('image.png')
# xy.draw(drawing)
if __name__ == '__main__':
main()
| python |
"""
Satellite6Version - file ``/usr/share/foreman/lib/satellite/version.rb``
========================================================================
Module for parsing the content of file ``version.rb`` or ``satellite_version``,
which is a simple file in foreman-debug or sosreport archives of Satellite 6.x.
Typical content of "satellite_version" is::
COMMAND> cat /usr/share/foreman/lib/satellite/version.rb
module Satellite
VERSION = "6.1.3"
end
Note:
This module can only be used for Satellite 6.x
Examples:
>>> sat6_ver = shared[SatelliteVersion]
>>> sat6_ver.full
"6.1.3"
>>> sat6_ver.version
"6.1.3"
>>> sat6_ver.major
6
>>> sat6_ver.minor
1
>>> sat6_ver.release
None
"""
from .. import parser, Parser
from ..parsers import ParseException
from insights.specs import Specs
@parser(Specs.satellite_version_rb)
class Satellite6Version(Parser):
""" Class for parsing the content of ``satellite_version``."""
def parse_content(self, content):
# To keep compatible with combiner satellite_version
self.full = self.release = None
self.version = None
for line in content:
if line.strip().upper().startswith('VERSION'):
self.full = line.split()[-1].strip('"')
self.version = self.full
break
if self.version is None:
raise ParseException('Cannot parse satellite version')
@property
def major(self):
if self.version:
return int(self.version.split(".")[0])
@property
def minor(self):
if self.version:
s = self.version.split(".")
if len(s) > 1:
return int(s[1])
| python |
def main():
# input
N = int(input())
# compute
l_0, l_1 = 2, 1
if N == 1:
print(l_1)
else:
for _ in range(N-1):
l_i = l_0 + l_1
l_0, l_1 = l_1, l_i
print(l_i)
# output
if __name__ == '__main__':
main()
| python |
"""
Qxf2 Services: Utility script to compare images
* Compare two images(actual and expected) smartly and generate a resultant image
* Get the sum of colors in an image
"""
from PIL import Image, ImageChops
import math, os
def rmsdiff(im1,im2):
"Calculate the root-mean-square difference between two images"
h = ImageChops.difference(im1, im2).histogram()
# calculate rms
    return math.sqrt(sum(count * (i ** 2) for i, count in enumerate(h)) / (float(im1.size[0]) * im1.size[1]))
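# Note on the formula above: ImageChops.difference() yields per-pixel absolute differences,
# histogram() counts how many pixels have each difference value i (per band), and the sum of
# count * i**2 divided by the number of pixels is the mean squared difference; its square root
# is the RMS score that is_equal() below compares against a threshold.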
def is_equal(img_actual,img_expected,result):
"Returns true if the images are identical(all pixels in the difference image are zero)"
result_flag = False
if not os.path.exists(img_actual):
print('Could not locate the generated image: %s'%img_actual)
if not os.path.exists(img_expected):
print('Could not locate the baseline image: %s'%img_expected)
if os.path.exists(img_actual) and os.path.exists(img_expected):
actual = Image.open(img_actual)
expected = Image.open(img_expected)
result_image = ImageChops.difference(actual,expected)
color_matrix = ([0] + ([255] * 255))
result_image = result_image.convert('L')
result_image = result_image.point(color_matrix)
result_image.save(result)#Save the result image
if (ImageChops.difference(actual,expected).getbbox() is None):
result_flag = True
else:
#Let's do some interesting processing now
result_flag = analyze_difference_smartly(result)
if result_flag is False:
print("Since there is a difference in pixel value of both images, we are checking the threshold value to pass the images with minor difference")
#Now with threshhold!
result_flag = True if rmsdiff(actual,expected) < 958 else False
#For temporary debug purposes
print('RMS diff score: ',rmsdiff(actual,expected))
return result_flag
def analyze_difference_smartly(img):
"Make an evaluation of a difference image"
result_flag = False
if not os.path.exists(img):
print('Could not locate the image to analyze the difference smartly: %s'%img)
else:
my_image = Image.open(img)
        #Not an ideal line, but we don't have any enormous images
pixels = list(my_image.getdata())
pixels = [1 for x in pixels if x!=0]
num_different_pixels = sum(pixels)
print('Number of different pixels in the result image: %d'%num_different_pixels)
#Rule 1: If the number of different pixels is <10, then pass the image
#This is relatively safe since all changes to objects will be more than 10 different pixels
if num_different_pixels < 10:
result_flag = True
return result_flag
def get_color_sum(img):
"Get the sum of colors in an image"
sum_color_pixels = -1
if not os.path.exists(img):
        print('Could not locate the image to sum the colors: %s'%img)
else:
my_image = Image.open(img)
color_matrix = ([0] + ([255] * 255))
my_image = my_image.convert('L')
my_image = my_image.point(color_matrix)
#Not an ideal line, but we don't have any enormous images
pixels = list(my_image.getdata())
sum_color_pixels = sum(pixels)
print('Sum of colors in the image %s is %d'%(img,sum_color_pixels))
return sum_color_pixels
#--START OF SCRIPT
if __name__=='__main__':
# Please update below img1, img2, result_img values before running this script
img1 = r'Add path of first image'
img2 = r'Add path of second image'
result_img= r'Add path of result image' #please add path along with resultant image name which you want
# Compare images and generate a resultant difference image
result_flag = is_equal(img1,img2,result_img)
if (result_flag == True):
print("Both images are matching")
else:
print("Images are not matching")
# Get the sum of colors in an image
get_color_sum(img1)
| python |
from requests import get
def myip():
return get('http://checkip.amazonaws.com/').text.strip()
| python |
# Three people decide to invest their money to found a company. Each of them invests a different amount.
# Obtain the percentage that each one invests with respect to the total amount invested.
primera_inversion = float(input("Ingrese la primera inversion \n"))
segunda_inversion = float(input("Ingrese la segunda inversion \n"))
tercera_inversion = float(input("Ingrese la tercera inversion \n"))
total_invertido =primera_inversion+segunda_inversion+tercera_inversion
print("EL porcentaje es de: " + str(primera_inversion*100/total_invertido))
print("EL porcentaje es de: " + str(segunda_inversion*100/total_invertido))
print("EL porcentaje es de: " + str(tercera_inversion*100/total_invertido))
| python |
import os
with open('locationsCOMSAT.csv') as f:
header = f.readline()
g = [l.rstrip().split(',') for l in f.readlines()]
## all information in string, not numerics
cmda = 'python createjobscriptsnora10a.py'
cmd = 'python createjobscriptsnora10.py'
ncdir = '/work/users/kojito/nora10/nc'
start = '2011'
end = '2011' ## including the end
orog = '/work/users/kojito/nora10/nc/orog/NORA10_11km_orog_new3.nc'
def customsubmit(varname, timeres, name, lat, lon, alt, initial = False):
cm = cmda if initial else cmd
scriptfname = '%s_%s.sh' % ('C' + name[6:], varname)
c = '%s %s %s %s %s %s/%s/NORA10_%s_11km_%s_ %s %s %s %s' % (
cm, name, lon, lat, alt, ncdir, varname, timeres, varname,
start, end, orog, scriptfname)
os.system(c)
os.system('submit %s' % scriptfname)
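# For illustration (station name and paths are hypothetical), a call such as
#   customsubmit('wss_10m', '1H', 'COMSAT01234', '60.0', '5.0', '10')
# shells out to createjobscriptsnora10.py with the nc directory, year range and orography file,
# writing a job script named like C01234_wss_10m.sh and submitting it with 'submit'.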
for name, lat, lon, alt in g:
# customsubmit('ta_2m', '1H', name, lat, lon, alt, initial=True)
# customsubmit('pr', '1H', name, lat, lon, alt)
customsubmit('wss_10m', '1H', name, lat, lon, alt)
# customsubmit('hur_2m', '1H', name, lat, lon, alt)
# customsubmit('ps', '3H', name, lat, lon, alt)
# customsubmit('clt', '1H', name, lat, lon, alt)
# customsubmit('albedo', '1H', name, lat, lon, alt)
# customsubmit('rls', '1H', name, lat, lon, alt)
# customsubmit('rss', '1H', name, lat, lon, alt)
# customsubmit('ts_0m', '1H', name, lat, lon, alt)
| python |
import speech_recognition as sr
import pyttsx3
from datetime import datetime
import webbrowser
from subprocess import Popen, CREATE_NEW_CONSOLE
import random
import sys
speech = 0
commands = {}
scripts = {}
responses = {}
active = True
def audio_to_text(recognizer, mic):
if not isinstance(recognizer, sr.Recognizer):
raise TypeError("recognizer must be speech_recognition.Recognizer instance")
if not isinstance(mic, sr.Microphone):
raise TypeError("mic must be speech_recognition.Microphone instance")
result = { "success": True, "input": None }
with mic as source:
recognizer.adjust_for_ambient_noise(source, duration = 1)
audio_input = recognizer.listen(source)
try:
result["input"] = recognizer.recognize_google(audio_input)
except sr.UnknownValueError:
result["input"] = None
except sr.RequestError:
result["success"] = False
result["input"] = "speech recognition Google API is unavailable"
return result
def speak(text):
speech.say(text)
speech.runAndWait()
def read_entire_file(filepath):
try:
file = open(filepath, "r")
file_contents = file.read()
file.close()
return file_contents
except IOError:
print("Couldn't read " + filepath)
        exit()
def get_resource(resource_path):
file_contents = read_entire_file(resource_path)
resource = {}
lines = file_contents.split("\n")
for line in lines:
resource_item = line.split(" : ")
resource.update({resource_item[0] : resource_item[1].split(",")})
return resource
def match(command_type, words):
for vocab_word in commands[command_type]:
if vocab_word in words:
return True
return False
def react(input):
if active:
if match("search", input):
execute_search_command(input)
elif match("start", input):
execute_start_command(input)
elif match("time", input):
execute_time_command()
elif match("weather", input):
execute_weather_command()
elif match("hello", input):
execute_greet_command()
elif match("bye", input):
execute_bye_command()
elif match("thanks", input):
execute_thanks_command()
elif match("sleep", input):
execute_sleep_command()
else:
if match("wake", input):
execute_wake_command()
def execute_wake_command():
speak("I'm here")
global active
active = True
def execute_sleep_command():
speak("Going to sleep")
global active
active = False
def execute_time_command():
current_time = datetime.now()
speak("It's " + current_time.strftime("%H:%M %A %d of %B %Y"))
print("It's ", current_time.strftime("%H:%M %A %d of %B %Y"))
def execute_search_command(words):
speak("Opening in the browser")
query = "robot ai uprising"
for vocab_word in commands["search"]:
if vocab_word in words:
query = words[len(vocab_word) + 1:] # substring with only query in it ('+ 1' for one space)
break
url = "https://www.google.com/search?q={}".format(query)
webbrowser.open(url)
def execute_weather_command():
execute_search_command("search weather")
def execute_greet_command():
response = responses["hello"]
speak(response[random.randint(0, len(response) - 1)])
def execute_bye_command():
response = responses["bye"]
speak(response[random.randint(0, len(response) - 1)])
sys.exit()
def execute_thanks_command():
response = responses["thanks"]
speak(response[random.randint(0, len(response) - 1)])
def execute_start_command(words):
    # occasionally sid will give a response
    # P = 0.5 * 0.5 * 0.5 = 0.125, i.e. the response will be given in 12.5% of the occurrences
if (random.randint(0, 1) + random.randint(0, 1) + random.randint(0, 1)) == 3:
speak(responses["ok"][random.randint(0, len(responses["ok"]) - 1)])
for script_name in scripts.keys():
if script_name in words:
for script_command in scripts[script_name]:
Popen(script_command, stdin=None, stdout=None, stderr=None, shell=True, creationflags=CREATE_NEW_CONSOLE)
break
def main():
r = sr.Recognizer()
mic = sr.Microphone(device_index = 1) # if no device_index supplied, then default mic (i'm not using the default one atm)
global speech
speech = pyttsx3.init()
voices = speech.getProperty('voices')
speech.setProperty("voice", voices[2].id)
speech.setProperty('rate', 125)
global commands
global scripts
global responses
commands = get_resource("resources/commands.sid")
scripts = get_resource("resources/start_scripts.sid")
responses = get_resource("resources/responses.sid")
while True:
result = audio_to_text(r, mic)
if not result["success"]:
print("Technical problems: " + result["input"])
break
elif result["input"] == None:
print("words could not be discerned")
else:
print("You said: " + result["input"])
react(result["input"])
main() | python |
'''1. Write a Demo class so that the following code runs correctly:
>>> demo = Demo()
>>> demo.x
'FishC'
>>> demo.x = "X-man"
>>> demo.x
'X-man'
'''
class Demo:
    def __getattr__(self, name):
return 'FishC'
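# Why this works: __getattr__ is only consulted when normal attribute lookup fails, so a fresh
# Demo() returns 'FishC' for demo.x, while after demo.x = "X-man" the value is found in the
# instance __dict__ and __getattr__ is bypassed, so demo.x gives 'X-man' as the exercise requires.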
| python |
import os
import torch
import torch.nn as nn
import unittest
from fusion.architecture.projection_head import LatentHead
os.environ['KMP_DUPLICATE_LIB_OK']='True'
class TestLatentHead(unittest.TestCase):
def test_forward(self):
dim_in = 32
dim_l = 64
latent_head = LatentHead(dim_in, dim_l, use_linear=True)
x = torch.rand((4, dim_in))
y = latent_head.forward(x)
self.assertEqual(y.size()[1], dim_l)
if __name__ == '__main__':
unittest.main()
| python |
import balanced
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')
debit = balanced.Debit.fetch('/debits/WD5SwXr9jcCfCmmjTH5MCMFD')
dispute = debit.dispute | python |
import mcprot
import asyncio
import logging
logging.basicConfig(level = logging.INFO)
stream = mcprot.PacketStream('localhost', 25565)
loop = asyncio.get_event_loop()
result = loop.run_until_complete(stream.get_status())
print(result)
| python |
from typing import Any, Dict, List
from electionguard.types import BALLOT_ID
from .base import BaseResponse, BaseRequest, Base
from .election import CiphertextElectionContext
from .tally import CiphertextTally
__all__ = [
"CiphertextTallyDecryptionShare",
"DecryptTallyShareRequest",
"DecryptionShareRequest",
"DecryptionShareResponse",
]
DecryptionShare = Any
ElectionGuardCiphertextTally = Any
class CiphertextTallyDecryptionShare(Base):
"""
A DecryptionShare provided by a guardian for a specific tally.
Optionally can include ballot_shares for challenge ballots.
"""
election_id: str # TODO: not needed since we have the tally_name?
tally_name: str
guardian_id: str
tally_share: DecryptionShare
"""The EG Decryptionshare that includes a share for each contest in the election."""
ballot_shares: Dict[BALLOT_ID, DecryptionShare] = {}
"""A collection of shares for each challenge ballot."""
class DecryptTallyShareRequest(BaseRequest):
"""A request to partially decrypt a tally and generate a DecryptionShare."""
guardian_id: str
encrypted_tally: CiphertextTally
context: CiphertextElectionContext
class DecryptionShareRequest(BaseRequest):
"""A request to submit a decryption share."""
share: CiphertextTallyDecryptionShare
class DecryptionShareResponse(BaseResponse):
"""A response that includes a collection of decryption shares."""
shares: List[CiphertextTallyDecryptionShare]
| python |
import pandas as pd
import streamlit as st
import numpy as np
df = pd.read_csv('data/raw/ames_housing_data.csv')
max_price = df['SalePrice'].max()+50
min_price = df['SalePrice'].min()
bins = np.linspace(min_price, max_price, 60)
inds = np.digitize(df['SalePrice'], bins)
price_groups = [bins[inds[i]] for i in range(df['SalePrice'].size)]
df['price_groups'] = np.round(price_groups)
df['log_price'] = np.log(df['SalePrice'])
max_price_log = df['log_price'].max()+.01
min_price_log = df['log_price'].min()
bins_log = np.linspace(min_price_log, max_price_log, 60)
inds_log = np.digitize(df['log_price'], bins_log)
price_groups_log = []
for i in range(df['log_price'].size):
price_groups_log.append(bins_log[inds_log[i]])
df['log_price_groups'] = price_groups_log
st.title('Ames Housing Project')
st.write(df.head(10))
st.bar_chart(df['price_groups'].value_counts())
st.subheader('Log Transformation')
st.bar_chart(np.round(df['log_price_groups'], 2).value_counts())
#st.bar_chart(hist_vals2) | python |
import os
from typing import Dict
from allennlp.interpret.attackers import Attacker, Hotflip
from allennlp_demo.common import config, http
class MaskedLmModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
def load_attackers(self) -> Dict[str, Attacker]:
hotflip = Hotflip(self.predictor, "bert")
hotflip.initialize()
return {"hotflip": hotflip}
if __name__ == "__main__":
endpoint = MaskedLmModelEndpoint()
endpoint.run()
| python |
import os
import unittest
import warnings
from flask import json
import webapp
from config import TestingConfig, Config
class HomeViewTest(unittest.TestCase):
#@unittest.skip
def setUp(self):
self.app = webapp.app.test_client()
self.app.testing = True
#@unittest.skip
def test_home_page(self):
home = self.app.get('/')
self.assertIn('Home Page', str(home.data))
class UserDataBase(unittest.TestCase):
tmp_user_id = -1
user_data = json.dumps({
"id": 0,
"nick": "Alice",
"first_name": "Foo",
"last_name": "Bar",
"mail": "[email protected]",
"pass": "pass",
"phone": "616949232",
"is_mod": False,
"ban_reason": "Razon expulsion",
"points": 0,
"avatar": "http://images.com/235gadfg",
"fnac": "2019-04-07",
"dni": "123456784",
"place": "Madrid",
"desc": "Hi I am the fuking Alice",
"token": "2sf78gsf68hsf5asfh68afh68a58fha68f"
})
user_data2 = json.dumps({
"id": 0,
"nick": "Alice2",
"first_name": "Foo",
"last_name": "Bar",
"mail": "[email protected]",
"pass": "pass",
"phone": "666999223",
"is_mod": True,
"ban_reason": "Razon expulsion",
"points": 0,
"avatar": "http://images.com/235gadfg",
"fnac": "2019-04-07",
"dni": "167666666",
"place": "Madrid",
"desc": "Hi I am the fuking Alice2",
"token": "2sf78gsf68hsf5asfh68afh6gha68f"
})
user_login = json.dumps({
"nick": "Alice",
"pass": "pass",
"remember": True
})
user2_login = json.dumps({
"nick": "Alice2",
"pass": "pass",
"remember": True
})
user_update = json.dumps({
"nick": "Alice",
"first_name": "Foo",
"last_name": "BarBar",
"mail": "[email protected]",
"pass": "pass",
"phone": "616949232",
"is_mod": True,
"ban_reason": "Razon expulsion",
"points": 0,
"avatar": "http://images.com/235gadfg",
"fnac": "2019-04-07",
"dni": "123456784",
"place": "Madrid",
"desc": "Hi I am the fuking Alice updated",
"token": "2sf78gsf68hsf5asfh68afh68a58fha68f",
"pass_hash": "s32uh5423j5h23jh52jh35"
})
#@unittest.skip
def setUp(self):
self.app = webapp.app.test_client()
self.app.testing = True
#@unittest.skip
def test_add_user(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
r_json = self.app.post('/user', data=self.user_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful insertion
user_id = r_json["message"]
self.__class__.tmp_user_id = user_id
check = self.app.get('/profile/Alice')
self.assertIn('616949232', str(check.get_json())) # Check get info
self.app.post('/login', data=self.user_login, content_type='application/json')
self.app.delete('/user')
#@unittest.skip
def test_session_user(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/user', data=self.user_data, content_type='application/json')
r_json = self.app.post('/login', data=self.user_login, content_type='application/json').get_json()
self.assertIn('Alice', str(r_json)) # Check successful login
r_json = self.app.get('/user').get_json()
self.assertIn('Alice', str(r_json)) # Check get logged user info
r_json = self.app.get('/logout').get_json() # Logout
self.assertIn('out', str(r_json)) # Check successful
r_json = self.app.get('/user').get_json() # Try get my info
self.assertIn('Not logged in', str(r_json)) # Check successful
self.app.post('/login', data=self.user_login, content_type='application/json')
self.app.delete('/user')
#@unittest.skip
def test_update_user(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
id = self.app.post('/user', data=self.user_data, content_type='application/json').get_json()["message"]
self.app.post('/login', data=self.user_login, content_type='application/json')
self.app.post('/login', data=self.user_login, content_type='application/json') # Login to set the session
r_json = self.app.put('/user', data=self.user_update, content_type='application/json').get_json()
msg = r_json["message"]
self.assertIn(str(id), msg) # Check successful update
r = self.app.get('/user').get_json()
self.assertIn("BarBar", str(r)) # Check sucessful update
self.app.delete('/user')
#@unittest.skip
def test_delete_user(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
id = self.app.post('/user', data=self.user_data, content_type='application/json').get_json()["message"]
self.app.post('/login', data=self.user_login, content_type='application/json')
r_json = self.app.delete('/user').get_json()
msg = r_json["message"]
self.assertIn(str(id), msg) # Check successful deletion
r = self.app.post('/login', data=self.user_login, content_type='application/json').get_json()
self.assertIn("User not found", str(r)) # Check unsuccessful login
#@unittest.skip
def test_mod_users(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
r_json = self.app.post('/user', data=self.user_data,
content_type='application/json').get_json() # User created
user_id = r_json["message"]
self.__class__.tmp_user_id = user_id
r_json = self.app.put('/user/' + str(user_id) + '/mod').get_json()
self.assertIn('Ok', str(r_json)) # Check set mod
self.app.post('/login', data=self.user_login, content_type='application/json') # Login to set the session
r_json = self.app.get('/user/' + str(user_id)).get_json()
self.assertIn('Alice', str(r_json)) # Check get user info
r_json = self.app.put('/user/' + str(user_id), data=self.user_update,
content_type='application/json').get_json()
self.assertIn('updated', str(r_json)) # Check update user info
r_json = self.app.delete('/user/' + str(user_id)).get_json()
self.assertIn('deleted', str(r_json)) # Check delete user info
r_json = self.app.post('/login', data=self.user_login, content_type='application/json').get_json() # Login to set the session
self.assertIn('not found', str(r_json)) # Check get user info
#@unittest.skip
def test_ban_users(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
r_json = self.app.post('/user', data=self.user_data,
content_type='application/json').get_json() # User created
mod_user_id = r_json["message"]
r_json = self.app.post('/user', data=self.user_data2,
content_type='application/json').get_json() # User created
ban_user_id = r_json["message"]
self.app.put('/user/' + str(mod_user_id) + '/mod')
self.app.post('/login', data=self.user_login, content_type='application/json') # Login to set the session
ban_data = json.dumps({
"ban_reason": "Ban for example",
"ban_until": "9999-04-13"
})
r_json = self.app.put('/user/' + str(ban_user_id) + '/ban', data=ban_data,
content_type='application/json').get_json()
self.assertIn('(' + str(ban_user_id) + ') banned', str(r_json)) # Check the ban
r_json = self.app.post('/login', data=self.user2_login,
content_type='application/json').get_json() # Login to check
self.assertIn("Ban for example", str(r_json))
self.app.delete('/user/' + str(ban_user_id))
self.app.delete('/user/' + str(mod_user_id))
#@unittest.skip
def test_list_search_users(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
id1 = self.app.post('/user', data=self.user_data, content_type='application/json').get_json()["message"]
id2 = self.app.post('/user', data=self.user_data2, content_type='application/json').get_json()["message"]
self.app.put('/user/' + str(id2) + '/mod')
r_json = self.app.get('users').get_json()
self.assertIn("\'length\'", str(r_json))
r_json = self.app.get('/search/users?text=Alice').get_json()
self.assertIn("\'length\'", str(r_json))
self.app.post('/login', data=self.user2_login, content_type='application/json')
self.app.delete('/user/' + str(id1)).get_json()
self.app.delete('/user/' + str(id2)).get_json()
class ProductDataBase(unittest.TestCase):
user_id: int = 1
prod_data = json.dumps({
"descript": "This product is wonderful",
"price": 0,
"categories": [
"Moda"
],
"title": "Producto Molongo",
"bid_date": "1999-12-24 23:45:11",
"boost_date": "1999-12-24 23:45:12",
"visits": 0,
"followers": 0,
"publish_date": "2019-04-07",
"main_img": "http://images.com/123af3",
"photo_urls": [
"http://images.com/123af3"
],
"place": "Zaragoza",
"is_removed": True,
"ban_reason": "Razon Baneo"
})
prod_data2 = json.dumps({
"descript": "This product is wonderful uno",
"price": 34,
"categories": [
"Moda"
],
"title": "Producto Molongo2",
"bid_date": "1999-12-24 23:45:11",
"boost_date": "1999-12-24 23:45:12",
"visits": 0,
"followers": 0,
"publish_date": "2019-04-07",
"main_img": "http://images.com/123af3",
"photo_urls": [
"http://images.com/123af3"
],
"place": "Zaragoza",
"is_removed": True,
"ban_reason": "Razon Baneo"
})
prod_update = json.dumps({
"descript": "This product is wonderful",
"price": 55,
"categories": [
"Moda", "Complementeos"
],
"title": "Producto Molongo",
"bid_date": "1999-12-24 22:45:13",
"main_img": "http://images.com/hola",
"photo_urls": [
"http://images.com/122af3",
"http://images.com/fgfgfgfgfgf"
],
"place": "Madrid"
})
#@unittest.skip
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create user and login
self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
#@unittest.skip
def test_add_product(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
r_json = self.app.post('/product', data=self.prod_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful insertion
product_id = r_json["message"]
check = self.app.get('/product/' + str(product_id))
self.assertIn('Zaragoza', str(check.get_json()["place"])) # Check get info
#@unittest.skip
def test_update_product(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
r_json = self.app.post('/product', data=self.prod_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful insertion
product_id = r_json["message"]
r_json = self.app.put('/product/' + str(product_id), data=self.prod_update,
content_type='application/json').get_json()
            self.assertIn('updated', str(r_json)) # Check successful update
check = self.app.get('/product/' + str(product_id))
self.assertIn('fgfgfgfgfgf', str(check.get_json())) # Check get info
self.assertIn('122af3', str(check.get_json())) # Check get info
self.assertIn('Complementeos', str(check.get_json())) # Check get info
self.assertNotIn('123af3', str(check.get_json())) # Check get info
#@unittest.skip
def test_delete_product(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
r_json = self.app.post('/product', data=self.prod_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful insertion
product_id = r_json["message"]
r_json = self.app.delete('/product/' + str(product_id)).get_json()
self.assertIn('info', str(r_json)) # Check successful deletion
r_json = self.app.get('/product/' + str(product_id)).get_json()
self.assertIn('not found', str(r_json)) # Check successful deletion
#@unittest.skip
def test_list_search_product(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/product', data=self.prod_data, content_type='application/json')
self.app.post('/product', data=self.prod_data2, content_type='application/json')
self.app.get('/logout')
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json')
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
r_json = self.app.get('/products').get_json()
self.assertIn('Producto Molongo', str(r_json)) # Check successful list
r_json = self.app.get('/search/products?text=Molongo').get_json()
self.assertIn('Producto Molongo\'', str(r_json)) # Check successful search
self.assertIn('Producto Molongo2', str(r_json)) # Check successful search
r_json = self.app.get('/products/' + str(self.user_id)).get_json()
self.assertIn('Producto Molongo\'', str(r_json)) # Check successful list by user
self.assertIn('Producto Molongo2', str(r_json)) # Check successful search
self.app.delete('/user')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
#@unittest.skip
def test_list_search_product_adv(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/product', data=self.prod_data, content_type='application/json')
self.app.post('/product', data=self.prod_data2, content_type='application/json')
self.app.get('/logout')
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json')
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
r_json = self.app.get('/products').get_json()
self.assertIn('Producto Molongo', str(r_json)) # Check successful list
prod_search = json.dumps({
"descript": "wonderful",
"price_max": 35,
"price_min": 33,
"category": "Moda",
"title": "Producto Molongo",
"place": "Zaragoza"
})
r_json = self.app.post('/search/products/adv', data=prod_search, content_type='application/json').get_json()
self.assertIn('Producto Molongo2', str(r_json)) # Check successful search
prod_search = json.dumps({
"price_max": 35,
"price_min": 33
})
r_json = self.app.post('/search/products/adv', data=prod_search, content_type='application/json').get_json()
self.assertIn('Producto Molongo2', str(r_json)) # Check successful search
self.assertNotIn('This product is wonderful uno', str(r_json)) # Check successful search
self.app.delete('/user')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
#@unittest.skip
def test_follows_product(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
prod_id = self.app.post('/product', data=self.prod_data, content_type='application/json').get_json()[
"message"]
r_json = self.app.post('/product/' + str(prod_id) + '/follow').get_json()
self.assertIn('follows', str(r_json)) # Check successful follow
r_json = self.app.get('/user/follows').get_json()
self.assertIn("Producto Molongo", str(r_json)) # Check the follows
r_json = self.app.post('/product/' + str(prod_id) + '/unfollow').get_json()
self.assertIn('unfollows', str(r_json)) # Check successful unfollow
r_json = self.app.get('/user/follows').get_json()
self.assertNotIn('Producto Molongo', str(r_json)) # Check the unfollows
#@unittest.skip
def test_ban_products(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.put('/user/' + str(self.user_id) + '/mod', data=UserDataBase.user_data,
content_type='application/json')
prod_id = self.app.post('/product', data=self.prod_data, content_type='application/json').get_json()[
"message"]
ban_data = json.dumps({
"ban_reason": "Ban for example"
})
r_json = self.app.put('/product/' + str(prod_id) + '/ban', data=ban_data,
content_type='application/json').get_json()
self.assertIn('banned', str(r_json)) # Check successful ban
#@unittest.skip
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.delete('/user')
class ProductsBids(unittest.TestCase):
#@unittest.skip
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create user and login
self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
self.product_id = \
self.app.post('/product', data=ProductDataBase.prod_data, content_type='application/json').get_json()[
"message"]
#@unittest.skip
def test_open_close_bid(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
data = json.dumps({"bid_until": "1999-12-24 23:45:10"})
r_json = self.app.put('/product/' + str(self.product_id) + "/bidup", data=data,
content_type='application/json').get_json()
self.assertIn('1999-12-24 23:45:10', str(r_json)) # Check successful bid up
r_json = self.app.get('/bids').get_json()
self.assertIn('\'length\': ', str(r_json)) # Check bids
r_json = self.app.get('/bid/' + str(self.product_id)).get_json()
self.assertIn('1999-12-24 23:45:10', str(r_json)) # Check bid
r_json = self.app.put('/product/' + str(self.product_id) + "/biddown", data=data,
content_type='application/json').get_json()
self.assertIn('finished', str(r_json)) # Check successful bid down
#@unittest.skip
def test_bid_prod(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
data = json.dumps({"bid_until": "2999-12-24 23:45:10"})
self.app.put('/product/' + str(self.product_id) + "/bidup", data=data, content_type='application/json')
data = json.dumps({"bid": "999.99"})
r_json = self.app.post('/bid/' + str(self.product_id), data=data, content_type='application/json').get_json()
self.assertIn('Successful bid with ' + str(999.99), str(r_json)) # Check bids
r_json = self.app.get('/bid/' + str(self.product_id)).get_json()
self.assertIn('999.99', str(r_json)) # Check bid with the bid
#@unittest.skip
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.delete('/user')
class TradesProducts(unittest.TestCase):
#@unittest.skip
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.seller_id = self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.put('/user/' + str(self.seller_id) + '/mod')
self.buyer_id = self.user_id = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
# Post product
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
self.product_id = \
self.app.post('/product', data=ProductDataBase.prod_data, content_type='application/json').get_json()[
"message"]
self.app.get('/logout')
#@unittest.skip
def test_trades(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Create Trade from buyer
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
json_data = json.dumps({
"seller_id": str(self.seller_id),
"buyer_id": str(self.buyer_id),
"product_id": str(self.product_id)
})
r_json = self.app.post('/trade', data=json_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful trade created
trade_id = r_json["message"]
json_data = json.dumps({
"price": "99.9",
"products": [],
})
r_json = self.app.post('/trade/' + str(trade_id) + '/offer', data=json_data,
content_type='application/json').get_json()
self.assertIn('Successful new offer', str(r_json)) # Check create offer
json_data = json.dumps({
"price": "22.9",
"products": [],
})
r_json = self.app.put('/trade/' + str(trade_id) + '/offer', data=json_data,
content_type='application/json').get_json()
self.assertIn('Successful offer update', str(r_json)) # Check update
r_json = self.app.get('/trades').get_json()
self.assertIn('\'length\': ', str(r_json)) # Check list trades
r_json = self.app.get('/trade/' + str(trade_id)).get_json()
self.assertIn('\'seller_id\': ' + str(self.seller_id), str(r_json)) # Check get info
            r_json = self.app.put('/trade/' + str(trade_id) + '/confirm').get_json()
            self.assertIn('Success confirm', str(r_json)) # Check first confirm
            r_json = self.app.put('/trade/' + str(trade_id) + '/confirm').get_json()
            self.assertIn('Success unconfirm', str(r_json)) # Check that a second confirm toggles to unconfirm
            r_json = self.app.put('/trade/' + str(trade_id) + '/confirm').get_json()
            self.assertIn('Success confirm', str(r_json)) # Check re-confirm by the buyer
            self.app.get('/logout')
            self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
            r_json = self.app.put('/trade/' + str(trade_id) + '/confirm').get_json()
            self.assertIn('Success confirm and close', str(r_json)) # Check seller confirm closes the trade
            # See sold from seller
            r_json = self.app.get('/products/' + str(self.seller_id)).get_json()
            self.assertIn('\'sold\': \'True\'', str(r_json)) # Check the product is marked as sold
            r_json = self.app.get('/products').get_json()
            self.assertNotIn('Producto Molongo', str(r_json)) # Check the sold product is no longer listed
#@unittest.skip
def test_trades_delete(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Create Trade from buyer
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
json_data = json.dumps({
"seller_id": str(self.seller_id),
"buyer_id": str(self.buyer_id),
"product_id": str(self.product_id)
})
r_json = self.app.post('/trade', data=json_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful trade created
trade_id = r_json["message"]
json_data = json.dumps({
"price": "99.9",
"products": [],
})
r_json = self.app.post('/trade/' + str(trade_id) + '/offer', data=json_data,
content_type='application/json').get_json()
self.assertIn('Successful new offer', str(r_json)) # Check create offer
json_data = json.dumps({
"price": "22.9",
"products": [],
})
r_json = self.app.put('/trade/' + str(trade_id) + '/offer', data=json_data,
content_type='application/json').get_json()
self.assertIn('Successful offer update', str(r_json)) # Check update
json_data = json.dumps({
"body": "HELLO THERE!"
})
r_json = self.app.post('/msgs/' + str(trade_id), data=json_data, content_type='application/json').get_json()
self.assertIn('Message created', str(r_json)) # Check successful creation
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.get('/trades').get_json()
self.assertIn('\'length\': ', str(r_json)) # Check list trades
r_json = self.app.get('/trade/' + str(trade_id)).get_json()
self.assertIn('\'seller_id\': ' + str(self.seller_id), str(r_json)) # Check get info
self.app.put('/trade/' + str(trade_id) + '/confirm').get_json()
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
            r_json = self.app.put('/trade/' + str(trade_id) + '/delete').get_json()
            self.assertIn('Success delete', str(r_json)) # Check the trade is deleted
            r_json = self.app.get('/trades').get_json()
            self.assertNotIn('22.9', str(r_json)) # Check the deleted trade's offer is gone from the list
#@unittest.skip
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Post test
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
self.app.delete('/user/' + str(self.buyer_id))
self.app.delete('/user/' + str(self.seller_id))
class CommentsAndMessages(unittest.TestCase):
#@unittest.skip
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.seller_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.put('/user/' + str(self.seller_id) + '/mod')
self.buyer_id = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
#@unittest.skip
def test_comments(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
json_data = json.dumps({
"body": "ESRES UN CRACK",
"points": "3",
})
r_json = self.app.post('/comment/' + str(self.seller_id), data=json_data,
content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful creation
r_json = self.app.get('/comments/' + str(self.seller_id)).get_json()
self.assertIn('ESRES UN CRACK', str(r_json)) # Check successful get
# @unittest.skip
def test_delete_comment(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
json_data = json.dumps({
"body": "ESRES UN CRACK",
"points": "3",
})
comment_id = self.app.post('/comment/' + str(self.seller_id), data=json_data,
content_type='application/json').get_json()["message"]
r_json = self.app.get('/comments/' + str(self.seller_id)).get_json()
self.assertIn('ESRES UN CRACK', str(r_json)) # Check successful get
self.app.post('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.delete('/comment/' + str(comment_id) + "/del", data=json_data,
content_type='application/json').get_json()
self.assertIn('deleted', str(r_json)) # Check successful get
r_json = self.app.get('/comments/' + str(self.seller_id)).get_json()
self.assertNotIn('ESRES UN CRACK', str(r_json)) # Check successful get
#@unittest.skip
def test_messages(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Post product
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
self.product_id = \
self.app.post('/product', data=ProductDataBase.prod_data, content_type='application/json').get_json()[
"message"]
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
json_data = json.dumps({
"seller_id": str(self.seller_id),
"buyer_id": str(self.buyer_id),
"product_id": str(self.product_id)
})
trade_id = self.app.post('/trade', data=json_data, content_type='application/json').get_json()["message"]
json_data = json.dumps({
"body": "HELLO THERE!"
})
r_json = self.app.post('/msgs/' + str(trade_id), data=json_data, content_type='application/json').get_json()
self.assertIn('Message created', str(r_json)) # Check successful creation
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"body": "HELLO HERE!"
})
r_json = self.app.post('/msgs/' + str(trade_id), data=json_data, content_type='application/json').get_json()
self.assertIn('Message created', str(r_json)) # Check successful creation
r_json = self.app.get('/msgs/' + str(trade_id)).get_json()
self.assertIn('HELLO HERE!', str(r_json)) # Check successful get
#@unittest.skip
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.get('/logout').get_json()
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json').get_json()
self.app.delete('/user/' + str(self.buyer_id)).get_json()
self.app.delete('/user/' + str(self.seller_id)).get_json()
class Notifications(unittest.TestCase):
#@unittest.skip
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.put('/user/' + str(self.user_id) + '/mod')
#@unittest.skip
def test_delete_all_notifications(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"user_id": self.user_id,
"product_id": 0,
"category": "null",
"text": "Nuevo producto en categoria e interés"
})
self.app.post('/notification', data=json_data, content_type='application/json').get_json()
json_data = json.dumps({
"user_id": self.user_id,
"product_id": 0,
"category": "null",
"text": "Otra cosa"
})
self.app.post('/notification', data=json_data, content_type='application/json').get_json()
json_data = json.dumps({
"user_id": self.user_id,
"product_id": 0,
"category": "null",
"text": "Otra cosa 2"
})
self.app.post('/notification', data=json_data, content_type='application/json').get_json()
r_json = self.app.delete('/notifications').get_json()
self.assertIn('Successful delete', str(r_json)) # Check successful
r_json = self.app.get('/notifications').get_json()
self.assertIn('0', str(r_json)) # Check successful get 0 elements
#@unittest.skip
def test_create_get_notification(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"user_id": self.user_id,
"product_id": 0,
"category": "null",
"text": "Otra cosa 2"
})
r_json = self.app.post('/notification', data=json_data, content_type='application/json').get_json()
self.assertIn('Notification pushed', str(r_json)) # Check successful creation
r_json = self.app.get('/notifications').get_json()
self.assertIn('Otra cosa', str(r_json)) # Check successful get
#@unittest.skip
def test_follow_notify(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
user_2 = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
r_json = self.app.post('/product', data=ProductDataBase.prod_data, content_type='application/json').get_json()
product_id = r_json["message"]
# Follow
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
self.app.post('/product/' + str(product_id) + '/follow')
# Update
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
r_json = self.app.put('/product/' + str(product_id), data=ProductDataBase.prod_update,
content_type='application/json').get_json()
# Check
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.get('/notifications').get_json()
self.assertIn('precio', str(r_json)) # Check successful get
self.app.delete('/user/' + str(user_2)).get_json()
# @unittest.skip
def test_pay_notify(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
user_2 = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
r_json = self.app.post('/product', data=ProductDataBase.prod_data,
content_type='application/json').get_json()
product_id = r_json["message"]
# add interest
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"list": ["Moda", "Complementos"]
})
self.app.post('/categories/interest', data=json_data, content_type='application/json')
# Pay
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
iban = "ES809999123125412535"
json_data = json.dumps({
"amount": 9.99,
"iban": iban,
"boost_date": "1999-12-24",
"product_id": int(product_id)
})
r_json = self.app.post('/payment', data=json_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful pay created
# Check
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.get('/notifications').get_json()
self.assertIn('categoria', str(r_json)) # Check successful get
self.app.delete('/user/' + str(user_2)).get_json()
# @unittest.skip
def test_product_notify(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
user_2 = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
# add interest
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"list": ["Moda", "Complementos"]
})
self.app.post('/categories/interest', data=json_data, content_type='application/json')
# New product
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
self.app.post('/product', data=ProductDataBase.prod_data,
content_type='application/json')
# Check
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.get('/notifications').get_json()
self.assertIn('categoria', str(r_json)) # Check successful get
self.app.delete('/user/' + str(user_2)).get_json()
#@unittest.skip
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.delete('/user')
class UploadFiles(unittest.TestCase):
#@unittest.skip
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
#@unittest.skip
def test_upload(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
f = open('./test/jake.jpg', 'rb')
data = {'file': f}
r_json = self.app.post('/upload', content_type='multipart/form-data', data=data).get_json()
file_url = r_json["message"]
f.close()
self.assertIn('info', str(r_json)) # Check successful upload
r = self.app.get(file_url)
self.assertIn("[200 OK]", str(r))
r.close()
file = file_url.split('/')[2]
os.remove("./images/" + file)
#@unittest.skip
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.delete('/user')
class Reports(unittest.TestCase):
#@unittest.skip
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.put('/user/' + str(self.user_id) + '/mod')
#@unittest.skip
def test_new_report(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"user_id": self.user_id,
"reason": "Porque si y punto en boca"
})
r_json = self.app.post('/report', data=json_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful upload
product_id = self.app.post('/product', data=ProductDataBase.prod_data, content_type='application/json').get_json()["message"]
json_data = json.dumps({
"user_id": self.user_id,
"product_id": product_id,
"reason": "Porque si y punto en boca otra vez"
})
r_json = self.app.post('/report', data=json_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful upload
#@unittest.skip
def test_get_reports(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"user_id": self.user_id,
"reason": "Porque si y punto en boca"
})
self.app.post('/report', data=json_data, content_type='application/json')
r_json = self.app.get('/reports').get_json()
self.assertIn('Porque si y punto en boca', str(r_json)) # Check successful get
#@unittest.skip
def test_delete_report(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"user_id": self.user_id,
"reason": "Porque si y punto en boca"
})
id = self.app.post('/report', data=json_data, content_type='application/json').get_json()["message"]
r_json = self.app.delete('/report/'+str(id)).get_json()
self.assertIn('deleted', str(r_json)) # Check successful upload
r_json = self.app.get('/reports').get_json()
self.assertNotIn('Porque si y punto en boca', str(r_json))
#@unittest.skip
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.delete('/user')
class Interest(unittest.TestCase):
#@unittest.skip
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.put('/user/' + str(self.user_id) + '/mod')
#@unittest.skip
def test_delete_all_interests(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"list": ["Moda", "Deporte"]
})
r_json = self.app.post('/categories/interest', data=json_data, content_type='application/json').get_json()
self.assertIn("Interest pushed", str(r_json)) # Check successful get 0 elements
r_json = self.app.get('/categories/interest').get_json()
self.assertIn("Moda", str(r_json)) # Check successful get 0 elements
r_json = self.app.delete('/categories/interest', data=json_data, content_type='application/json' ).get_json()
self.assertIn('Successful delete', str(r_json)) # Check successful
r_json = self.app.get('/categories/interest').get_json()
self.assertIn('0', str(r_json)) # Check successful get 0 elements
#@unittest.skip
def test_get_categories(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.get('/categories').get_json()
self.assertIn('Moda', str(r_json)) # Check successful upload
#@unittest.skip
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.delete('/user')
class PaymentsTest(unittest.TestCase):
#@unittest.skip
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.modder = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.put('/user/' + str(self.modder) + '/mod')
self.user = self.user_id = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
# Post product
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
self.product_id = \
self.app.post('/product', data=ProductDataBase.prod_data, content_type='application/json').get_json()[
"message"]
self.app.get('/logout')
#@unittest.skip
def test_new_pay(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Create Trade from buyer
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
iban = "ES809999123125412535"
json_data = json.dumps({
"amount": 9.99,
"iban": iban,
"boost_date": "1999-12-24",
"product_id": int(self.product_id)
})
r_json = self.app.post('/payment', data=json_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful pay created
#@unittest.skip
def test_delete_pay(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Create Trade from buyer
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
iban = "ES809999123125412535"
json_data = json.dumps({
"amount": 9.99,
"iban": iban,
"boost_date": "1999-12-24",
"product_id": int(self.product_id)
})
r_json = self.app.post('/payment', data=json_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful pay created
pay_id = r_json["message"]
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.put('/payment/check/' + str(pay_id), data=json_data, content_type='application/json').get_json()
            self.assertIn('deleted', str(r_json)) # Check the payment is deleted after being checked
r_json = self.app.put('/payment/check/' + str(pay_id), data=json_data,
content_type='application/json').get_json()
            self.assertIn('not found', str(r_json)) # Check the payment is no longer found
#@unittest.skip
def test_list_pays(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Create Trade from buyer
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
iban = "ES809999123125412535"
json_data = json.dumps({
"amount": 9.99,
"iban": iban,
"boost_date": "1999-12-24",
"product_id": int(self.product_id)
})
self.app.post('/payment', data=json_data, content_type='application/json').get_json()
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.get('/payments').get_json()
            self.assertIn(iban, str(r_json)) # Check the payment is listed
#@unittest.skip
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Post test
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
self.app.delete('/user/' + str(self.user))
self.app.delete('/user/' + str(self.modder))
if __name__ == "__main__":
unittest.main()
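# Usage note (sketch, not part of the original module): the suites above can also be
# run selectively with unittest's CLI, e.g.
#   python -m unittest <this_module>.ProductsBids -v
#   python -m unittest <this_module>.ProductsBids.test_bid_prod -v
# where <this_module> stands for this file's module name (left unspecified here).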
| python |
import pydub
import pytube
# sanitize_filename is not provided by pydub or pytube; the third-party
# "sanitize-filename" package is assumed here to supply it.
from sanitize_filename import sanitize_filename
output_path = "C:/Users/epics/Music"
segments = []
playlist = pytube.Playlist("https://youtube.com/playlist?list=PL3PHwew8KnCl2ImlXd9TQ6UnYveqK_5MC")
for i in range(0,16):
segments.append(pydub.AudioSegment.from_file(f"{output_path}/.ytmp3_cache/{i}.mp3",format="mp4"))
sum(segments).export(f"{output_path}/{sanitize_filename(playlist.title)}.mp3", format="mp3")
| python |
import mapping
import struct
import types
#import logging
#log = logging.getLogger('util.primitives.structures')
class enum(list):
'''
>>> suits = enum(*'spades hearts diamonds clubs'.split())
>>> print suits.clubs
3
>>> print suits['hearts']
1
'''
def __init__(self, *args):
list.__init__(self, args)
def __getattr__(self, elem):
return self.index(elem)
def __getitem__(self, i):
if isinstance(i, basestring):
return self.__getattr__(i)
else:
return list.__getitem__(self, i)
class EnumValue(object):
def __init__(self, name, int, **kwds):
self.str = name
self.int = int
for k,v in kwds.items():
setattr(self, k, v)
def __str__(self):
return self.str
def __int__(self):
return self.int
def __cmp__(self, other):
try:
other_int = int(other)
except:
return 1
else:
return cmp(int(self), other_int)
def __repr__(self):
return '<%s %s=%d>' % (type(self).__name__, str(self), int(self))
class _EnumType(type):
def __new__(self, clsname, bases, vardict):
clsdict = {}
values = []
ValueType = vardict.get('ValueType', EnumValue)
for name, value in vardict.items():
if name == 'ValueType' or name.startswith('_') or isinstance(value, types.FunctionType):
clsdict[name] = value
continue
if isinstance(value, dict):
EVal = ValueType(name, **value)
elif isinstance(value, int):
EVal = ValueType(name, value)
elif isinstance(value, tuple):
EVal = ValueType(name, *value)
values.append(EVal)
for val in values:
clsdict[str(val)] = val
_known = {}
for val in values:
values_dict = dict(vars(val))
equiv = values_dict.values()
for eq in equiv:
try:
hash(eq)
except TypeError:
continue
_known[eq] = val
clsdict['_known'] = _known
return type.__new__(self, clsname, bases, clsdict)
class _Enum(object):
__metaclass__ = _EnumType
ValueType = EnumValue
def __call__(self, something):
if isinstance(something, self.ValueType):
return something
if isinstance(something, dict):
something = something.get('int')
return self._known.get(something, None)
def Enum(Name, Type = EnumValue, **kws):
enum_dict = dict(vars(_Enum))
enum_dict.update(ValueType = Type, **kws)
return _EnumType(Name, (_Enum,), enum_dict)()
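# Illustrative usage (sketch) of the Enum factory above:
#   Status = Enum('Status', online=1, offline=0)
#   Status.online               -> <EnumValue online=1>
#   int(Status.online)          -> 1
#   Status(1) is Status.online  -> True   (reverse lookup via the _known mapping)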
def new_packable(fmt, byteorder='!', invars=None):
invars = invars or []
slots = fmt[::2]
fmtstring = byteorder + ''.join(fmt[1::2])
class packable(object):
__slots__, _fmt, invariants = slots, fmtstring, invars
@classmethod
def unpack(cls,data):
o = cls(*struct.unpack(cls._fmt, data))
assert all(invar(o) for invar in cls.invariants)
return o
def __init__(self, *a, **kw):
i = -1
for i, d in enumerate(a): setattr(self, self.__slots__[i], d)
for field in self.__slots__[i+1:]: setattr(self, field, 0)
for k in kw: setattr(self, k, kw[k])
def pack(self):
return struct.pack(self._fmt, *(getattr(self, field)
for field in self.__slots__))
def __iter__(self):
return ((s, getattr(self, s)) for s in self.__slots__)
def __len__(self): return struct.calcsize(self._fmt)
__str__ = pack
def __eq__(self, other):
o = ()
for slot in self.__slots__:
sval = getattr(self, slot)
oval = getattr(other, slot, o)
if oval is o: return False
if oval != sval: return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def copy(self):
return self.unpack(self.pack())
return packable
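# Illustrative usage (sketch) of new_packable: the fmt list alternates field names
# and struct codes, so
#   Header = new_packable(['length', 'H', 'flags', 'B'])
#   h = Header(5, 2)
#   h.pack()                       -> struct.pack('!HB', 5, 2)
#   Header.unpack(h.pack()) == h   -> True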
def unpack_named(format, *args):
"""
Like struct.unpack, but with names. Name/value pairs are put into a dictionary and
returned.
Usage:
my_hash = unpack_named( data format, name1, name2, ..., nameN, data )
In addition to all the normal pack/unpack keycodes like I, B, and H, you can also
use an uppercase R to indicate the "rest" of the data. Logically, the R can only
appear at the end of the format string.
Example:
>>> testdata = struct.pack("!HIB", 1,4000L,3) + "some extraneous data"
>>> magic_hash = unpack_named("!HIBR", "one", "four thousand long", "three", "extra", testdata)
>>> v = magic_hash.values()
>>> v.sort()
>>> print v
[1, 3, 4000, 'some extraneous data']
"""
data = args[-1]
# if format has our special R character, make sure it's at end
rest = None
if 'R' in format:
if format.find('R') != len(format) - 1:
raise AssertionError("R character in format string to unpack_named can only appear at the end")
else:
format = format[:-1] # chop off the last character
sz = struct.calcsize(format)
# slice the "rest" off of the data
rest = data[sz:]
data = data[:sz]
# unpack using the ever handy struct module
tup = struct.unpack(format, data)
# give names to our newly unpacked items
magic_hash = {}
for i in xrange(len(tup)):
magic_hash[ args[i] ] = tup[i]
if rest:
magic_hash[ args[i+1] ] = rest
return mapping.to_storage(magic_hash)
def remove_from_list(my_list, remove_these):
my_list = my_list[:]
remove_list = [e for e in my_list if e in remove_these]
for e in remove_list: my_list.remove(e)
return my_list
class oset(set):
def __init__(self, iterable=None):
self.data = []
if iterable is None:
iterable = []
self.update(iterable, init=True)
def add(self, val):
'''
>>> a = oset([1,2,3])
>>> a.add(3)
>>> a
oset([1, 2, 3])
>>> a = oset([1,2,3])
>>> a.add(4)
>>> a
oset([1, 2, 3, 4])
'''
if val not in self.data:
self.data.append(val)
set.add(self, val)
def __getitem__(self,n):
'''
>>> a = oset([8,4,6])
>>> a[1]
4
>>> a[1:]
oset([4, 6])
'''
if isinstance(n, slice):
return type(self)(self.data[n])
return self.data[n]
def __iter__(self):
return iter(self.data)
def clear(self):
del self.data[:]
set.clear(self)
def pop(self):
ret = set.pop(self)
self.data.remove(ret)
return ret
def remove(self, item):
self.data.remove(item)
set.remove(self, item)
def discard(self, item):
try: self.remove(item)
except ValueError: pass
except KeyError: pass
def union(self, other):
if not isinstance(other, oset):
other = oset(other)
return self | other
def __or__(self, other):
if not isinstance(other, set):
raise ValueError, "other must be a set"
ret = oset(self)
ret.update(other)
return ret
def intersection(self, other):
if not isinstance(other, oset):
other = oset(other)
return self & other
def __and__(self, other):
if not isinstance(other, set):
raise ValueError, "other must be a set"
a = oset(self)
b = other
return a - (a - b)
def difference(self, other):
other = oset(other)
return self - other
def __sub__(self, other):
if not isinstance(other, set):
raise ValueError, "other must be a set"
first = oset(self)
first -= other
return first
def symmetric_difference(self, other):
if not isinstance(other, oset):
other = oset(other)
return self ^ other
def __xor__(self, other):
if not isinstance(other, set):
raise ValueError, "other must be a set"
return (self | other) - (self & other)
def copy(self):
return oset(self)
def update(self, other, init=False):
if not isinstance(other, oset) and not init:
other = oset(other)
self.__ior__(other, init=init)
def __ior__(self, other, init=False):
if not isinstance(other, set) and not init:
raise ValueError, "other must be a set"
for i in other:
self.add(i)
return self
def intersection_update(self, other):
if not isinstance(other, oset):
other = oset(other)
self &= other
    def __iand__(self, other):
        if not isinstance(other, set):
            raise ValueError, "other must be a set"
        # Keep only elements also present in other (in-place intersection)
        self -= (self - other)
        return self
def difference_update(self, other):
if not isinstance(other, oset):
other = oset(other)
self -= other
def __isub__(self, other):
if not isinstance(other, set):
raise ValueError, "other must be a set"
for item in other:
self.discard(item)
return self
def symmetric_difference_update(self, other):
if not isinstance(other, oset):
other = oset(other)
self ^= other
def __ixor__(self, other):
if not isinstance(other, set):
raise ValueError, "other must be a set"
b = oset(other)
b -= self
self -= other
self |= b
return self
class roset(oset):
def add(self,val):
if val in self:
self.data.remove(val)
self.data.append(val)
else:
oset.add(self,val)
def insert(self, idx, item):
if item in self:
self.data.remove(item)
self.data.insert(idx, item)
set.add(self, item)
class EmptyQueue(Exception): pass
class PriorityQueue(object):
'''
PriorityQueues sort their elements on insertion, using the heapq module.
Not thread-safe!
>>> pq = PriorityQueue('last')
>>> pq += ('first', 0)
>>> pq += ('third', 3)
>>> pq += ('second', 2)
>>> while len(pq): print pq.next()
first
second
third
last
>>> len(pq)
0
'''
default_priority = 5
def __init__(self, *args):
self.q = [(self.default_priority, arg) for arg in args]
# Sort elements if we got them
self.key = lambda a: a[0]
self.q.sort(key=self.key)
def __len__(self):
return len(self.q)
def count(self, x):
return self.q.count(x)
def peek(self):
'Peek at the next element.'
if not self.q: raise EmptyQueue
__, item = self.q[0]
return item
def __iadd__(self, elemtuple):
if isinstance(elemtuple, (tuple, list)):
if len(elemtuple) != 2:
raise TypeError('add to the PriorityQueue like += (item, priority) or just += item')
self.append(*elemtuple)
else:
self.append(elemtuple)
return self
def __nonzero__(self):
return self.q.__len__()
def append(self, item, priority = default_priority):
self.q.append((priority, item))
self.q.sort(key=self.key)
def next(self):
__, item = self.q.pop(0)
return item
def __repr__(self):
return "<PriorityQueue %r>" % self.q
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
| python |
from __future__ import absolute_import
from __future__ import print_function
import os
import yaml
import argparse
import sys
import numpy as np
from flask import Flask, request, jsonify
import json
import io
from werkzeug.utils import secure_filename
import subprocess
AUDIO_STORAGE = os.path.join("/content", "audio_storage")
if not os.path.isdir(AUDIO_STORAGE):
os.makedirs(AUDIO_STORAGE)
import timeit
from DatasetLoader import loadWAV
from SpeakerNet import *
import wget
parser = argparse.ArgumentParser(description = "SpeakerNet");
parser.add_argument('--config', type=str, default=None, help='Config YAML file');
## Data loader
parser.add_argument('--max_frames', type=int, default=200, help='Input length to the network for training');
parser.add_argument('--eval_frames', type=int, default=300, help='Input length to the network for testing; 0 uses the whole files');
parser.add_argument('--batch_size', type=int, default=200, help='Batch size, number of speakers per batch');
parser.add_argument('--max_seg_per_spk', type=int, default=500, help='Maximum number of utterances per speaker per epoch');
parser.add_argument('--nDataLoaderThread', type=int, default=5, help='Number of loader threads');
parser.add_argument('--augment', type=bool, default=False, help='Augment input')
## Training details
parser.add_argument('--test_interval', type=int, default=10, help='Test and save every [test_interval] epochs');
parser.add_argument('--max_epoch', type=int, default=500, help='Maximum number of epochs');
parser.add_argument('--trainfunc', type=str, default="", help='Loss function');
## Optimizer
parser.add_argument('--optimizer', type=str, default="adam", help='sgd or adam');
parser.add_argument('--scheduler', type=str, default="steplr", help='Learning rate scheduler');
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate');
parser.add_argument("--lr_decay", type=float, default=0.95, help='Learning rate decay every [test_interval] epochs');
parser.add_argument('--weight_decay', type=float, default=0, help='Weight decay in the optimizer');
## Loss functions
parser.add_argument("--hard_prob", type=float, default=0.5, help='Hard negative mining probability, otherwise random, only for some loss functions');
parser.add_argument("--hard_rank", type=int, default=10, help='Hard negative mining rank in the batch, only for some loss functions');
parser.add_argument('--margin', type=float, default=0.1, help='Loss margin, only for some loss functions');
parser.add_argument('--scale', type=float, default=30, help='Loss scale, only for some loss functions');
parser.add_argument('--nPerSpeaker', type=int, default=1, help='Number of utterances per speaker per batch, only for metric learning based losses');
parser.add_argument('--nClasses', type=int, default=5994, help='Number of speakers in the softmax layer, only for softmax-based losses');
## Load and save
parser.add_argument('--initial_model', type=str, default="", help='Initial model weights');
parser.add_argument('--save_path', type=str, default="exps/exp1", help='Path for model and logs');
## Training and test data
parser.add_argument('--train_list', type=str, default="data/train_list.txt", help='Train list');
parser.add_argument('--test_list', type=str, default="data/test_list.txt", help='Evaluation list');
parser.add_argument('--train_path', type=str, default="data/voxceleb2", help='Absolute path to the train set');
parser.add_argument('--test_path', type=str, default="data/voxceleb1", help='Absolute path to the test set');
parser.add_argument('--musan_path', type=str, default="data/musan_split", help='Absolute path to the MUSAN noise corpus (augmentation)');
parser.add_argument('--rir_path', type=str, default="data/RIRS_NOISES/simulated_rirs", help='Absolute path to the simulated RIRs (augmentation)');
## Model definition
parser.add_argument('--n_mels', type=int, default=40, help='Number of mel filterbanks');
parser.add_argument('--log_input', type=bool, default=False, help='Log input features')
parser.add_argument('--model', type=str, default="", help='Name of model definition');
parser.add_argument('--encoder_type', type=str, default="SAP", help='Type of encoder');
parser.add_argument('--nOut', type=int, default=512, help='Embedding size in the last FC layer');
## For test only
parser.add_argument('--eval', dest='eval', action='store_true', help='Eval only')
## Distributed and mixed precision training
parser.add_argument('--port', type=str, default="8888", help='Port for distributed training, input as text');
parser.add_argument('--distributed', dest='distributed', action='store_true', help='Enable distributed training')
parser.add_argument('--mixedprec', dest='mixedprec', action='store_true', help='Enable mixed precision training')
args = parser.parse_args();
## Parse YAML
def find_option_type(key, parser):
for opt in parser._get_optional_actions():
if ('--' + key) in opt.option_strings:
return opt.type
raise ValueError
if args.config is not None:
with open(args.config, "r") as f:
yml_config = yaml.load(f, Loader=yaml.FullLoader)
for k, v in yml_config.items():
if k in args.__dict__:
typ = find_option_type(k, parser)
args.__dict__[k] = typ(v)
else:
sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
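## Example (illustrative only) of a YAML file that could be passed via --config;
## keys must match existing argparse options and are cast to the option's declared type:
##   eval_frames: 400
##   n_mels: 64
##   initial_model: baseline_v2_ap.model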
#
# Load Model
#
def loadParameters(path, model):
    if not os.path.isfile(path):
        url = 'http://www.robots.ox.ac.uk/~joon/data/baseline_v2_ap.model'
        # Download the pretrained baseline and load from the downloaded file
        path = wget.download(url, '/app/baseline_v2_ap.model')
self_state = model.module.state_dict()
loaded_state = torch.load(path, map_location="cpu")
for name, param in loaded_state.items():
origname = name
if name not in self_state:
name = name.replace("module.", "")
if name not in self_state:
print("%s is not in the model."%origname)
continue
if self_state[name].size() != loaded_state[origname].size():
print("Wrong parameter length: %s, model: %s, loaded: %s"%(origname, self_state[name].size(), loaded_state[origname].size()))
continue
self_state[name].copy_(param)
def load_model():
s = SpeakerNetCpu(**vars(args))
s = WrappedModel(s).cpu()
print("load model", args.initial_model)
loadParameters(path=args.initial_model , model= s)
pytorch_total_params = sum(p.numel() for p in s.module.__S__.parameters())
print('Total parameters: ',pytorch_total_params)
return s
def loadAudio(file):
audio = loadWAV(file, args.eval_frames, evalmode=True)
return torch.FloatTensor(audio)
# Flask
app = Flask(__name__)
s = load_model()
@app.route("/api/predict", methods=['POST'])
def api_predict():
"""
Required params:
audio
"""
audio_file_1 = request.files['audio'] # Required
if audio_file_1:
filename_1 = os.path.join(AUDIO_STORAGE,secure_filename(audio_file_1.filename))
start = timeit.default_timer()
audio_file_1.save(filename_1) # Save audio in audio_storage, path: audio_storage/filename_1
out = subprocess.call('ffmpeg -y -i %s -ac 1 -vn -acodec pcm_s16le -ar 16000 %s >/dev/null 2>/dev/null' %(filename_1,filename_1), shell=True)
if out != 0:
            raise ValueError('Conversion failed %s.' % filename_1)
data = loadAudio(filename_1)
stop = timeit.default_timer()
print('Load file: ', stop - start)
start = timeit.default_timer()
re = s(data).detach().numpy().tolist()
stop = timeit.default_timer()
print('Model run: ', stop - start)
return json.dumps({'vector': re})
return "please provide audio file"
def test():
with open('/content/drive/MyDrive/colabdrive/Thesis/devices/train.txt', 'r') as f:
lines = f.readlines()
result = {}
for line in lines:
filename_1 = line.split(" ")[-1].rstrip()
name = line.split(" ")[0]
if name not in result:
result[name] = []
try:
data = loadAudio(filename_1)
re = s(data).detach().numpy().tolist()
result[name].append(re)
except Exception as e:
print(e)
import json
with open('/content/result.json', 'w') as fp:
json.dump(result, fp)
if __name__ == '__main__':
# app.run(host='0.0.0.0', port='6677', debug=False)
test()
| python |
import tensorflow as tf
import numpy as np
x_data = np.random.rand(100).astype(np.float32)
y_data = 0.1*x_data + 0.3
W = tf.Variable(tf.random_uniform([1],-1.0,1.0))  # generate a uniformly distributed random tensor
b = tf.Variable(tf.zeros([1]))
y = W*x_data + b
loss = tf.reduce_mean(tf.square(y-y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)  # run the initializer to activate the variables
for step in range(201):
sess.run(train)
if step % 20 == 0:
print(step,sess.run(W),sess.run(b)) | python |
output = input['fields']
| python |
from functools import reduce
from typing import List
import numpy as np
__all__ = [
"ABCDElement", "Media", "FreeSpace", "ThinLens",
"FlatInterface", "CurvedInterface", "ABCDCompositeElement",
"ThickLens", "PlanoConvexLens"]
class ABCDElement:
@property
def length(self) -> float:
return 0
def __init__(self, *args, name="") -> None:
self.name = name
"""Accepts A, B, C, D matrix elements or a matrix itself"""
if len(args) == 4:
self._A = args[0]
self._B = args[1]
self._C = args[2]
self._D = args[3]
elif len(args) == 1 and isinstance(args[0], np.ndarray) and self.__is_square_matrix_of_dim(args[0], 2):
self.matrix = args[0]
else:
raise ValueError("No matrix definition present in init.")
def __is_square_matrix_of_dim(self, m: np.ndarray, dim: int):
return all(len(row) == len(m) for row in m) and len(m) == dim
@property
def matrix(self) -> np.ndarray:
return np.array([[self._A, self._B], [self._C, self._D]])
@matrix.setter
def matrix(self, value: np.ndarray):
self._A = value[0][0]
self._B = value[0][1]
self._C = value[1][0]
self._D = value[1][1]
def act(self, q_param: complex) -> complex:
nom = self._A * q_param + self._B
denom = self._C * q_param + self._D
return nom / denom
class Media(ABCDElement):
@property
def length(self) -> float:
return self._d
def __init__(self, d, n):
self._d = d
self.n = n
super().__init__(1, d, 0, 1, name=f"Media(d={d}, n={n})")
class FreeSpace(Media):
"""Propagation in free space or in a medium of constant refractive index"""
@property
def length(self) -> float:
return self._d
def __init__(self, d) -> None:
self._d = d
super().__init__(d=d, n=1)
self.name = f"FreeSpace(d={d})"
class ThinLens(ABCDElement):
"""Thin lens aproximation. Only valid if the focal length is much greater than the thickness of the lens"""
@property
def f(self):
return self._f
def __init__(self, f: float) -> None:
self._f = f
super().__init__(1, 0, -1/f, 1, name=f"ThinLens(f={f})")
class FlatInterface(ABCDElement):
"""Refraction at a flat interface"""
def __init__(self, n1, n2) -> None:
"""
Args:
n1 (float): Refractive index of first media
n2 (float): Refractive index of second media
"""
super().__init__(1, 0, 0, n1 / n2, name=f"FlatInterface(n1={n1}, n2={n2})")
class CurvedInterface(ABCDElement):
"""Refraction at a curved interface"""
@property
def n1(self):
return self._n1
@property
def n2(self):
return self._n2
@property
def R(self):
return self._R
def __init__(self, n1, n2, R) -> None:
"""
Args:
n1 (float): Refractive index of the material the ray is propagating from
n2 (float): Refractive index of the material the ray is propagating to
R (float): Curviture of the boundary that is positive for convex boundary and negative for concave boundary.
"""
self._n1 = n1
self._n2 = n2
self._R = R
super().__init__(self.__build_matrix(), name=f"CurvedInterface(n1={n1}, n2={n2}, R={R})")
def __build_matrix(self) -> np.ndarray:
return np.array([
[1, 0],
[-1*(self.n2 - self.n1) / (self.n2 * self.R), self.n1 / self.n2]
])
class ABCDCompositeElement(ABCDElement):
"""Represents ABCDelement that consists of child elements"""
@property
def length(self) -> float:
        return sum(e.length for e in self.childs)
def __init__(self, childs: List[ABCDElement], name="") -> None:
self.name = ""
self.childs = childs
super().__init__(self._build_matrix(), name=name)
def _build_matrix(self) -> np.ndarray:
if len(self.childs) == 0:
return np.identity(2)
return reduce(lambda c, b: c.dot(b), [e.matrix for e in reversed(self.childs)])
class ThickLens(ABCDCompositeElement):
"""Propagation through ThickLens."""
@property
def f(self) -> float:
# Using Lens Maker's formula
# + before 1/R2 is due to assumed positive R2
f_inv = (self._n/1 - 1) * (1/self._R1 + 1/self._R2)
return 1/f_inv
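    # Worked example for the focal-length formula above (illustrative values):
    # R1 = R2 = 0.1, n = 1.5  ->  f_inv = (1.5 - 1) * (1/0.1 + 1/0.1) = 10  ->  f = 0.1,
    # i.e. a focal length of 0.1 in the same length units as the radii.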
def __init__(self, R1, n, R2, d) -> None:
""" It is assumed, that the refractive index of free space is 1
Args:
R1 (float, positive): Curviture of the first face of the lens
n (float): Refractive index of the lens
R2 (float, positive): Curviture of the second face of the lens
d (float): Thickness of the lens
"""
self._n = n
self._R1 = R1
self._R2 = R2
self._d = d
components = [
CurvedInterface(1, n, R1),
Media(d, n),
CurvedInterface(n, 1, -R2)
]
super().__init__(components, name=f"ThickLens(R1={R1}, d={d}, R2={R2}, n={n})")
class PlanoConvexLens(ThickLens):
@property
def is_inversed(self):
return self.__inversed
def __init__(self, R, d, n, inversed=False) -> None:
if inversed:
super().__init__(R, n, float("inf"), d)
self.name = f"PlanConvexLens(R={R}, d={d}, n={n})"
else:
super().__init__(float("inf"), n, R, d)
self.name = f"PlanConvexLens(R={R}, d={d}, n={n})"
self.__inversed = inversed
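if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): propagate a Gaussian-beam
    # q-parameter through 10 cm of free space followed by a thin lens with f = 5 cm.
    # The q = z + i*z_R convention and the numeric values are illustrative assumptions.
    q_in = 0.0 + 1j * 0.01  # waist at z = 0, Rayleigh range 0.01 (lengths in metres)
    system = ABCDCompositeElement([FreeSpace(0.10), ThinLens(0.05)], name="demo system")
    q_out = system.act(q_in)
    print(f"system matrix:\n{system.matrix}\nq_out = {q_out}")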
| python |
class StatusHost:
hostname: str
device_id: str
uptime: int
power_time: int
time: str
timestamp: int
fwversion: str
devmodel: str
netrole: str
loadavg: float
totalram: int
freeram: int
temperature: int
cpuload: float
height: int
def __init__(self, data):
self.hostname = data.get("hostname")
self.device_id = data.get("device_id")
self.uptime = data.get("uptime")
self.power_time = data.get("power_time")
self.time = data.get("time")
self.timestamp = data.get("timestamp")
self.fwversion = data.get("fwversion")
self.devmodel = data.get("devmodel")
self.netrole = data.get("netrole")
self.loadavg = data.get("loadavg")
self.totalram = data.get("totalram")
self.freeram = data.get("freeram")
self.temperature = data.get("temperature")
self.cpuload = data.get("cpuload")
self.height = data.get("height")
| python |
from __future__ import division
import numpy as np
import os
import pandas as pd
import itertools
import matplotlib.pyplot as plt
## required in 3D plot
from mpl_toolkits.mplot3d import Axes3D
import xml.etree.ElementTree as ET
import time
import pylab as pl
from IPython import display
import sys
import time
import copy
import operator
from trackgenius.utilities.background import Background
from trackgenius.trackingnegobi import TrackingNegotiationLog
from trackgenius.utilities.helper import *
class PredictUtilitySpace(TrackingNegotiationLog):
def __init__(
self,
Input_path,
Player,
Log_path,
Log_file
):
TrackingNegotiationLog.__init__(
self,
Input_path,
Player,
Log_path,
Log_file
)
## only generate background
def _show_background(self, Agents, player_Utility_uncertain, NashPoint, ParetoFrontier, issue_predict):
plt.figure(figsize=(8, 8))
plt.scatter([player_Utility_uncertain[i][0] for i in player_Utility_uncertain.keys()],
[player_Utility_uncertain[i][1] for i in player_Utility_uncertain.keys()],
color = "r", alpha = 0.5, s = 0.5)
## plot pareto
for pareto in ParetoFrontier:
pl.scatter(pareto[0], pareto[1], marker = "v", color = "purple", alpha = 1.0, s = 40.0)
## plot nash
pl.scatter(NashPoint[0], NashPoint[1], color = "black", alpha = 1.0, s = 50.0)
plt.xlim(0, 1.05)
plt.ylim(0, 1.05)
plt.xlabel(Agents[0])
plt.ylabel(Agents[1])
        plt.title("Rebuilt utility space")
plt.show()
## generate key information of predictions
def _generate_player_Utility_pred(self, Domains, Values, Evaluations_pred, Parameter_dict_pred, Weights):
Evaluations_pred_norm = copy.deepcopy(Evaluations_pred)
for player in Domains:
for eva in Evaluations_pred_norm[player].keys():
Evaluations_pred_norm[player][eva] = [i/max(Evaluations_pred[player][eva]) for i in Evaluations_pred[player][eva]]
for value in Values:
for i, val in enumerate(Values[value]):
Parameter_dict_pred[player][val] = []
Parameter_dict_pred[player][val].append(Evaluations_pred_norm[player][value][i])
Parameter_dict_pred[player][val].append(Weights[player][value])
all_bids, all_bid_num = Background._generateAllBids(self, Values)
player_Utility_pred = Background._genAllPlayersAllUtility3(self, Parameter_dict_pred, all_bids)
NashPoint_pred, ParetoFrontier_pred = Background._generateParetoAndNash(player_Utility_pred)
return Evaluations_pred_norm, player_Utility_pred, NashPoint_pred, ParetoFrontier_pred
## calculate the accuracy of predicted pareto frontier and Nash solution
def _ParetoNashBidAccuracy(self, info_summary):
## define variable
ParetoFrontier, ParetoFrontier_pred, NashPoint, NashPoint_pred = info_summary["ParetoFrontier"], info_summary["ParetoFrontier_pred"], info_summary["NashPoint"], info_summary["NashPoint_pred"]
player_Utility, player_Utility_pred = info_summary["player_Utility"], info_summary["player_Utility_pred"]
## find Nash bid
Nash_bid = ValueFindKey(player_Utility, NashPoint)
Nash_bid_pred = ValueFindKey(player_Utility_pred, NashPoint_pred)
        ## find Pareto bids and calculate accuracy
Pareto_bid_list = [ValueFindKey(player_Utility, utility) for utility in ParetoFrontier]
Pareto_bid_list_pred = [ValueFindKey(player_Utility_pred, utility) for utility in ParetoFrontier_pred]
Pareto_acc = 0.0
for bid in Pareto_bid_list_pred:
if bid in Pareto_bid_list:
Pareto_acc += 1
Pareto_acc /= len(Pareto_bid_list)
return Pareto_acc, Nash_bid, Nash_bid_pred, Pareto_bid_list, Pareto_bid_list_pred
def _evaluation_Pred_and_Plot_and_Acc(self, info_summary,
Domains = None,
agent_index = None,
Values = None,
Evaluations_pred = None,
Parameter_dict_pred = None,
Weights = None,
Bids = None,
Alter_num = None,
baseutility = None,
bottom_utility = None,
TYPE = None,
if_Eval_Pred = True,
if_Generate_Util = True,
if_Show_Plot = True,
if_Stat_Nash = True,
if_Print_Stat_Nash = True):
## define variables
Pareto_acc, Nash_bid, Nash_bid_pred, Nash_diff, Pareto_bid_list, Pareto_bid_list_pred, issue_predict = None, None, None, None, None, None, None
if if_Eval_Pred == True:
Evaluations_pred, bottom_evaluation = ValueEvaluationsConnect(Domains, agent_index, Values, Evaluations_pred, Weights, Bids, Alter_num, baseutility, bottom_utility, TYPE)
if if_Generate_Util == True:
Evaluations_pred_norm, player_Utility_pred, NashPoint_pred, ParetoFrontier_pred = PredictUtilitySpace._generate_player_Utility_pred(self, Domains, Values, Evaluations_pred, Parameter_dict_pred, Weights)
info_summary["player_Utility_pred"] = player_Utility_pred
info_summary["NashPoint_pred"] = NashPoint_pred
info_summary["ParetoFrontier_pred"] = ParetoFrontier_pred
if if_Show_Plot == True:
PredictUtilitySpace._show_background(self, info_summary["Agents"],
info_summary["player_Utility_pred"],
info_summary["NashPoint_pred"],
info_summary["ParetoFrontier_pred"],
issue_predict)
if if_Stat_Nash == True:
Pareto_acc, Nash_bid, Nash_bid_pred, Pareto_bid_list, Pareto_bid_list_pred = PredictUtilitySpace._ParetoNashBidAccuracy(self, info_summary)
Nash_diff = BidDifference(Nash_bid, Nash_bid_pred)
#print("Pareto_acc, Nash_bid, Nash_bid_pred", Pareto_acc, Nash_bid, Nash_bid_pred)
info_Nash_Pareto_Pred = {}
info_Nash_Pareto_Pred["Pareto_acc"] = Pareto_acc
info_Nash_Pareto_Pred["Nash_bid"] = Nash_bid
info_Nash_Pareto_Pred["Nash_bid_pred"] = Nash_bid_pred
info_Nash_Pareto_Pred["Nash_diff"] = Nash_diff
info_Nash_Pareto_Pred["Pareto_bid_list"] = Pareto_bid_list
info_Nash_Pareto_Pred["Pareto_bid_list_pred"] = Pareto_bid_list_pred
if if_Print_Stat_Nash == True:
print("Pareto_acc:", info_Nash_Pareto_Pred["Pareto_acc"])
print("Nash_diff:", info_Nash_Pareto_Pred["Nash_diff"])
print("Nash_bid:", info_Nash_Pareto_Pred["Nash_bid"])
print("Nash_bid_pred:", info_Nash_Pareto_Pred["Nash_bid_pred"])
return info_summary, info_Nash_Pareto_Pred, Evaluations_pred, Evaluations_pred_norm, Parameter_dict_pred
    ## a 4-step algorithm for rebuilding the outcome space when the opponent's evaluation values are unknown
def _trackinghistory(self, info_summary, agent_name, incomplete_info_level, start_round, end_round, speed, order_text, save_Path, Visulisation, print_result):
##################################################################
## Step 1 : estimate bottom bids, baseutility and initialisation ##
##################################################################
## get target agent index
agent_index = info_summary["Agents"].index(agent_name)
own_agent_index = agent_index-1
issue_predict = {}
## the bid with minimal utility (bottom_utility) in own preference
bottom_utility_list = [info_summary["player_Utility"][bid][own_agent_index] for bid in info_summary["player_Utility"].keys()]
bottom_utility_index = np.argmin(bottom_utility_list)
bottom_utility = bottom_utility_list[bottom_utility_index]
bottom_bid = [bid for i, bid in enumerate(info_summary["player_Utility"].keys()) if i == bottom_utility_index][0]
        ## base utility (can be tuned for other projects)
baseutility = (1.0 - bottom_utility)/4 + bottom_utility
## use Background to generate key information
Values, Parameter_dict, _, Weights, Evaluations = Background._readIntegrateXML(self)
## extract the name of domains
Domains = [i for i in Weights.keys()]
Issues_num = len(Weights[Domains[agent_index]].keys())
## deepcopy
Weights_pred = copy.deepcopy(Weights)
Evaluations_pred = copy.deepcopy(Evaluations)
Parameter_dict_pred = copy.deepcopy(Parameter_dict)
## reset Weights_pred
for i in Weights_pred[Domains[agent_index]].keys():
Weights_pred[Domains[agent_index]][i] = 1.0/Issues_num
        ## reset Evaluations_pred to the base utility
for i in Evaluations_pred[Domains[agent_index]].keys():
Evaluations_pred[Domains[agent_index]][i] = [baseutility]*len(Evaluations[Domains[agent_index]][i])
##########################################################################
## Step 2 : estimate opponent max utility bids (for 0.04 * Total_round) ##
##########################################################################
## 0.04 * Total_round
## end_round - start_round >= 25
max_utility_estimation = round(info_summary["Total_round"]*0.04)
        ## bids offered by the opponent during the first max_utility_estimation rounds
max_bids_list = [info_summary["Log_history"][i][agent_index] for i in range(max_utility_estimation)]
max_bids_dict = {bid:int(0) for bid in max_bids_list}
for bid in max_bids_list:
max_bids_dict[bid] += 1
if print_result == True:
print("--------Opponent's Max_Utility_Bids----------")
print("max_bids_dict", max_bids_dict)
        ## sort from highest to lowest frequency (the result is a list of (bid, count) tuples)
max_bids_dict_ordered = sorted(max_bids_dict.items(), key=lambda kv: kv[1], reverse=True)
max_bid = max_bids_dict_ordered[0][0]
if print_result == True:
print("--------------Step2--------------")
info_summary, info_Nash_Pareto_Pred, Evaluations_pred, Evaluations_pred_norm, Parameter_dict_pred = PredictUtilitySpace._evaluation_Pred_and_Plot_and_Acc(self,
info_summary,
Domains = Domains,
agent_index = agent_index,
Values = Values,
Evaluations_pred = Evaluations_pred,
Parameter_dict_pred = Parameter_dict_pred,
Weights = Weights,
Bids = max_bid,
Alter_num = 1.0,
baseutility = baseutility,
bottom_utility = bottom_utility,
TYPE = "MAX",
if_Eval_Pred = True,
if_Generate_Util = True,
if_Show_Plot = Visulisation,
if_Stat_Nash = True,
if_Print_Stat_Nash = print_result)
##################################################
## Step 3 : generate space based on bottom bids ##
##################################################
if print_result == True:
print("--------------Step3--------------")
own_max_bid_list = [bid for bid in info_summary["player_Utility"].keys() if info_summary["player_Utility"][bid][own_agent_index] == 1.0]
own_max_bid = own_max_bid_list[0]
#print("own_max_bid", own_max_bid)
info_summary, info_Nash_Pareto_Pred, Evaluations_pred, Evaluations_pred_norm, Parameter_dict_pred = PredictUtilitySpace._evaluation_Pred_and_Plot_and_Acc(self,
info_summary,
Domains = Domains,
agent_index = agent_index,
Values = Values,
Evaluations_pred = Evaluations_pred,
Parameter_dict_pred = Parameter_dict_pred,
Weights = Weights,
Bids = own_max_bid,
Alter_num = bottom_utility,
baseutility = baseutility,
bottom_utility = bottom_utility,
TYPE = "MIN",
if_Eval_Pred = True,
if_Generate_Util = True,
if_Show_Plot = Visulisation,
if_Stat_Nash = True,
if_Print_Stat_Nash = print_result)
        ##################################################################
        ## Step 4 : other max-utility bids within the first 0.04 rounds ##
        ##################################################################
if print_result == True:
print("--------------Step4--------------")
if len(max_bids_dict_ordered) > 1:
## find the median frequency in max_bids_dict_ordered
Median_freq = np.median(list(set([max_bids_dict_ordered[i][1] for i in range(len(max_bids_dict_ordered))])))
for i in range(1, len(max_bids_dict_ordered)):
diff_tmp = BidDifference(max_bids_dict_ordered[0][0], max_bids_dict_ordered[i][0])
if (max_bids_dict_ordered[i][1] >= Median_freq and diff_tmp < 2) or diff_tmp < 2:
other_max_bid = max_bids_dict_ordered[i][0]
info_summary, info_Nash_Pareto_Pred, Evaluations_pred, Evaluations_pred_norm, Parameter_dict_pred = PredictUtilitySpace._evaluation_Pred_and_Plot_and_Acc(self,
info_summary,
Domains = Domains,
agent_index = agent_index,
Values = Values,
Evaluations_pred = Evaluations_pred,
Parameter_dict_pred = Parameter_dict_pred,
Weights = Weights,
Bids = other_max_bid,
Alter_num = 0.95,
baseutility = baseutility,
bottom_utility = bottom_utility,
TYPE = "MAX",
if_Eval_Pred = True,
if_Generate_Util = True,
if_Show_Plot = Visulisation,
if_Stat_Nash = True,
if_Print_Stat_Nash = print_result)
## The original outcome space for comparison
if Visulisation == True:
## show real situation
print("--------------Original outcome space for comparison--------------")
PredictUtilitySpace._show_background(self, info_summary["Agents"],
info_summary["player_Utility"],
info_summary["NashPoint"],
info_summary["ParetoFrontier"],
issue_predict)
pred_summary = {}
#pred_summary["bids_dict"] = bids_dict
pred_summary["Values"] = Values
pred_summary["Parameter_dict"] = Parameter_dict
pred_summary["Parameter_dict_pred"] = Parameter_dict_pred
pred_summary["Weights"] = Weights
pred_summary["Weights_pred"] = Weights_pred
pred_summary["Evaluations"] = Evaluations
pred_summary["Evaluations_pred"] = Evaluations_pred
pred_summary["Evaluations_pred_norm"] = Evaluations_pred_norm
#print("------------------")
#print("Values", Values)
#print("------------------")
#print("Parameter_dict", Parameter_dict)
#print("------------------")
#print("Weights", Weights)
#print("------------------")
#print("Weights_pred", Weights_pred)
#print("------------------")
#print("Evaluations", Evaluations)
#print("------------------")
#print("Evaluations_pred", Evaluations_pred)
#print("------------------")
#print("Evaluations_pred_norm", Evaluations_pred_norm)
#print("------------------")
#print("Parameter_dict_pred", Parameter_dict_pred)
return pred_summary, info_Nash_Pareto_Pred
def predicting(self, info_summary, agent_name, incomplete_info_level = [False, False, False, False, True], Type = "BOTH", start_round = 0, end_round = None, speed = None, order_text = False, save_Path = None, Visulisation = True, print_result = True):
## incomplete_info_level = [False, False, False, False, False]
## [Rank_bids,
## incomplete of own weights,
## incomplete of own evaluation values,
## incomplete of oppo weights,
## incomplete of oppo evaluation values]
if incomplete_info_level == [False, False, False, False, True]:
            pred_summary, info_Nash_Pareto_Pred = PredictUtilitySpace._trackinghistory(self, info_summary, agent_name, incomplete_info_level, start_round, end_round, speed, order_text, save_Path, Visulisation, print_result)
            return pred_summary, info_Nash_Pareto_Pred
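# Usage sketch (illustrative only; the paths, file names and the exact structure of
# `info_summary` are assumptions, since they are produced by TrackingNegotiationLog
# elsewhere in the package rather than in this file):
#
#   predictor = PredictUtilitySpace(Input_path="domains/", Player="AgentA",
#                                   Log_path="logs/", Log_file="session.xml")
#   # info_summary is expected to hold at least "Agents", "player_Utility",
#   # "NashPoint", "ParetoFrontier", "Total_round" and "Log_history".
#   pred_summary, info_Nash_Pareto_Pred = predictor.predicting(
#       info_summary, agent_name="AgentB", Visulisation=False, print_result=True)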
import flask
import requests
import sqlalchemy
from sqlalchemy import orm
_HAS_PSYCOPG2 = False
try:
import psycopg2
_HAS_PSYCOPG2 = True
except ImportError:
pass
from .base import ExceptionConverter
class ArgumentErrorConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, sqlalchemy.exc.ArgumentError):
raise ValueError()
return dict(
title="SQLArgumentError",
detail=(
"Tried to generate SQL query with unknown attribute! Check your filter "
"for typos and virtual attributes."
),
http_status=requests.codes["unprocessable"],
meta={"sql_exception": str(exc)} if flask.current_app.debug else None,
)
ArgumentErrorConverter.register()
class NoResultFoundConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, orm.exc.NoResultFound):
raise ValueError()
return dict(
title="SQLNoResultFound",
detail="Object not found!",
http_status=requests.codes["not_found"],
meta={"sql_exception": str(exc)} if flask.current_app.debug else None,
)
NoResultFoundConverter.register()
class MultipleResultsFoundConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, orm.exc.MultipleResultsFound):
raise ValueError()
return dict(
            title="SQLMultipleResultsFound",
detail="Query was supposed to return one, but many found!",
http_status=requests.codes["unprocessable"],
meta={"sql_exception": str(exc)} if flask.current_app.debug else None,
)
MultipleResultsFoundConverter.register()
class UniqueViolationConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, psycopg2.errors.UniqueViolation):
raise ValueError()
return dict(
title="SQLUniqueViolation",
detail=(
"Unique constraint violated! "
+ (getattr(getattr(exc, "diag", None), "message_detail", ""))
),
http_status=requests.codes["conflict"],
meta={"psql_exception": str(exc)} if flask.current_app.debug else None,
)
if _HAS_PSYCOPG2:
class CheckViolationConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, psycopg2.errors.CheckViolation):
raise ValueError()
return dict(
title="SQLCheckViolation",
detail="SQL check constraint violated!",
http_status=requests.codes["unprocessable"],
meta={
"psql_exception": str(exc),
"psql_diag": f"{getattr(getattr(exc, 'diag', None), 'constraint_name', '')}",
}
if flask.current_app.debug
else None,
)
CheckViolationConverter.register()
class ForeignKeyViolationConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, psycopg2.errors.ForeignKeyViolation):
raise ValueError()
return dict(
title="SQLForeignKeyViolation",
            detail=(
                "Referential integrity violation! You most probably tried to "
                "delete a parent object while there are still children "
                "referencing it."
            ),
http_status=requests.codes["unprocessable"],
meta={
"psql_exception": str(exc),
"psql_diag": f"{getattr(getattr(exc, 'diag', None), 'constraint_name', '')}",
}
if flask.current_app.debug
else None,
)
    ForeignKeyViolationConverter.register()
class NotNullViolationConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, psycopg2.errors.NotNullViolation):
raise ValueError()
try:
additional_details = exc.args[0].split("DETAIL")[0].strip()
except Exception:
additional_details = ""
detail = "Not-null constraint violated!"
if additional_details:
detail = detail + f" ({additional_details})"
return dict(
title="SQLNotNullViolation",
detail=detail,
http_status=requests.codes["unprocessable"],
meta={
"psql_exception": str(exc),
"psql_diag": f" [{getattr(getattr(exc, 'diag', None), 'message_primary', '')}]",
}
if flask.current_app.debug
else None,
)
NotNullViolationConverter.register()
class IntegrityErrorConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, sqlalchemy.exc.IntegrityError):
raise ValueError()
orig = getattr(exc, "orig", None)
if isinstance(orig, psycopg2.errors.UniqueViolation):
retv = UniqueViolationConverter.convert(orig)
elif isinstance(orig, psycopg2.errors.CheckViolation):
retv = CheckViolationConverter.convert(orig)
elif isinstance(orig, psycopg2.errors.ForeignKeyViolation):
retv = ForeignKeyViolationConverter.convert(orig)
elif isinstance(orig, psycopg2.errors.NotNullViolation):
retv = NotNullViolationConverter.convert(orig)
else:
raise ValueError()
if flask.current_app.debug:
retv["meta"] = retv.get("meta", dict())
retv["meta"]["exc"] = str(exc)
return retv
IntegrityErrorConverter.register()
class InvalidRequestErrorConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, sqlalchemy.exc.InvalidRequestError):
raise ValueError()
if "'any()' not implemented for scalar attributes. Use has()." in exc.args:
return dict(
title="InvalidFilters",
                detail="Invalid filters querystring parameter: for fields on relations use `has`, not `any`.",
http_status=requests.codes["unprocessable"],
source={"parameter": "filter"},
)
raise ValueError()
class DataErrorConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, sqlalchemy.exc.DataError):
raise ValueError()
if hasattr(exc, "orig"):
return dict(
title="DataError",
detail=f"Datastore error not caught by validation: {';'.join(_.strip() for _ in exc.orig.args)}",
http_status=requests.codes["unprocessable"],
source={"pointer": "body"},
)
raise ValueError()
class SQLAlchemyErrorConverter(ExceptionConverter):
@classmethod
def convert(cls, exc):
if not isinstance(exc, sqlalchemy.exc.SQLAlchemyError):
raise ValueError()
meta = {}
if flask.current_app.debug:
meta = {"exception": str(exc)}
orig = getattr(exc, "orig", None)
if orig:
meta["driver_exception"] = str(orig)
return dict(
title=type(exc).__name__,
            detail="Unexpected database error caused by either a backend bug or an infrastructure outage.",
            http_status=requests.codes["internal_server_error"],
meta=meta,
)
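# Usage sketch (illustrative): each converter turns one specific SQLAlchemy/psycopg2
# exception into a JSON:API-style error dict, and raises ValueError for anything it
# does not recognise so that the registry in `.base` can try the next converter.
# The `session.commit()` call below is an assumption, not part of this module.
#
#   try:
#       session.commit()
#   except sqlalchemy.exc.IntegrityError as exc:
#       error = IntegrityErrorConverter.convert(exc)
#       # -> {"title": ..., "detail": ..., "http_status": ..., "meta": ...}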
from troposphere import Template
from troposphere.iot import (
Certificate,
Policy,
PolicyPrincipalAttachment,
Thing,
ThingPrincipalAttachment,
TopicRule,
TopicRulePayload,
Action,
LambdaAction,
)
t = Template()
certificate = Certificate(
'MyCertificate',
CertificateSigningRequest='CSRParameter',
Status='StatusParameter',
)
policy = Policy(
'MyPolicy',
PolicyDocument={'Version': '2012-10-17'},
PolicyName='NameParameter',
)
policy_principal = PolicyPrincipalAttachment(
'MyPolicyPrincipalAttachment',
PolicyName='NameParameter',
Principal='arn:aws:iot:ap-southeast-2:123456789012',
)
thing = Thing(
'MyThing',
AttributePayload={
'Attributes': {
'myAttributeA': 'MyAttributeValueA',
'myAttributeB': 'MyAttributeValueB',
}
},
ThingName='NameParameter',
)
thing_principal = ThingPrincipalAttachment(
'MyThingPrincipalAttachment',
ThingName='NameParameter',
Principal='arn:aws:iot:ap-southeast-2:123456789012',
)
topic_rule = TopicRule(
'MyTopicRule',
RuleName='NameParameter',
TopicRulePayload=TopicRulePayload(
RuleDisabled=True,
Sql='SELECT temp FROM SomeTopic WHERE temp > 60',
Actions=[
Action(
Lambda=LambdaAction(
FunctionArn='arn',
),
),
],
),
)
t.add_resource(certificate)
t.add_resource(policy)
t.add_resource(policy_principal)
t.add_resource(thing)
t.add_resource(thing_principal)
t.add_resource(topic_rule)
print(t.to_json())
import sys
code = {
"TTT": "F","TTC": "F", "TTA":"L", "TTG":"L",
"CTT":"L", "CTC":"L", "CTA":"L", "CTG":"L",
"ATT":"I", "ATC":"I", "ATA":"I", "ATG":"M",
"GTT":"V", "GTC":"V", "GTA":"V", "GTG":"V",
"TCT":"S", "TCC":"S", "TCA":"S", "TCG": "S",
"CCT":"P", "CCC":"P", "CCA":"P", "CCG":"P",
"ACT":"T", "ACC":"T", "ACA":"T", "ACG":"T",
"GCT":"A", "GCC":"A", "GCA":"A", "GCG":"A",
"TAT":"Y", "TAC":"Y", "TAA":"[", "TAG":"[",
"CAT":"H", "CAC":"H", "CAA":"Q", "CAG":"Q",
"AAT":"N", "AAC":"N", "AAA":"K", "AAG":"K",
"GAT":"D", "GAC":"D", "GAA":"E", "GAG":"E",
"TGT":"C", "TGC":"C", "TGA":"]", "TGG":"W",
"CGT":"R", "CGC":"R", "CGA":"R", "CGG":"R",
"AGT":"B", "AGC":"B", "AGA":"R", "AGG":"R",
"GGT":"G", "GGC":"G", "GGA":"G", "GGG":"G"
}
code_16 = {
"TTT":"A","TTC": "A", "TTA":"A", "TTG":"A",
"CTT":"B", "CTC":"B", "CTA":"B", "CTG":"B",
"ATT":"C", "ATC":"C", "ATA":"C", "ATG":"C",
"GTT":"D", "GTC":"D", "GTA":"D", "GTG":"D",
"TCT":"E", "TCC":"E", "TCA":"E", "TCG":"E",
"CCT":"F", "CCC":"F", "CCA":"F", "CCG":"F",
"ACT":"G", "ACC":"G", "ACA":"G", "ACG":"G",
"GCT":"H", "GCC":"H", "GCA":"H", "GCG":"H",
"TAT":"I", "TAC":"I", "TAA":"I", "TAG":"I",
"CAT":"J", "CAC":"J", "CAA":"J", "CAG":"J",
"AAT":"K", "AAC":"K", "AAA":"K", "AAG":"K",
"GAT":"L", "GAC":"L", "GAA":"L", "GAG":"L",
"TGT":"M", "TGC":"M", "TGA":"M", "TGG":"M",
"CGT":"N", "CGC":"N", "CGA":"N", "CGG":"N",
"AGT":"O", "AGC":"O", "AGA":"O", "AGG":"O",
"GGT":"P", "GGC":"P", "GGA":"P", "GGG":"P"
}
code_12 = {
    "GTT":"A", "CTA":"A", "ATG":"A", "GTA":"A", "GTC":"A", "ATC":"A", "ATA":"A",
    "CTT":"B", "CTC":"B", "GTG":"B", "TTA":"B", "TTT":"B",
    "CTG":"C", "TTC":"C", "ATT":"C", "TTG":"C",
    "ACC":"D", "TCA":"D", "ACG":"D", "GCA":"D",
    "GCC":"E", "TCG":"E", "CCG":"E", "GCG":"E", "CCC":"E",
    "TCC":"F", "CCT":"F", "TCT":"F", "GCT":"F", "CCA":"F", "ACA":"F", "ACT":"F",
    "GAA":"G", "GAC":"G", "GAT":"G", "CAA":"G", "AAT":"G", "CAT":"G", "CAG":"G",
    "AAC":"H", "AAG":"H", "AAA":"H", "GAG":"H", "TAC":"H",
    "TAG":"I", "CAC":"I", "TAA":"I", "TAT":"I",
    "CGA":"J", "GGC":"J", "TGG":"J", "GGA":"J",
    "CGG":"K", "AGC":"K", "TGA":"K", "CGC":"K", "AGA":"K",
    "AGG":"L", "TGT":"L", "TGC":"L", "CGT":"L", "GGT":"L", "AGT":"L", "GGG":"L"
}
inputSequence = sys.argv[1]
scramble = False
if len(sys.argv) == 3:
scramble = sys.argv[2]
switch = 0
aaSequences = ["","",""]
for i in range(len(inputSequence)):
if i + 2 < len(inputSequence):
aaSequences[switch] += code_12[inputSequence[i:i+3]]
switch = (switch + 1) % 3
print("Frame 1:",aaSequences[0], "Frame 2:",aaSequences[1], "Frame 3:", aaSequences[2])
if scramble:
print("scramble on")
aaSequences = sorted(aaSequences)
print("Frame 1:",aaSequences[0], "Frame 2:",aaSequences[1], "Frame 3:", aaSequences[2])
codeTTPOMinus1 = {
"F" : ["TTT","TTC"],
"L" : ["TTA","TTG","CTT","CTC","CTA","CTG"],
"I" : ["ATT","ATC","ATA"],
"M":["ATG"],
"V" : ["GTT","GTC","GTA","GTG"],
"P" : ["CCT", "CCC", "CCA", "CCG"],
"T" : ["ACT", "ACC", "ACA", "ACG"],
"A" : ["GCT","GCC", "GCA", "GCG"],
"Y" : ["TAT", "TAC"],
"[" : ["TAA", "TAG"],
"H" : ["CAT","CAC"],
"Q" : ["CAA", "CAG"],
"N" : ["AAT", "AAC"],
"K" : ["AAA", "AAG"],
"D" : ["GAT", "GAC"],
"E": ["GAA", "GAG"],
"C": ["TGT", "TGC"],
"]" : ["TGA"],
"W" : ["TGG"],
"R" : ["CGT", "CGC", "CGA", "CGG", "AGA", "AGG"],
"S" : ["TCT", "TCC", "TCA", "TCG"],
"B" : ["AGT", "AGC"],
"G" : ["GGT", "GGC", "GGA", "GGG"]
}
code_16TTPOMinus1 = {
"A" : ["TTT","TTC","TTA","TTG"],
"B" : ["CTT","CTC","CTA","CTG"],
"C" : ["ATT","ATC","ATA","ATG"],
"D" : ["GTT","GTC","GTA","GTG"],
"E" : ["TCT", "TCC", "TCA", "TCG"],
"F" : ["CCT", "CCC", "CCA", "CCG"],
"G" : ["ACT", "ACC", "ACA", "ACG"],
"H" : ["GCT","GCC", "GCA", "GCG"],
"I" : ["TAT", "TAC","TAA", "TAG"],
"J" : ["CAT","CAC","CAA", "CAG"],
"K" : ["AAT", "AAC","AAA", "AAG"],
"L" : ["GAT", "GAC","GAA", "GAG"],
"M" : ["TGT", "TGC","TGA","TGG"],
"N" : ["CGT", "CGC", "CGA", "CGG"],
"O" : ["AGA", "AGG","AGT", "AGC"],
"P": ["GGT", "GGC", "GGA", "GGG"]
}
code_12TTPOMinus1 = {
"A" : ["GTT","CTA","ATG","GTA","GTC","ATC","ATA"],
"B" : ["CTT","CTC","GTG","TTA","TTT"],
"C" : ["CTG","TTC","ATT","TTG"],
"D" : ["ACC","TCA","ACG","GCA"],
"E" : ["GCC","TCG","CCG","GCG","CCC"],
"F" : ["TCC","CCT","TCT","GCT","CCA","ACA","ACT"],
"G" : ["GAA","GAC","GAT","CAA","AAT","CAT","CAG"],
"H" : ["AAC","AAG","AAA","GAG","TAC"],
"I" : ["TAG","CAC","TAA","TAT"],
"J" : ["CGA","GGC","TGG","GGA"],
"K" : ["CGG","AGC","TGA","CGC","AGA"],
"L" : ["AGG","TGT","TGC","CGT","GGT","AGT","GGG"]
}
reconstructedSequence = ""
resultArr = []
for i in range(len(aaSequences[0])):
#print("Iteration: ",i)
triplets0 = code_12TTPOMinus1[aaSequences[0][i]]
triplets1 = code_12TTPOMinus1[aaSequences[1][i]] if i < len(aaSequences[1]) else []
triplets2 = code_12TTPOMinus1[aaSequences[2][i]] if i < len(aaSequences[2]) else []
#print(triplets0, triplets1, triplets2, resultArr)
found = False
for elem in resultArr:
for entry in triplets0:
#print(entry[0],entry[1])
if elem[3] == entry[0] and elem[4] == entry[1]:
if reconstructedSequence == "":
reconstructedSequence += elem
else:
#print(elem)
reconstructedSequence += elem[2] + elem[3] + elem[4]
found = True
#print(reconstructedSequence, elem, triplets0[0])
break
if found:
break
if not found and reconstructedSequence != "":
print("error, wrong order! ", reconstructedSequence)
break
resultArr = []
for entry0 in triplets0:
if triplets1 != []:
for entry1 in triplets1:
if triplets2 != []:
for entry2 in triplets2:
if entry0[1] == entry1[0] and entry0[2] == entry2[0] and entry1[1] == entry2[0] and entry1[2] == entry2[1]:
resultArr.append(entry0 + entry1[2] + entry2[2])
else:
if entry0[1] == entry1[0] and entry0[2] == entry1[1]:
resultArr.append(entry0 + entry1[2])
else:
resultArr.append(entry0)
#print(resultArr)
found = False
for elem in resultArr:
#print(reconstructedSequence, elem)
if reconstructedSequence != "":
if len(elem) == 5:
if elem[0] == reconstructedSequence[-2] and elem[1] == reconstructedSequence[-1]:
reconstructedSequence += elem[2] + elem[3] + elem[4]
found = True
break
elif len(elem) == 4:
if elem[0] == reconstructedSequence[-2] and elem[1] == reconstructedSequence[-1]:
reconstructedSequence += elem[2] + elem[3]
found = True
break
else:
if elem[0] == reconstructedSequence[-2] and elem[1] == reconstructedSequence[-1]:
reconstructedSequence += elem[2]
found = True
break
else:
reconstructedSequence += elem
found = True
break
if not found or len(reconstructedSequence) != len(inputSequence): #the latter could be replaced by 3*len(aaSequences[2]) + 2 assuming aaSequences[2] is the shortest amino acid-like sequence
print("error, wrong order!", reconstructedSequence)
sys.exit()
print(inputSequence)
matches = ""
for i in range(len(inputSequence)):
if inputSequence[i] == reconstructedSequence[i]:
matches += "|"
else:
matches += " "
print(matches)
print(reconstructedSequence)
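# Invocation sketch (the script file name is illustrative): the only required argument
# is a DNA string over A/C/G/T; passing any second argument turns on frame scrambling.
#
#   python reconstruct_from_frames.py ATGGCTGCTTACGGATGA
#   python reconstruct_from_frames.py ATGGCTGCTTACGGATGA scramble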
"""Class to infer with the model."""
from pathlib import Path
import torch
from PIL import Image
from torch.cuda.amp import autocast
from torch.nn import DataParallel
from torch.utils.data import DataLoader
from tqdm import tqdm
from .config import Config
from .data import INPUT_CHANNELS, OUTPUT_CHANNELS, TestDataset
from .model import UNet
from .train import Trainer
class Inference:
"""Class to infer with the model."""
def __init__(
self,
image_dir: Path,
load_dir: Path,
use_best_model: bool,
config: Config,
):
"""Store config and initialize everything.
Args:
image_dir: Path to the directory containing the input images
load_dir: Directory from where to load the model's weights
use_best_model: Whether to use the best model (wrt accuracy)
config: The hyper-param config
"""
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
)
self.dataset = TestDataset(image_dir)
self.loader = DataLoader(
self.dataset,
batch_size=config.test_batch_size,
pin_memory=True,
)
model = UNet(INPUT_CHANNELS, OUTPUT_CHANNELS, config)
self.model = DataParallel(model).to(self.device)
Trainer.load_weights(self.model, load_dir, use_best_model)
self.config = config
def infer(self, output_dir: Path) -> None:
"""Infer with the model.
Args:
output_dir: Directory where to dump the model's outputs
"""
output_dir = output_dir.expanduser()
if not output_dir.exists():
output_dir.mkdir(parents=True)
# Turn off batch-norm updates
self.model.eval()
with tqdm(total=len(self.dataset), desc="Inference") as progress_bar:
for images, names in self.loader:
images = images.to(self.device)
with autocast(enabled=self.config.mixed_precision):
logits = self.model(images)[0]
predictions = torch.sigmoid(logits)
# Convert float32 in [0, 1] to uint8 in [0, 255]
outputs = (predictions * 255).squeeze(1).byte()
# Pillow needs numpy ndarrays; it fails with PyTorch tensors
outputs_np = outputs.cpu().numpy()
for img, name in zip(outputs_np, names):
path = output_dir / name
Image.fromarray(img).save(path)
progress_bar.update()
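# Usage sketch (illustrative; the Config constructor arguments and the directory
# layout are assumptions, not taken from this module):
#
#   config = Config()
#   inference = Inference(
#       image_dir=Path("data/test_images"),
#       load_dir=Path("checkpoints"),
#       use_best_model=True,
#       config=config,
#   )
#   inference.infer(Path("outputs"))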