# Copyright (c) 2016, Ethan White
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pyqrcode
import math
"""
This file aims to turn a QR code produced by pyqrcode's `text()` method
into something similar to the output of `qrencode -t utf8`, thus
allowing it to take up half the space in each direction and fit on an
80x24 terminal.
"""
class QRMatrix:
def __init__(self, lines):
self.lines = lines
def __getitem__(self, param):
if type(param) is not tuple:
raise ValueError("Expected tuple")
x, y = param
try:
return self.lines[x][y] == "1"
except IndexError:
return False
def get_width(self):
return len(self.lines[0])
def get_height(self):
return len(self.lines)
def get_size(self):
return self.get_width(), self.get_height()
width = property(get_width)
height = property(get_height)
size = property(get_size)
class QRWrapper:
def __init__(self, data):
self.matrix = QRMatrix(pyqrcode.create(data, error="L").text().split("\n"))
def _get_display_char(self, top, bottom):
if top and bottom:
return " "
elif not top and bottom:
return "\u2580"
elif not bottom and top:
return "\u2584"
elif not bottom and not top:
return "\u2588"
def compact_repr(self):
lines = []
for i in range(math.floor(self.matrix.height / 2)):
line = ""
for j in range(self.matrix.width):
line += self._get_display_char(self.matrix[j, i * 2], self.matrix[j, i * 2 + 1])
lines += [line]
return "\n".join(lines)
if __name__ == "__main__":
print(QRWrapper("Just for debugging!").compact_repr())
|
python
|
from validator.rule_pipe_validator import RulePipeValidator as RPV
from validator import Validator, validate, validate_many, rules as R
def test_rpv_001_simple():
data = "10"
# with integer
rules = [R.Integer(), R.Size(10)]
rpv = RPV(data, rules)
assert rpv.execute()
# without integer
rules = [R.Size(10)]
rpv = RPV(data, rules)
assert not rpv.execute()
def test_rpv_002_simple():
data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# with list
rules = [R.List(), R.Size(10)]
rpv = RPV(data, rules)
assert rpv.execute()
# without list
rules = [R.Integer(), R.Size(10)]
rpv = RPV(data, rules)
assert not rpv.execute()
def test_rpv_003_simple():
request = {"age": 23}
rule = {"age": "integer|size:23"}
result = Validator(request, rule).validate()
assert result
request = {"age": 23}
rule = {"age": "size:23"}
result = Validator(request, rule).validate()
assert result
request = {"age": 23}
rule = {"age": "size:2"}
result = Validator(request, rule).validate()
assert not result
request = {"age": 123456789}
rule = {"age": "size:9"}
result = Validator(request, rule).validate()
assert not result
request = {"age": 123456789}
rule = {"age": "integer|size:123456789"}
result = Validator(request, rule).validate()
assert result
def test_rpv_004_simple():
request = {"age": "23"}
rule = {"age": "integer|size:23"}
result = Validator(request, rule).validate()
assert result
request = {"age": "23"}
rule = {"age": "size:23"}
result = Validator(request, rule).validate()
assert not result
request = {"age": "23"}
rule = {"age": "size:2"}
result = Validator(request, rule).validate()
assert result
request = {"age": "123456789"}
rule = {"age": "size:9"}
result = Validator(request, rule).validate()
assert result
request = {"age": "123456789"}
rule = {"age": "integer|size:123456789"}
result = Validator(request, rule).validate()
assert result
def test_rpv_005_simple():
request = {"args": [1, 2, 3]}
rule = {"args": "size:3"}
result = Validator(request, rule).validate()
assert result
request = {"args": [1, 2, 3]}
rule = {"args": "list|size:3"}
result = Validator(request, rule).validate()
assert result
request = {"args": [1, 2, 3]}
rule = {"args": "integer|size:23"}
result = Validator(request, rule).validate()
assert not result
|
python
|
import os
import random
class Hangman():
def __init__(self):
self.word = self.pick_random_word()
self.word = self.word.upper()
self.hidden_word = ["-" for character in self.word]
self.word_length = len(self.word)
self.used_letters = []
self.running = True
self.lives = 10
self.finished = False
while self.running == True:
self.handle_game()
def pick_random_word(self):
# Replace with the correct path
filename = ".../Hangman/GermanWords/Words.txt"
with open(filename, encoding="utf8") as file:
content = file.readlines()
content = [x.strip() for x in content]
return (content[random.randrange(0, len(content))])
def get_input(self):
self.guess = input("Take a guess: \t")
self.guess = self.guess.upper()
return str(self.guess)
def check_if_in_word(self, input):
same = 0
for i in range(len(self.word)):
if self.word[i] == input and self.hidden_word[i] != input:
self.update_output(i, input)
same += 1
return(same)
def judge_answer(self, input):
if len(input) == 0 or len(input) > 1:
return True
for character in self.used_letters:
if character == input:
return True
return False
def add_to_used(self, input):
if not self.judge_answer(input):
self.used_letters.append(input)
def draw_word(self, won):
os.system("cls")
output_word = " ".join(self.hidden_word)
output_letters = " ".join(self.used_letters)
print(output_word)
print("\nUsed:", output_letters)
print("\nLives", self.lives, "\n\n")
if self.finished == True:
print(self.word, "\n")
def update_output(self, place, letter):
self.hidden_word[place] = letter
def handle_game(self):
if self.word_length > 0 and self.lives > 0:
self.draw_word(self.finished)
input = self.get_input()
same_characters = self.check_if_in_word(input)
self.word_length -= same_characters
if same_characters <= 0 and not self.judge_answer(input):
self.lives -=1
self.add_to_used(input)
else:
self.finished = True
self.draw_word(self.finished)
self.running = False
Hangman()
|
python
|
def test_add_pet(client, jwt):
r = client.post(
"/pets",
json=dict(
pet_type="cat",
name="tospik",
breed="persian",
owner="emreisikligil"
),
headers=dict(Authorization=f"Bearer {jwt}")
)
assert r.status_code == 201
body = r.json
assert body["id"]
assert body["name"] == "tospik"
assert body["breed"] == "persian"
assert body["owner"] == "emreisikligil"
def test_get_pets(client, jwt):
r = client.get(
"/pets",
headers=dict(Authorization=f"Bearer {jwt}")
)
assert r.status_code == 200
body = r.json
assert len(body) == 1
assert body[0]["id"]
assert body[0]["name"] == "tospik"
assert body[0]["breed"] == "persian"
assert body[0]["owner"] == "emreisikligil"
|
python
|
import asyncio
import youtube_dl
import urllib.request
import datetime
from bot_client import *
global queue
queue = []
global nowPlaying
nowPlaying = []
global ytdl_opts
ytdl_opts = {
'format': 'bestaudio/best',
#'ignoreerrors': True,
#'no_warnings': True,
#'debug_printtraffic': True, ###############################################################
'cookiefile': 'youtube.com_cookies.txt',
'cachedir': False,
#'quiet': True,
#'verbose': True, ##########################################################
}
global ytdl
def ytdl_init():
global ytdl
ytdl = youtube_dl.YoutubeDL(ytdl_opts)
ffmpeg_options = { # these options fix a common disconnection bug
'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
'options': '-vn -sn'
}
class ytdl_source(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=0.5):
super().__init__(source, volume)
self.data = data
self.title = data.get('title')
self.url = data.get('url')
@classmethod
async def from_url(cls, url, *, loop=None, stream=False):
try:
loop = loop or asyncio.get_event_loop()
data = await prepare_data(url, loop, stream)
if data is None:
play_next()
return
filename = data['url'] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
except Exception as e:
print('Exception in from_url:', e)
play_next()
async def prepare_data(url, loop, stream):
data = None
while True:
try:
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if 'entries' in data:
# take first item from a playlist
data = data['entries'][0]
# print(data['url']) # the generated direct url
my_data = urllib.request.urlopen(data['url'])
response = my_data.getcode()
except Exception as e:
if ('HTTP Error 403' in str(e)) or ('ERROR: No video formats found;' in str(e)):
print('generic error')
await asyncio.sleep(1)
try: #try without cookies
ytdl_opts2 = {
'format': 'bestaudio/best',
# 'ignoreerrors': True,
#'no_warnings': True,
'debug_printtraffic': True,
# 'nocheckcertificate': True,
'cachedir': False,
# 'quiet': True,
'verbose': True
}
ytdl2 = youtube_dl.YoutubeDL(ytdl_opts2)
data = await loop.run_in_executor(None, lambda: ytdl2.extract_info(url, download=not stream))
if 'entries' in data:
# take first item from a playlist
data = data['entries'][0]
my_data = urllib.request.urlopen(data['url'])
response = my_data.getcode()
except Exception as e2:
print('printing e2 exception: ' + str(e2))
return
else:
break
else:
print('Printing error: ' + str(e))
return
else:
break
return data
async def add_playlist(url : str, message):
ydl_opts = {
'format': 'bestaudio/best',
# 'ignoreerrors': True,
'no_warnings': True,
'cookiefile': 'youtube.com_cookies.txt',
#'nocheckcertificate': True,
'extract_flat': 'in_playlist',
#'skip_download': True,
'cachedir': False,
'quiet': True
}
info = None
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
try:
info = ydl.extract_info(url, download=False)
except Exception:
print('error on first info get')
if info is None:
await asyncio.sleep(0.5)
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
try:
info = ydl.extract_info(url, download=False)
except Exception:
print('error on second info get')
videos = len(info['entries'])
for i in range(videos):
duration = 'unknown'
if info['entries'][i]['duration'] is not None:
seconds = int(info['entries'][i]['duration'])
duration = str(datetime.timedelta(seconds=seconds))
tempList = ["http://www.youtube.com/watch?v="+info['entries'][i]['url'], info['entries'][i]['title'], duration]
global queue
queue.append(tempList)
await message.channel.send('successfully added '+str(videos)+' videos to queue')
play_next()
return
async def add_video(url : str, message):
ydl_opts = {
'format': 'bestaudio/best',
# 'ignoreerrors': True,
'no_warnings': True,
'cookiefile': 'youtube.com_cookies.txt',
#'nocheckcertificate': True,
# 'extract_flat': True,
'cachedir': False,
'quiet': True
}
info = None
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info(url, download=False)
if info is None: #if there's a lot of latency, try again after .5 secs
await asyncio.sleep(0.5)
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info(url, download=False)
seconds = int(info['duration'])
duration = str(datetime.timedelta(seconds=seconds))
temp_list = [url, info['title'], duration]
global queue
queue.append(temp_list)
await message.channel.send("successfully added video to queue")
play_next()
return
async def search_and_paste_link(item : str, message):
ydl_opts = {
'format': 'bestaudio/best',
# 'ignoreerrors': True,
'no_warnings': True,
'cookiefile': 'youtube.com_cookies.txt',
# 'nocheckcertificate': True,
'extract_flat': 'in_playlist',
# 'skip_download': True,
'cachedir': False,
'quiet': True,
'noplaylist': True
}
info = None #
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info("ytsearch:%s" % item, download=False)
if info is None: # if there's a lot of latency, try again after .3 secs
await asyncio.sleep(0.3)
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info("ytsearch:%s" % item, download=False)
print(info)
await asyncio.sleep(0.3)
new_message = str("http://www.youtube.com/watch?v=") + str(info['entries'][0]['url'])
await message.channel.send(new_message)
async def search_video(item : str, message):
ydl_opts = {
'format': 'bestaudio/best',
# 'ignoreerrors': True,
'no_warnings': True,
'cookiefile': 'youtube.com_cookies.txt',
#'nocheckcertificate': True,
'extract_flat': 'in_playlist',
# 'skip_download': True,
'cachedir': False,
'quiet': True,
'noplaylist': True
}
info = None
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info("ytsearch:%s" % item, download=False)
if info is None: #if there's a lot of latency, try again after .3 secs
await asyncio.sleep(0.3)
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info("ytsearch:%s" % item, download=False)
print(info)
await asyncio.sleep(0.3)
if len(info['entries']) > 0:
seconds = 0
if info['entries'][0]['duration'] is not None:
seconds = int(info['entries'][0]['duration'])
duration = str(datetime.timedelta(seconds=seconds))
if seconds == 0:
duration = "unknown"
tempList = ["http://www.youtube.com/watch?v="+info['entries'][0]['url'], info['entries'][0]['title'], duration]
global queue
queue.append(tempList)
await message.channel.send("successfully added video to queue")
play_next()
else:
await message.channel.send("couldn't find a suitable result")
return
def play_next():
voice = discord.utils.get(client.voice_clients)
global queue
try:
if not voice.is_playing() and len(queue) > 0:
song = queue[0]
queue.pop(0)
global nowPlaying
nowPlaying.clear()
nowPlaying.extend(song)
asyncio.run_coroutine_threadsafe(play_this_url(nowPlaying[0]), loop=client.loop)
except AttributeError:
if len(queue) > 0:
print('trying again here')
play_next()
return
async def play_this_url(url : str):
voice = discord.utils.get(client.voice_clients)
try:
player = await ytdl_source.from_url(url, loop=client.loop, stream=True)
if player is None:
return
voice.play(player, after=lambda er: play_next())
except Exception as e:
print('Exception in play_this_url:', e)
play_next()
return
|
python
|
# --- import --------------------------------------------------------------------------------------
import os
import numpy as np
import WrightTools as wt
from . import _pulse
from ._scan import Scan
# --- define --------------------------------------------------------------------------------------
here = os.path.abspath(os.path.dirname(__file__))
# integration defaults
timestep = 4.0
early_buffer = 100.0
late_buffer = 400.0
# --- class ---------------------------------------------------------------------------------------
class Experiment:
"""Experiment."""
def __init__(self, axes, name, pm, pulse_class):
# basic attributes
self.axes = axes
for a in self.axes:
setattr(self, a.name, a)
self.name = name
self.pm = pm
self.npulses = len(pm)
self.timestep = timestep
self.early_buffer = early_buffer
self.late_buffer = late_buffer
# pulse
self.pulse_class = pulse_class
self.pulses = [self.pulse_class() for _ in self.pm]
def __repr__(self):
return '<WrightSim.Experiment object \'{0}\' at {1}>'.format(self.name, str(id(self)))
@property
def active_axes(self):
return [a for a in self.axes if a.active]
@property
def axis_names(self):
return [a.name for a in self.axes]
def run(self, hamiltonian, mp=True):
"""Run the experiment.
Parameters
----------
hamiltonian : WrightSim Hamiltonian
Hamiltonian.
mp : boolean (optional)
Toggle CPU multiprocessing. Default is True.
Returns
-------
WrightSim Scan
Scan that was run."""
out = Scan(self, hamiltonian)
out.run(mp=mp)
# finish
return out
def set_axis(self, axis_name, points):
'''
Activate and define points for one of the experimental axes.
Parameters
----------
axis_name : string
Name of axis.
points : 1D array-like
Points (in native units) to scan over.
'''
# TODO: is there a way to prevent incompatible axes from being simultaneously activated?
axis_index = self.axis_names.index(axis_name)
axis = self.axes[axis_index]
axis.points = points
axis.active = True
|
python
|
import torch
from torch import nn
from torch.nn import functional as F
from typing import List
from resnet_layer import ResidualLayer
class ConvDecoder(nn.Module):
def __init__(self,
in_channels: int,
embedding_dim: int,
hidden_dims: List = [128, 256],
img_size: int = 32,
activation=nn.LeakyReLU,
**kwargs) -> None:
super().__init__()
# Build Decoder
modules = []
modules.append(
nn.Sequential(
nn.Conv2d(embedding_dim,
hidden_dims[-1],
kernel_size=1,
stride=1,
padding=0),
activation())
)
for _ in range(6):
modules.append(ResidualLayer(hidden_dims[-1], hidden_dims[-1]))
modules.append(activation())
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
kernel_size = 3 if i == 0 else 4
output_padding = 1 if i == 0 else 0
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=kernel_size,
stride=2,
padding=1,
output_padding=output_padding),
activation())
)
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
out_channels=in_channels,
kernel_size=4,
stride=2,
padding=1,
output_padding=0),
nn.Tanh()))
self.decoder = nn.Sequential(*modules)
def forward(self, x):
return self.decoder(x)
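# A minimal usage sketch, assuming the `resnet_layer` import above resolves and that the
# latent grid fed to the decoder is img_size // 4 per side (matching the two stride-2
# transposed convolutions). Passing hidden_dims explicitly avoids reversing the shared
# default list in place.
if __name__ == "__main__":
    decoder = ConvDecoder(in_channels=3, embedding_dim=64, hidden_dims=[128, 256], img_size=32)
    z = torch.randn(8, 64, 8, 8)   # batch of 8 latent maps, 64 channels, 8x8 spatial grid
    out = decoder(z)
    print(out.shape)               # expected: torch.Size([8, 3, 32, 32])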
|
python
|
from dassl.engine import TRAINER_REGISTRY
from dassl.engine.trainer import TrainerMultiAdaptation
from dassl.data import DataManager
from dassl.utils import MetricMeter
from torch.utils.data import Dataset as TorchDataset
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import count_num_param
import torch
import torch.nn as nn
from torch.nn import functional as F
from dassl.engine.trainer import SimpleNet
import numpy as np
from dassl.modeling import build_layer
from dassl.modeling.ops import ReverseGrad
import torchmetrics
from dassl.utils.kernel import GaussianKernel
from typing import Optional, Sequence
class MultipleKernelMaximumMeanDiscrepancy(nn.Module):
r"""The Multiple Kernel Maximum Mean Discrepancy (MK-MMD) used in
`Learning Transferable Features with Deep Adaptation Networks (ICML 2015) <https://arxiv.org/pdf/1502.02791>`_
Given source domain :math:`\mathcal{D}_s` of :math:`n_s` labeled points and target domain :math:`\mathcal{D}_t`
of :math:`n_t` unlabeled points drawn i.i.d. from P and Q respectively, the deep networks will generate
activations as :math:`\{z_i^s\}_{i=1}^{n_s}` and :math:`\{z_i^t\}_{i=1}^{n_t}`.
The MK-MMD :math:`D_k (P, Q)` between probability distributions P and Q is defined as
.. math::
D_k(P, Q) \triangleq \| E_p [\phi(z^s)] - E_q [\phi(z^t)] \|^2_{\mathcal{H}_k},
:math:`k` is a kernel function in the function space
.. math::
\mathcal{K} \triangleq \{ k=\sum_{u=1}^{m}\beta_{u} k_{u} \}
where :math:`k_{u}` is a single kernel.
Using kernel trick, MK-MMD can be computed as
.. math::
\hat{D}_k(P, Q) &=
\dfrac{1}{n_s^2} \sum_{i=1}^{n_s}\sum_{j=1}^{n_s} k(z_i^{s}, z_j^{s})\\
&+ \dfrac{1}{n_t^2} \sum_{i=1}^{n_t}\sum_{j=1}^{n_t} k(z_i^{t}, z_j^{t})\\
&- \dfrac{2}{n_s n_t} \sum_{i=1}^{n_s}\sum_{j=1}^{n_t} k(z_i^{s}, z_j^{t}).\\
Args:
kernels (tuple(torch.nn.Module)): kernel functions.
linear (bool): whether to use the linear version of DAN. Default: False
Inputs:
- z_s (tensor): activations from the source domain, :math:`z^s`
- z_t (tensor): activations from the target domain, :math:`z^t`
Shape:
- Inputs: :math:`(minibatch, *)` where * means any dimension
- Outputs: scalar
.. note::
Activations :math:`z^{s}` and :math:`z^{t}` must have the same shape.
.. note::
The kernel values will add up when there are multiple kernels.
Examples::
# >>> from dalib.modules.kernels import GaussianKernel
# >>> feature_dim = 1024
# >>> batch_size = 10
# >>> kernels = (GaussianKernel(alpha=0.5), GaussianKernel(alpha=1.), GaussianKernel(alpha=2.))
# >>> loss = MultipleKernelMaximumMeanDiscrepancy(kernels)
# >>> # features from source domain and target domain
# >>> z_s, z_t = torch.randn(batch_size, feature_dim), torch.randn(batch_size, feature_dim)
# >>> output = loss(z_s, z_t)
"""
def __init__(self, kernels: Sequence[nn.Module], linear: Optional[bool] = False):
super(MultipleKernelMaximumMeanDiscrepancy, self).__init__()
self.kernels = kernels
self.index_matrix = None
self.linear = linear
def forward(self, z_s: torch.Tensor, z_t: torch.Tensor) -> torch.Tensor:
features = torch.cat([z_s, z_t], dim=0)
batch_size = int(z_s.size(0))
self.index_matrix = _update_index_matrix(batch_size, self.index_matrix, self.linear).to(z_s.device)
# print("index matrix : ",self.index_matrix)
kernel_matrix = sum([kernel(features) for kernel in self.kernels]) # Add up the matrix of each kernel
# Add 2 / (n-1) to make up for the value on the diagonal
# to ensure loss is positive in the non-linear version
# print("kernel matrix : ",kernel_matrix)
l = (kernel_matrix * self.index_matrix).sum()
# print("l : ",l)
loss = (l + 2. / float(batch_size - 1))
return loss
def _update_index_matrix(batch_size: int, index_matrix: Optional[torch.Tensor] = None,
linear: Optional[bool] = True) -> torch.Tensor:
r"""
Update the `index_matrix` which convert `kernel_matrix` to loss.
If `index_matrix` is a tensor with shape (2 x batch_size, 2 x batch_size), then return `index_matrix`.
Else return a new tensor with shape (2 x batch_size, 2 x batch_size).
"""
if index_matrix is None or index_matrix.size(0) != batch_size * 2:
index_matrix = torch.zeros(2 * batch_size, 2 * batch_size)
if linear:
for i in range(batch_size):
s1, s2 = i, (i + 1) % batch_size
t1, t2 = s1 + batch_size, s2 + batch_size
index_matrix[s1, s2] = 1. / float(batch_size)
index_matrix[t1, t2] = 1. / float(batch_size)
index_matrix[s1, t2] = -1. / float(batch_size)
index_matrix[s2, t1] = -1. / float(batch_size)
else:
for i in range(batch_size):
for j in range(batch_size):
if i != j:
index_matrix[i][j] = 1. / float(batch_size * (batch_size - 1))
index_matrix[i + batch_size][j + batch_size] = 1. / float(batch_size * (batch_size - 1))
for i in range(batch_size):
for j in range(batch_size):
index_matrix[i][j + batch_size] = -1. / float(batch_size * batch_size)
index_matrix[i + batch_size][j] = -1. / float(batch_size * batch_size)
return index_matrix
@TRAINER_REGISTRY.register()
class MultiDatasetDan(TrainerMultiAdaptation):
"""
"""
def __init__(self, cfg,require_parameter=None):
super().__init__(cfg,require_parameter)
self.bce = nn.BCEWithLogitsLoss()
self.max_epoch = self.cfg.OPTIM.MAX_EPOCH
self.trade_off = cfg.LIGHTNING_MODEL.TRAINER.DAN.trade_off
print("trade off ratio : ", self.trade_off)
alpha = cfg.LIGHTNING_MODEL.TRAINER.DAN.GaussianKernel.alpha
sigma = cfg.LIGHTNING_MODEL.TRAINER.DAN.GaussianKernel.sigma
track_running_stats = cfg.LIGHTNING_MODEL.TRAINER.DAN.GaussianKernel.track_running_stats
linear = cfg.LIGHTNING_MODEL.TRAINER.DAN.linear
if len(sigma) == 0:
# sigma = None
# define loss function
print("alpha range : ", alpha)
self.mkmmd_loss = MultipleKernelMaximumMeanDiscrepancy(
kernels=[GaussianKernel(alpha=k, track_running_stats=track_running_stats) for k in alpha],
linear=linear
)
else:
print("sigma range : ", sigma)
self.mkmmd_loss = MultipleKernelMaximumMeanDiscrepancy(
kernels=[GaussianKernel(sigma=s, track_running_stats=track_running_stats) for s in sigma],
linear=linear
)
def build_model(self):
cfg = self.cfg
print("Params : ", cfg.LIGHTNING_MODEL.COMPONENTS.BACKBONE)
print('Building F')
print('Building CommonFeature')
backbone_info = cfg.LIGHTNING_MODEL.COMPONENTS.BACKBONE
FC_info = cfg.LIGHTNING_MODEL.COMPONENTS.LAST_FC
self.CommonFeature = SimpleNet(backbone_info, FC_info, 0, **cfg.LIGHTNING_MODEL.COMPONENTS.BACKBONE.PARAMS)
freeze_common_feature = cfg.LIGHTNING_MODEL.COMPONENTS.BACKBONE.FREEZE if cfg.LIGHTNING_MODEL.COMPONENTS.BACKBONE.FREEZE else False
if freeze_common_feature:
for parameter in self.CommonFeature.parameters():
parameter.requires_grad = False
print("freeze feature extractor : ",)
self.fdim = self.CommonFeature.fdim
print('Building Target Classifier')
self.TargetClassifier = self.create_classifier(self.fdim, self.num_classes, FC_info=FC_info)
print('Building SourceClassifiers')
print("source domains label size : ", self.source_domains_label_size)
source_classifier_list = []
for num_class in self.source_domains_label_size:
source_classifier = self.create_classifier(self.fdim, num_class, FC_info=FC_info)
source_classifier_list.append(source_classifier)
self.SourceClassifiers = nn.ModuleList(
source_classifier_list
)
def forward(self, input, return_feature=False):
f_target = self.CommonFeature(input)
logits_target = self.TargetClassifier(f_target)
probs = F.softmax(logits_target, dim=1)
if return_feature:
return probs, logits_target
return probs
def configure_optimizers(self):
params = list(self.CommonFeature.parameters()) + \
list(self.TargetClassifier.parameters()) + \
list(self.SourceClassifiers.parameters())
opt_cfg = self.cfg.OPTIM
opt = build_optimizer(params,opt_cfg)
scheduler = build_lr_scheduler(optimizer=opt,optim_cfg=opt_cfg)
optimizers = [opt]
lr_schedulers=[scheduler]
return optimizers, lr_schedulers
def share_step(self,batch,train_mode = True,weight=None):
input, label, domain = self.parse_target_batch(batch)
f_target = self.CommonFeature(input)
logits_target = self.TargetClassifier(f_target)
loss_target = self.loss_function(logits_target, label, train=train_mode,weight=weight)
return loss_target, logits_target,f_target, label
def parse_batch_train(self, batch):
target_batch = batch["target_loader"]
unlabel_batch = batch["unlabel_loader"]
list_source_batches = batch["source_loader"]
return target_batch,unlabel_batch,list_source_batches
def on_train_epoch_start(self) -> None:
if self.source_pretrain_epochs > self.current_epoch:
self.target_ratio = self.cfg.LIGHTNING_MODEL.TRAINER.EXTRA.PRETRAIN_TARGET_LOSS_RATIO
self.source_ratio = self.cfg.LIGHTNING_MODEL.TRAINER.EXTRA.PRETRAIN_SOURCE_LOSS_RATIO
else:
self.target_ratio = self.cfg.LIGHTNING_MODEL.TRAINER.EXTRA.TARGET_LOSS_RATIO
self.source_ratio = self.cfg.LIGHTNING_MODEL.TRAINER.EXTRA.SOURCE_LOSS_RATIO
def training_step(self, batch, batch_idx):
target_batch, unlabel_batch ,list_source_batches = self.parse_batch_train(batch)
list_input_u, list_label_u, domain_u = self.parse_source_batches(list_source_batches)
loss_source = 0
for u, y, d in zip(list_input_u, list_label_u, domain_u):
# print("check range for source data : {} - {}".format(u.max(),u.min()))
f = self.CommonFeature(u)
logits = self.SourceClassifiers[d](f)
domain_weight = self.source_domains_class_weight[d]
loss_source += self.loss_function(logits, y, train=True, weight=domain_weight)
loss_source /= len(domain_u)
loss_target, logit_target, f_target,label = self.share_step(target_batch, train_mode=True,
weight=self.class_weight)
y_pred = F.softmax(logit_target, dim=1)
y = label
acc = self.train_acc(y_pred, y)
total_loss = self.source_ratio*loss_source+self.target_ratio*loss_target
f_unlabel = self.CommonFeature(unlabel_batch)
transfer_loss = self.mkmmd_loss(f_target, f_unlabel)
total_loss = total_loss + self.trade_off * transfer_loss
self.log('Train_acc', acc, on_step=False, on_epoch=True, prog_bar=True, logger=True)
self.log('Train_loss', total_loss, on_step=False, on_epoch=True, prog_bar=True, logger=True)
self.log('Train_source_loss', loss_source, on_step=False, on_epoch=True, prog_bar=True, logger=True)
self.log('Train_target_loss', loss_target, on_step=False, on_epoch=True, prog_bar=True, logger=True)
self.log('transfer_loss', transfer_loss, on_step=False, on_epoch=True, prog_bar=True, logger=True)
return {'loss': total_loss}
def validation_step(self, batch, batch_idx, dataset_idx: Optional[int] = None):
loss, logit, _,y = self.share_step(batch,train_mode=False)
y_pred = F.softmax(logit, dim=1)
if dataset_idx == 0 :
acc = self.valid_acc(y_pred, y)
log = {
"val_loss": loss*self.non_save_ratio,
"val_acc": acc,
}
self.log_dict(log, on_step=False, on_epoch=True, prog_bar=True, logger=True,add_dataloader_idx=False)
else:
acc = self.test_acc(y_pred, y)
log = {
"test_loss": loss,
"test_acc": acc
}
self.log_dict(log, on_step=False, on_epoch=True, prog_bar=False, logger=True,add_dataloader_idx=False)
return {'loss': loss}
def test_step(self, batch, batch_idx, dataset_idx: Optional[int] = None):
loss, logit, _,y = self.share_step(batch,train_mode=False)
y_pred = F.softmax(logit,dim=1)
return {'loss': loss,'y_pred':y_pred,'y':y}
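# A minimal smoke test for the MK-MMD loss defined above (a sketch only, assuming the
# module's dassl/torchmetrics imports resolve). The inline Gaussian kernel is a toy
# stand-in and is not dassl's GaussianKernel.
if __name__ == "__main__":
    class _ToyGaussianKernel(nn.Module):
        def __init__(self, sigma):
            super().__init__()
            self.sigma = sigma
        def forward(self, features):
            # pairwise Gaussian kernel matrix over all rows of `features`
            dist_sq = torch.cdist(features, features) ** 2
            return torch.exp(-dist_sq / (2 * self.sigma ** 2))
    mkmmd = MultipleKernelMaximumMeanDiscrepancy(
        kernels=[_ToyGaussianKernel(s) for s in (0.5, 1.0, 2.0)], linear=False)
    z_s = torch.randn(10, 16)        # source-domain features
    z_t = torch.randn(10, 16) + 1.0  # target-domain features, shifted to create a discrepancy
    print(mkmmd(z_s, z_t))           # a larger domain shift yields a larger loss value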
|
python
|
'''
A data model focused on material objects.
'''
import synapse.lib.module as s_module
class MatModule(s_module.CoreModule):
def getModelDefs(self):
modl = {
'types': (
('mat:item', ('guid', {}), {'doc': 'A GUID assigned to a material object.'}),
('mat:spec', ('guid', {}), {'doc': 'A GUID assigned to a material specification.'}),
('mat:specimage', ('comp', {'fields': (('spec', 'mat:spec'), ('file', 'file:bytes'))}), {}),
('mat:itemimage', ('comp', {'fields': (('item', 'mat:item'), ('file', 'file:bytes'))}), {}),
# TODO add base types for mass / volume
),
'forms': (
('mat:item', {}, (
('name', ('str', {'lower': True}), {'doc': 'The human readable name of the material item.'}),
('spec', ('mat:spec', {}), {
'doc': 'The mat:spec of which this item is an instance.',
}),
('place', ('geo:place', {}), {'doc': 'The most recent place the item is known to reside.'}),
('latlong', ('geo:latlong', {}), {'doc': 'The last known lat/long location of the node.'}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the node.',
}),
# TODO add baseline things like dimensions / mass / etc?
)),
('mat:spec', {}, (
('name', ('str', {'lower': True}), {'doc': 'The human readable name of the material spec.'}),
)),
('mat:itemimage', {}, (
('item', ('mat:item', {}), {'doc': 'The item contained within the image file.'}),
('file', ('file:bytes', {}), {'doc': 'The file containing an image of the item.'}),
)),
('mat:specimage', {}, (
('spec', ('mat:spec', {}), {'doc': 'The spec contained within the image file.'}),
('file', ('file:bytes', {}), {'doc': 'The file containing an image of the spec.'}),
)),
),
}
name = 'mat'
return ((name, modl), )
|
python
|
from .test_utils import *
print('#############################################')
print('# TESTING OF MainDeviceVars MODEL FUNCTIONS #')
print('#############################################')
@tag('maindevicevars')
class MainDeviceVarsModelTests(TestCase):
def setUp(self):
from utils.BBDD import getRegistersDBInstance
self.DB=getRegistersDBInstance()
self.DB.dropTable(table='MainVariables')
self.signal_was_called = False
self.signaltimestamp=None
self.signalTag=None
self.signalValue=None
def handler(sender, **kwargs):
self.signal_was_called = True
self.signaltimestamp=kwargs['timestamp']
self.signalTag=kwargs['Tags'][0]
self.signalValue=kwargs['Values'][0]
self.handler=handler
pass
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
# INDIVIDUAL FUNCTIONS TESTING
def test_store2DB(self):
'''
store2DB: method provided to perform the following steps:
- Validate the input data for the GPIO
- Saves the instance into the DB
- Introduces a first register into the registers DB with the current value (read for Inputs, forced for Outputs)
'''
print('## TESTING THE OPERATION OF THE store2DB METHOD ##')
SignalVariableValueUpdated.connect(self.handler)
instance=MainDeviceVars(**MainDeviceVarDict)
now=timezone.now().replace(microsecond=0).replace(tzinfo=None)
instance.store2DB()
SignalVariableValueUpdated.disconnect(self.handler)
# checks values from the signal
self.assertAlmostEqual(self.signaltimestamp,timezone.now().replace(microsecond=0),delta=datetime.timedelta(seconds=1))# signal timestamp value is dated now
self.assertEqual(self.signalValue,MainDeviceVarDict['Value'])
self.assertEqual(self.signalTag,str(instance.pk))
# checks that store2DB creates the corresponding table in the registers DB and introduces a first record with the current value
self.assertEqual(instance.Value,MainDeviceVarDict['Value'])
self.assertTrue(self.DB.checkIfTableExist(table=instance.getRegistersDBTable()))
latest=instance.getLatestData(localized=False)
self.assertAlmostEqual(latest[instance.getRegistersDBTag()]['timestamp'],now,delta=datetime.timedelta(seconds=1))# latest value is dated now
self.assertEqual(latest[instance.getRegistersDBTag()]['value'],MainDeviceVarDict['Value'])# latest value is the same as in the dict
self.DB.dropTable(table=instance.getRegistersDBTable())
def test_updateValue(self):
'''
updateValue: method that handles the creation of registers DB rows. It has several alternative operational paths:
- The standard one is when the pre-initialized parameters are defaulted. In this situation, it checks if newValue is different from the previous one
and if so, it introduces a row with the previous value and a second one with the newValue. These two rows are separated by 1 second in their timestamps
to provide step-like plots.
- If a timestamp is provided, only one row is created with the passed timestamp if and only if newValue is different from the previous one.
- If force=True, it generates the row independently of the newValue.
Independently of the operational path followed, this method also sets up the value of the GPIO in case it is an output.
'''
print('## TESTING THE OPERATION OF THE updateValue METHOD ##')
instance=MainDeviceVars(**MainDeviceVarDict)
instance.save() # to avoid the creation of the DB tables and insertion of the first row that function store2DB does...
print(' -> Tested standard path')
now=timezone.now().replace(microsecond=0).replace(tzinfo=None)
SignalVariableValueUpdated.connect(self.handler)
instance.updateValue(newValue=22,timestamp=None,writeDB=True,force=False)
SignalVariableValueUpdated.disconnect(self.handler)
# checks values from the signal
self.assertAlmostEqual(self.signaltimestamp,timezone.now().replace(microsecond=0),delta=datetime.timedelta(seconds=1))# signal timestamp value is dated now
self.assertEqual(self.signalValue,instance.Value)
self.assertEqual(self.signalTag,str(instance.pk))
table=instance.getRegistersDBTable()
vars='"timestamp","'+instance.getRegistersDBTag()+'"'
sql='SELECT '+vars+' FROM "'+ table +'" ORDER BY timestamp DESC LIMIT 2'
rows=self.DB.executeTransaction(SQLstatement=sql)
self.assertEqual(rows[1][1],MainDeviceVarDict['Value'])# previous to latest value equals the previous Value
self.assertEqual(rows[0][1],22) # latest value equals the newValue
self.assertEqual(rows[0][0]-rows[1][0],datetime.timedelta(seconds=1))# checks that it inserts two rows with 1 second difference
self.assertAlmostEqual(rows[0][0],now,delta=datetime.timedelta(seconds=1))# checks that the latest value is dated now
print(' -> Tested update with timestamp')
now=timezone.now().replace(microsecond=0)+datetime.timedelta(seconds=10)
SignalVariableValueUpdated.connect(self.handler)
instance.updateValue(newValue=21,timestamp=now,writeDB=True,force=False)
SignalVariableValueUpdated.disconnect(self.handler)
# checks values from the signal
self.assertAlmostEqual(self.signaltimestamp,timezone.now()+datetime.timedelta(seconds=10),delta=datetime.timedelta(seconds=1))# signal timestamp value is dated now
self.assertEqual(self.signalValue,instance.Value)
self.assertEqual(self.signalTag,str(instance.pk))
latest=instance.getLatestData(localized=False)
self.assertEqual(latest[instance.getRegistersDBTag()]['timestamp'],now.replace(tzinfo=None))# latest value is dated now
self.assertEqual(latest[instance.getRegistersDBTag()]['value'],21)# latest value is dated now
self.DB.dropTable(table=instance.getRegistersDBTable())
def test_IntegrityError(self):
'''
This test checks that two semi-simultaneous MainVars queries to the registers DB cause no error. In fact, the
DB driver handles it by updating the conflicting row.
'''
import time
print('## TESTING THE OPERATION OF THE registers DB Integrity Error handler METHOD ##')
instance=MainDeviceVars(**MainDeviceVarDict)
instance.store2DB()
newDict=editDict(keys=['Value','Label'], newValues=[15,'Test MainVar 2'], Dictionary=MainDeviceVarDict)
instance2=MainDeviceVars(**newDict)
time.sleep(1)
instance2.store2DB()
time.sleep(1)
now=timezone.now().replace(microsecond=0).replace(tzinfo=None)
newValue1=21
newValue2=16
instance.updateValue(newValue=newValue1,timestamp=now,writeDB=True,force=False)
instance2.updateValue(newValue=newValue2,timestamp=now,writeDB=True,force=False)
table=instance.getRegistersDBTable()
vars='"timestamp","'+instance.getRegistersDBTag()+'"'+ ',"'+instance2.getRegistersDBTag()+'"'
sql='SELECT '+vars+' FROM "'+ table +'" ORDER BY timestamp ASC'
rows=self.DB.executeTransaction(SQLstatement=sql)
# initialization
self.assertEqual(rows[0][1],MainDeviceVarDict['Value']) # initial value of instance
self.assertEqual(rows[0][2],None) # instance2 not yet created
self.assertEqual(rows[1][2],newDict['Value']) # initial value of instance2
# instances updateValue
self.assertEqual(rows[2][1],newValue1) # new value of instance
self.assertEqual(rows[2][2],newValue2) # initial value of instance2
# time span
for i in range(0,2):
self.assertEqual(rows[i+1][0]-rows[i][0],datetime.timedelta(seconds=1))# checks that it inserts two rows with 1 second difference
self.DB.dropTable(table=instance.getRegistersDBTable())
self.DB.dropTable(table=instance2.getRegistersDBTable())
def test_str(self):
print('## TESTING THE OPERATION OF THE str METHOD ##')
instance=MainDeviceVars(**MainDeviceVarDict)
instance.store2DB()
self.assertEqual(str(instance),instance.Label)
self.DB.dropTable(table=instance.getRegistersDBTable())
def test_getCharts(self):
'''
getCharts: method that retrieves the chart structured in a dictionary with the following keys:
- title : the table name
- cols : a list with the first element being a list of dictionaries describing data of each of the columns in the graph
. label : human readable label for the variable (a list of 8 elements in case of digital variables)
. name : the name of the variable
. type : the type (digital, analog, datetime) of the variable
. plottype : the type of plot desired for the variable
- rows : a list of the row values of the graph. Each row is a list with the first element being a unix timestamp and the following ones are the values of the variables.
- statistics: a dictionary with the following keys:
. number : the number of the statistic indicators
. num_rows : the number of rows of the graph
. mean : a list with the mean values of each of the columns. A None value is introduced for digital variables
. max : a list with the max values of each of the columns.
. min : a list with the min values of each of the columns.
. on_time : a list with the amount of seconds being at value==1 of each of the columns (for digital variables only, None else)
. off_time : a list with the amount of seconds being at value==0 of each of the columns (for digital variables only, None else)
In case no values are in the DB in the time span required, it returns two rows with date dateIni and dateEnd respectively with the rows
being:
- the last values present in the DB if there are any.
- None in case no register can be found at all.
'''
print('## TESTING THE OPERATION OF THE getCharts METHOD ##')
import time
print(' -> Tested with valid records in the DB')
local_tz=get_localzone()
dateIni=(timezone.now()-datetime.timedelta(seconds=1)).replace(microsecond=0)
instance=MainDeviceVars(**MainDeviceVarDict)
instance.store2DB()
time.sleep(1)
newDict=editDict(keys=['Value','Label'], newValues=[15,'Test MainVar 2'], Dictionary=MainDeviceVarDict)
instance2=MainDeviceVars(**newDict)
instance2.store2DB()
time.sleep(1)
newValue1=21
newValue2=16
now=timezone.now()
instance.updateValue(newValue=newValue1,timestamp=now,writeDB=True,force=False)
instance2.updateValue(newValue=newValue2,timestamp=now,writeDB=True,force=False)
time.sleep(1)
instance2.updateValue(newValue=newValue2-1,timestamp=timezone.now(),writeDB=True,force=False)
dateEnd=(timezone.now()+datetime.timedelta(seconds=4)).replace(microsecond=0)
chart=MainDeviceVars.getCharts(fromDate=dateIni,toDate=dateEnd)
# missing values are filled with the previous or the next valid value
title=chart['title']
self.assertTrue('MainVariables' in title)
self.assertEqual(chart['cols'][0][0]['label'],'timestamp') # first column is timestamp
self.assertEqual(chart['cols'][0][1]['label'],MainDeviceVarDict['Label']) # second column is the first var
self.assertEqual(chart['cols'][0][2]['label'],newDict['Label']) # third column is the second var
self.assertEqual(len(chart['rows']),4) # there are 3 rows with data
self.assertEqual(chart['rows'][0][1],MainDeviceVarDict['Value'])
self.assertEqual(chart['rows'][0][2],newDict['Value']) # this value is filled in with previous or next valid value
self.assertEqual(chart['rows'][1][1],MainDeviceVarDict['Value'])
self.assertEqual(chart['rows'][1][2],newDict['Value'])
self.assertEqual(chart['rows'][2][1],newValue1)
self.assertEqual(chart['rows'][2][2],newValue2)
self.assertEqual(chart['rows'][3][1],newValue1)
self.assertEqual(chart['rows'][3][2],newValue2-1)
print(' -> Tested with no records in the requested timespan but with earlier records in the DB')
''' creates two registers dated in dateIni and dateEnd with the last value from the registers DB
'''
dateIni=(timezone.now()+datetime.timedelta(seconds=10)).replace(microsecond=0)
dateEnd=(dateIni+datetime.timedelta(seconds=10)).replace(microsecond=0)
chart=MainDeviceVars.getCharts(fromDate=dateIni,toDate=dateEnd)
title=chart['title']
self.assertEqual(len(chart['rows']),2) # there are 2 rows with data dated at dateIni and dateEnd resp.
self.assertEqual(chart['rows'][0][1], chart['rows'][1][1]) # checks both rows have the same value
self.assertAlmostEqual(datetime.datetime.fromtimestamp(chart['rows'][0][0]/1000,tz=local_tz),dateIni,delta=datetime.timedelta(seconds=1))# checks that the first row is dated as dateIni
self.assertAlmostEqual(datetime.datetime.fromtimestamp(chart['rows'][1][0]/1000,tz=local_tz),dateEnd,delta=datetime.timedelta(seconds=1))# checks that the second row is dated as dateEnd
self.DB.dropTable(table=instance.getRegistersDBTable())
self.DB.dropTable(table=instance2.getRegistersDBTable())
print(' -> Tested with no table in the DB')
instance.delete()
instance2.delete()
instance=MainDeviceVars(**MainDeviceVarDict)
instance.save()
instance2=MainDeviceVars(**newDict)
instance2.save()
chart=MainDeviceVars.getCharts(fromDate=dateIni,toDate=dateEnd)
title=chart['title']
self.assertAlmostEqual(datetime.datetime.fromtimestamp(chart['rows'][0][0]/1000,tz=local_tz),dateIni,delta=datetime.timedelta(seconds=1))# checks that the first row is dated as dateIni
self.assertAlmostEqual(datetime.datetime.fromtimestamp(chart['rows'][1][0]/1000,tz=local_tz),dateEnd,delta=datetime.timedelta(seconds=1))# checks that the second row is dated as dateEnd
for i,col in enumerate(chart['cols'][0]):
if col['type']==DTYPE_DIGITAL:
self.assertEqual(chart['rows'][0][i],[None,None,None,None,None,None,None,None]) # all None values
elif col['type']!='datetime':
self.assertEqual(chart['rows'][0][i],None) # all None values
print(' -> Tested with empty table in the DB')
instance.checkRegistersDB(Database=self.DB)
instance2.checkRegistersDB(Database=self.DB)
self.assertTrue(self.DB.checkIfTableExist(instance.getRegistersDBTable()))
self.assertTrue(self.DB.checkIfTableExist(instance2.getRegistersDBTable()))
charts=MasterGPIOs.getCharts(fromDate=dateIni,toDate=dateEnd)
for chart in charts:
title=chart['title']
self.assertTrue(len(chart['rows'])==2) # there are 2 rows with data dated at dateIni and dateEnd resp.
self.assertAlmostEqual(datetime.datetime.fromtimestamp(chart['rows'][0][0]/1000,tz=local_tz),dateIni,delta=datetime.timedelta(seconds=1))# checks that the first row is dated as dateIni
self.assertAlmostEqual(datetime.datetime.fromtimestamp(chart['rows'][1][0]/1000,tz=local_tz),dateEnd,delta=datetime.timedelta(seconds=1))# checks that the second row is dated as dateEnd
for i,col in enumerate(chart['cols'][0]):
if col['type']==DTYPE_DIGITAL:
self.assertEqual(chart['rows'][0][i],[None,None,None,None,None,None,None,None]) # all None values
elif col['type']!='datetime':
self.assertEqual(chart['rows'][0][i],None) # all None values
self.DB.dropTable(table=instance.getRegistersDBTable())
self.DB.dropTable(table=instance2.getRegistersDBTable())
def testAssignSubsystem(self):
print('## TESTING THE ASSIGNMENT OF A SUBSYSTEM ##')
instance=MainDeviceVars(**MainDeviceVarDict)
instance.store2DB()
SUBSYSTEMs=MainAPP.models.Subsystems.objects.filter(mainvars=instance)
self.assertQuerysetEqual(SUBSYSTEMs,[]) # no subsystem assigned
subsystem=MainAPP.models.Subsystems(Name=0,content_object=instance)
subsystem.save()
SUBSYSTEMs=MainAPP.models.Subsystems.objects.filter(mainvars=instance)
self.assertEqual(list(SUBSYSTEMs),[subsystem,]) # a subsystem returned
newLabel='New label for you'
instance.updateLabel(newLabel=newLabel)
AVAR=MainAPP.models.AutomationVariables.objects.get(Device='MainVars',Tag=instance.getRegistersDBTag())
self.assertEqual(AVAR.Label,newLabel) # an AVAR is now created
self.DB.dropTable(table=instance.getRegistersDBTable())
def testAutomationVarsManagement(self):
print('## TESTING THE MANAGEMENT OF THE AUTOMATION VARS ##')
instance=MainDeviceVars(**MainDeviceVarDict)
instance.store2DB() # this should create automation var
AVARs=MainAPP.models.AutomationVariables.objects.filter(Device='MainVars').filter(Tag=instance.getRegistersDBTag())
self.assertEqual(1,AVARs.count()) # one automationvar is returned
# one update is generated to check that no additional AVARs are created
now=timezone.now().replace(microsecond=0).replace(tzinfo=None)
newValue1=21
instance.updateLabel(newLabel='Test new label')
AVARs=MainAPP.models.AutomationVariables.objects.filter(Device='MainVars').filter(Tag=instance.getRegistersDBTag())
self.assertEqual(1,AVARs.count()) # only one automationvar is still returned
self.DB.dropTable(table=instance.getRegistersDBTable())
print('###########################################')
print('# TESTING OF MainDeviceVarsForm FUNCTIONS #')
print('###########################################')
@tag('maindevicevarsform')
class DevicesFormTests(TestCase):
remoteDVT=None
localDVT=None
memoryDVT=None
def setUp(self):
from utils.BBDD import getRegistersDBInstance
self.DB=getRegistersDBInstance()
self.DB.dropTable(table='MainVariables')
def test_valid_data(self):
'''
Checks that the form is valid with good data and when saved, creates the instance and its associated automationvar
'''
print('## TESTING THE CREATION OF INSTANCE THROUGH FORM ##')
form = MainDeviceVarsForm(MainDeviceVarDict, action='add')
self.assertTrue(form.is_valid())
instance = form.save()
print(' -> Checked the creation of registers tables')
table=instance.getRegistersDBTable()
exist=self.DB.checkIfTableExist(table=table)
self.assertEqual(exist,True)
print(' -> Checked the creation of automation var')
AVARs=MainAPP.models.AutomationVariables.objects.filter(Device='MainVars').filter(Tag=instance.getRegistersDBTag())
self.assertEqual(1,AVARs.count()) # one automationvar is returned
print(' -> Checked that the automation var is not duplicated')
instance = form.save()
AVARs=MainAPP.models.AutomationVariables.objects.filter(Device='MainVars').filter(Tag=instance.getRegistersDBTag())
self.assertEqual(1,AVARs.count()) # one automationvar is returned
|
python
|
from flask import request
from .argument import ListArgument
class QueryStringParser:
""" A class to parse the query string arguments"""
@staticmethod
def parse_args(qs_args_def):
""" Parse the query string """
qs_args_dict = QueryStringParser.args_def_to_args_dict(qs_args_def)
parsed_args = dict()
passed_args = request.values.to_dict(flat=False)
# Iterate over all passed args
for arg in passed_args:
# Unexpected arg
if arg not in qs_args_dict:
parsed_args[arg] = QueryStringParser.parse_list(passed_args[arg], int)
# Expected a list and a list was passed
elif qs_args_dict[arg]['type'] is list:
parsed_args[arg] = QueryStringParser.parse_list(passed_args[arg], qs_args_dict[arg]['list_item_type'])
else:
# Expected a number or string but a list was passed
if len(passed_args[arg]) > 1:
parsed_args[arg] = passed_args[arg]
continue
# Expected a number (float or int) and a number was passed
if qs_args_dict[arg]['type'] in [int, float]:
parsed_args[arg] = QueryStringParser.parse_number(passed_args[arg][0])
else:
# Expected a string and a string was passed
parsed_args[arg] = passed_args[arg][0]
return parsed_args
@staticmethod
def args_def_to_args_dict(args_def):
""" Convert list of arguments to Flask request parser """
args_attr = dict()
for arg in args_def:
args_attr.update({arg.name: dict()})
if isinstance(arg, ListArgument):
args_attr[arg.name].update({
'type': list,
'list_item_type': arg.arg_obj.arg_type
})
else:
args_attr[arg.name].update({'type': arg.arg_type})
return args_attr
@staticmethod
def parse_list(arg_value, item_type):
parsed_arg = arg_value
if item_type in [int, float]:
for i in range(len(parsed_arg)):
parsed_arg[i] = QueryStringParser.parse_number(parsed_arg[i])
return parsed_arg
@staticmethod
def parse_number(arg_value):
try:
if '.' in arg_value:
return float(arg_value)
else:
return int(arg_value)
except:
return arg_value
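# Example of the standalone helpers (a sketch; unlike parse_args, these static methods need
# no Flask request context):
#   QueryStringParser.parse_number("3.5")          -> 3.5
#   QueryStringParser.parse_number("7")            -> 7
#   QueryStringParser.parse_list(["1", "2"], int)  -> [1, 2]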
|
python
|
from machine import Pin, Timer
import utime
SOUND_SPEED = 0.0343 # speed of sound in cm per microsecond
CM_TO_INCH = 0.393701
CM_TO_FEET = 0.0328084
trigger = Pin(16, Pin.OUT)
echo = Pin(17, Pin.IN)
def get_distance(timer):
trigger.high()
utime.sleep(0.0001)
trigger.low()
start = 0
stop = 0
while echo.value() == 0:
start = utime.ticks_us()
while echo.value() == 1:
stop = utime.ticks_us()
duration = stop - start
distance = (duration * SOUND_SPEED) / 2 # halve the round trip: duration is in microseconds, so the result is in cm
distance_in_inch = round(distance * CM_TO_INCH, 1)
print(distance_in_inch, 'inch, ', round(distance, 1), 'cm')
return distance
# Trying in a different way with Timer instead of "while" loop.
timer = Timer()
timer.init(freq=1, mode=Timer.PERIODIC, callback=get_distance)
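# Worked example of the conversion above (illustrative numbers): an echo lasting 583 microseconds
# gives 583 * 0.0343 / 2, roughly 10 cm, which CM_TO_INCH turns into about 3.9 inches.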
|
python
|
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
from ..postprocessing import multiclass_postprocess
import numpy as np
def test_multiclass_postprocess_smoke():
n = 1000
d = 2
k = 3
b = 10
X_binned = np.random.randint(b, size=(d, n))
feature_graphs = []
for _ in range(d):
feature_graphs.append(np.random.rand(b, k))
def binned_predict_proba(X_binned, k=3):
n = X_binned.shape[1]
return 1 / k * np.ones((n, k))
feature_types = ["numeric"] * d
results = multiclass_postprocess(
X_binned, feature_graphs, binned_predict_proba, feature_types
)
assert "intercepts" in results
assert "feature_graphs" in results
|
python
|
import requests
import base64
from datetime import datetime
from datetime import timedelta
from collections import UserString
class RefreshingToken(UserString):
def __init__(self, token_url, client_id, client_secret, initial_access_token, initial_token_expiry, refresh_token,
expiry_offset=60, proxies=None, certificate_filename=None):
"""
Implementation of UserString that will automatically refresh the token value upon expiry
:param str token_url: token refresh url
:param str client_id: OpenID Connect Client ID
:param str client_secret: OpenID Connect Client Secret
:param str initial_access_token: initial access token
:param int initial_token_expiry: number of seconds the initial token is valid for before expiring
:param str refresh_token: initial refresh token
:param int expiry_offset: number of seconds before token expiry to refresh the token
:param dict proxies: dictionary containing proxy schemas
:param str certificate_filename: The path to the client side certificate to use
"""
token_data = {
"expires": datetime.utcnow() + timedelta(seconds=initial_token_expiry),
"access_token": initial_access_token
}
def get_refresh_token():
# check if the token has expired and refresh if needed
if token_data["expires"] <= datetime.utcnow():
encoded_client = base64.b64encode(bytes(f"{client_id}:{client_secret}", 'utf-8'))
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": f"Basic {encoded_client.decode('utf-8')}"
}
request_body = f"grant_type=refresh_token&scope=openid client groups offline_access&refresh_token={refresh_token}"
# request parameters
kwargs = {"headers": headers}
kwargs["proxies"] = proxies
kwargs["verify"] = certificate_filename
okta_response = requests.post(token_url, data=request_body, **kwargs)
if okta_response.status_code != 200:
raise Exception(okta_response.json())
okta_json = okta_response.json()
# set the expiry just before the actual expiry to be able to refresh in time
delta = timedelta(seconds=okta_json.get("expires_in", 3600) - expiry_offset)
token_data["expires"] = datetime.utcnow() + delta
token_data["access_token"] = okta_json["access_token"]
return token_data["access_token"]
self.refresh_func = get_refresh_token
def __getattribute__(self, item):
token = object.__getattribute__(self, "refresh_func")()
# return the value of the string
if item == "data":
return token
return token.__getattribute__(item)
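# A minimal usage sketch with hypothetical endpoint and credentials (not real values): the
# token behaves like a string, and reading it after expiry triggers a refresh behind the scenes.
if __name__ == "__main__":
    token = RefreshingToken(
        token_url="https://example.okta.com/oauth2/v1/token",  # hypothetical URL
        client_id="my-client-id",
        client_secret="my-client-secret",
        initial_access_token="initial-access-token",
        initial_token_expiry=3600,
        refresh_token="initial-refresh-token",
    )
    headers = {"Authorization": f"Bearer {token}"}  # formatting reads the current token value
    print(headers)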
|
python
|
#FLM: AT Font Info: Andres Torresi
# configure
nombreFamilia='Tagoni'
nombreDisenador='Andres Torresi'
emailDisenador='[email protected]'
urlDisenador='http://www.andrestorresi.com.ar'
urlDistribuidor='http://www.huertatipografica.com.ar'
year='2012'
##
from robofab.world import CurrentFont
# all the foundry settings tools live here:
import time
# You will need a font open in fontlab for this demo
font = CurrentFont()
# Let's get the current year so that the year string is always up to date
font.info.year = time.gmtime(time.time())[0]
# Apply those settings that we just loaded
font.info.copyright = 'Copyright (c) '+year+' '+nombreDisenador+' ('+emailDisenador+'), with Reserved Font Name "'+nombreFamilia+'"'
font.info.trademark = nombreFamilia+' is a trademark of '+nombreDisenador+''
font.info.openTypeNameLicense = 'This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is available with a FAQ at: http://scripts.sil.org/OFL'
font.info.openTypeNameLicenseURL = 'http://scripts.sil.org/OFL'
font.info.openTypeNameDescription = ''
font.info.openTypeOS2VendorID = ''
font.info.openTypeNameManufacturerURL = urlDistribuidor
font.info.openTypeNameDesigner = nombreDisenador
font.info.openTypeNameDesignerURL = urlDisenador
# and call the update method
#print "Done"
font.update()
#fl part
#f = fl.font
#print f.copyright
#print f.year
#print f.customdata
#fl.UpdateFont(-1)
|
python
|
#import sys
import select, queue
from .pool import Pool
from .io import open_listenfd, sys
def main(*args, **kwargs):
"""
@params: init project
"""
if len(sys.argv) != 2:
print("Usage: %s ports", sys.argv[0])
sys.exit(1)
assert len(sys.argv) == 2
port = int(sys.argv[1])
print("[Main] ----- Liso Echo Server -----\n")
server = open_listenfd(port)
if server < 0:
print("open_listen error")
exit()
assert server >= 0
print("[Main] Create listenfd sucessfully")
inputs, outputs = [server], []
pool = Pool()
while True:
print('waiting for the next event', file=sys.stderr)
readable, writable, exceptional = select.select(inputs, outputs, inputs)
# Handle inputs
for s in readable:
if s is server:
# A "readable" socket is ready to accept a connection
connection, client_address = s.accept()
print('connection from', client_address, file=sys.stderr)
connection.setblocking(0)
inputs.append(connection)
pool.add_client_pool(connection)
else: pool.handle_client(s, writable, exceptional, outputs, inputs)
if __name__ == "__main__":
main()
|
python
|
# -*- coding:utf-8 -*-
# coding=<utf8>
from django.db import models
# Models for logging user actions on assets
class Logging(models.Model):
user = models.CharField(max_length=140)
request = models.TextField(blank = True, null = True)
goal = models.TextField(blank = True, null = True)
done = models.BooleanField(default=False)
datetime = models.DateTimeField()
def __unicode__(self):
return str(self.id)+';'.join((str(self.datetime),self.user,self.goal,str(self.done)))
|
python
|
#%% [markdown]
# # Basics of Beamforming and Source Localization with Steered Response Power
# ## Motivation
# Beamforming is a technique to spatially filter out a desired signal and suppress noise. It is applied in many different domains, for example radar, mobile radio, hearing aids, and speech-enabled IoT devices.
#
# ## Signal Model
# 
# Model Description:
# $$\underline{X}(\Omega) = \underline{A}^{\text{ff}}(\Omega) \cdot S(\Omega)$$
# ## Beamforming
# Beamforming or spatial filtering is an array processing technique used to improve the quality of the desired signal in the presence of noise. This filtering is accomplished by a linear combination of the recorded signals $X_m(\Omega)$ and the beamformer weights $W_m(\Omega)$. In other words, the filtered microphone signals are summed together (compare with figure below). When the filter weights are configured correctly, the desired signal is superimposed constructively.
# 
# Image shows a filter and sum beamformer. Microphone signals $\underline{X}(\Omega)$ are multiplied with the beamformer weights $\underline{W}(\Omega)$ and then accumulated to the beamformer output signal $Y(\Omega)$.
# $$Y(\Omega) = \underline{W}^\text{H}(\Omega) \cdot \underline{X}(\Omega)$$
#%%
# Imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
np.set_printoptions(precision=3)
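#%%[markdown]
# A minimal sketch (not part of the original notebook) of the filter-and-sum operation
# $Y(\Omega) = \underline{W}^\text{H}(\Omega) \cdot \underline{X}(\Omega)$, using made-up
# random spectra for `M_demo` microphones over `F_demo` frequency bins and uniform weights $1/M$:
#%%
M_demo, F_demo = 6, 257
X_demo = np.random.randn(M_demo, F_demo) + 1j * np.random.randn(M_demo, F_demo)  # microphone spectra X_m(\Omega)
W_demo = np.ones((M_demo, F_demo)) / M_demo                                       # uniform beamformer weights
Y_demo = np.sum(W_demo.conj() * X_demo, axis=0)                                   # Y = W^H X, per frequency bin
Y_demo.shape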
#%%[markdown]
# ## Parameter
#%%
varphi = 45 / 180 * np.pi # Direction of arrival of the source S(\Omega) relative to the mic array
c = 343000 # Velocity of sound in mm/s
mic = 6 # count of mics
d = 20 # distance in mm
fs = 16000 # Sample rate
n_fft = 512 # Fourier Transform length
n_spec = 257 # Number of frequency bins
n_dir = 180 # Number of directions which the steering vector is steered to
#%%[markdown]
# ## Microphone Positions
# `pos_y` and `pos_x` are the microphone positions. The array is a Uniform Linear Array (ULA), as shown in the plot below.
#%%
pos_y = np.zeros((1,mic))
pos_x = np.r_[0.:mic]*d
fig, ax = plt.subplots()
ax.scatter(pos_x, pos_y, c='tab:red', alpha=1, edgecolors='white')
plt.ylabel('Y Position [mm]')
plt.xlabel('X Position [mm]')
plt.ylim((-50, 50))
#%%[markdown]
# ## Free Field model and delay vectors
# ...
#$$\underline A_q^{\text{ff}}(\Omega) = \exp\big(-j\Omega f_s \Delta\underline \tau(\varphi_q)\big),$$
# Calculate the delay vectors to each microphone to the source $q$ in the frequency domain:
#%%
tau = (pos_x*np.cos(varphi)+pos_y*np.sin(varphi))/c #calculating delay vector tau (in the time domain) depending on the array geometry.
tau = tau.reshape([mic,1,1])
Omega_array = np.r_[0.:n_spec].T*np.pi/n_fft*2
Omega_array = Omega_array.reshape([1,1,n_spec])
A_ff = np.exp(-1j*Omega_array*fs*tau)
#%%
tmp = np.squeeze(np.round(np.angle(A_ff[:,:,:])/np.pi*180))
plt.plot(tmp.T)
plt.ylabel("Angle [Deg]")
plt.xlabel("Frequency [Bin]")
#%%[markdown]
# The plot shows the phase angle of the complex spectral time delays of the desired signal between reference microphone 1 and the other microphones. For higher frequencies the angle grows because the signal oscillates faster, so the same time delay corresponds to a larger phase difference between two microphones.
# ## Delay and Sum Beamformer
# ...
# ## Calculate the steering vectors W_H for the localization:
#%%
angle_array = np.c_[0:360:360/n_dir]/180*np.pi
tau_steering = (pos_x*np.cos(angle_array)+pos_y*np.sin(angle_array))/c
tau_steering = tau_steering.T.copy()
tau_steering = tau_steering.reshape([mic,1,1,n_dir])
W = np.exp(-1j*Omega_array.reshape([1,1,n_spec,1])*fs*tau_steering)
W.shape
W_H = W.reshape([1,mic,n_spec,n_dir]).conj()
W_H.shape
#%%[markdown]
# ## Spatial Covariance
# Another important signal property is the covariance that describes the interdependencies between the microphone signals $\underline X(\Omega)$. To obtain this covariance, it is presumed that the signals are stochastic. When only considering one source ($Q=1$),
# the spatial covariance matrix can be denoted as
# $$\mathbf \Phi_{xx}(\Omega) = \text{E}\{\underline X(\Omega)\underline X^H(\Omega)\}$$
# $$ = \underline A(\Omega) \text{E} \{ S'(\Omega) S'^*(\Omega)\}\underline A^H(\Omega) + \text{E}\{\underline V(\Omega)\underline V^H(\Omega)\}$$
# $$ = \mathbf \Phi_{ss}(\Omega) + \mathbf \Phi_{vv}(\Omega),$$
# where $E\{\cdot\}$ represents the expectation value operator, $^*$ denotes the complex conjugate operator, $\mathbf \Phi_{ss}(\Omega)$ represents the source correlation matrix, $\mathbf \Phi_{vv}(\Omega)$ the noise correlation matrix and $(\cdot)^H$ the Hermitian operator.
# If we assume that no noise is present ($\underline V(\Omega)=0$) and that $\text{E}\{S(\Omega)S^*(\Omega)\}=1$, then the formula for the spatial covariance matrix $\mathbf \Phi_{xx}(\Omega)$ reduces to
# $$\mathbf \Phi_{xx}(\Omega) = \underline A(\Omega) \underline A^H(\Omega) $$
#%%
A_ff_H = A_ff.reshape([1,mic,n_spec]).copy()
A_ff_H = A_ff_H.conj()
phi_xx = A_ff_H * A_ff
#%%
df = pd.DataFrame(phi_xx[:,:,50])
df.style.format('{:,.2f}'.format)
#%%[markdown]
# ## Acoustic Sound Localization
# Acoustic sound localization is the task of locating a sound source given measurements of the sound field. ...
#%%
power_steered = np.zeros((n_spec,n_dir))
for iDir in range(n_dir):
for iF in range(n_spec):
tmp = np.dot(W_H[:,:,iF,iDir], phi_xx[:,:,iF])
power_steered[iF,iDir] = np.abs(np.dot(tmp, W[:,:,iF,iDir]))
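#%%[markdown]
# As an aside (not part of the original notebook): the double loop above can be replaced by a
# single vectorized `np.einsum` call that evaluates $|\underline W^\text{H} \mathbf\Phi_{xx} \underline W|$
# for all frequency bins and steering directions at once.
#%%
power_steered_vec = np.abs(np.einsum('amfd,mnf,nbfd->fd', W_H, phi_xx, W))
print(np.allclose(power_steered, power_steered_vec))  # expected: True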
#%%
plt.figure(1)
plt.imshow(power_steered, aspect='auto', origin='lower',extent=[0,360,0,8])
plt.xticks(np.r_[0.:361:45])
plt.ylabel('Frequency [kHz]')
plt.xlabel('Steering Direction [Deg]')
plt.show()
# with pd.option_context('display.precision', 3):
# pd.set_option('precision', 0)
# pd.set_option('display.float_format', lambda x: '%.0f' % x)
# df = pd.DataFrame(power_steered)
# df.style.format('{:,.2f}'.format)
# print(df)
#%%
|
python
|
import os
import shutil
import click
from datetime import datetime, timedelta
from flask import current_app as app
from sqlalchemy.sql.expression import false
from alexandria.settings.extensions import db
__author__ = 'oclay'
@click.command()
@click.option('--username', prompt=True, help='The username for the admin')
@click.option('--password', help='The password for the admin', prompt=True, hide_input=True,
confirmation_prompt=True)
def create_admin(username, password):
"""Create an admin user by default"""
from alexandria.modules.security.models import User
user = User.query.filter_by(username=username).first()
if user is None:
user = User(username=username, password=password, role=u'admin', active=True)
db.session.add(user)
db.session.commit()
        click.echo('User created successfully')
else:
        click.echo('User with username %s already exists' % username)
def __remove_files():
shutil.rmtree(app.config['DOCUMENTS_ROOT'])
os.mkdir(app.config['DOCUMENTS_ROOT'])
shutil.rmtree(app.config['THUMBNAILS_ROOT'])
os.mkdir(app.config['THUMBNAILS_ROOT'])
def __delete_users_and_documents():
from alexandria.modules.core.models import Document
from alexandria.modules.security.models import User
Document.query.delete()
User.query.delete()
db.session.commit()
@click.command()
def clean():
"""Remove all docs,indexes ,and files"""
app.engine.rebuild_index()
__remove_files()
__delete_users_and_documents()
click.echo('Zero km, done!')
@click.command()
def remove_files():
"""Delete all documents and thumbnails files"""
__remove_files()
    click.echo('Files deleted successfully')
@click.command()
def rebuild_index():
"""Clear the index """
app.engine.rebuild_index()
    click.echo('Index rebuilt successfully')
def remove_activation():
from alexandria.modules.security.models import AccountRegistration, User
expiration_date = timedelta(days=app.config['ACCOUNT_ACTIVATION_DAYS'])
sq = AccountRegistration.query.with_entities(AccountRegistration.user_id.label('id')).join(User).filter(
User.joined_date < datetime.now() - expiration_date).filter(User.active == False).subquery()
User.query.filter(User.id.in_(sq)).delete(synchronize_session=False)
db.session.commit()
@click.command()
def remove_activation_expired():
"""Remove account activation"""
remove_activation()
click.echo("All expired registrations were deleted")
|
python
|
#!/usr/bin/python
from singularity.package import calculate_similarity
from singularity.utils import check_install
import pickle
import sys
import os
pkg1 = sys.argv[1]
pkg2 = sys.argv[2]
output_file = sys.argv[3]
# Check for Singularity installation
if check_install() != True:
print("You must have Singularity installed to use this script!")
sys.exit(32)
print("Calculating similarity for %s vs %s..." %(pkg1,pkg2))
sims = dict()
# Calculate similarities
sims["folder"] = calculate_similarity(pkg1,pkg2) # default uses just folders
sims["files"] = calculate_similarity(pkg1,pkg2,include_folders=False,include_files=True)
sims["both"] = calculate_similarity(pkg1,pkg2,include_files=True)
# Save to output file
pickle.dump(sims,open(output_file,"wb"))
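# Optional read-back check (not required by the script): confirm the pickle was written as expected.
with open(output_file, "rb") as fh:
    print(pickle.load(fh))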
|
python
|
#!/usr/bin/env python
"""odeint.py: Demonstrate solving an ordinary differential equation by using
odeint.
References:
* Solving Ordinary Differential Equations (ODEs) using Python
"""
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
# pylint: disable=invalid-name
# Solve y''(t) + a y'(t) + b y(t) == 0.
# pylint: disable=unused-argument
def deriv(y, t):
"""Return derivatives of the array y."""
a = 3.0
b = 2.0
    # y[0] : y
    # y[1] : y'
    return np.array([
        y[1],                    # (y[0])' = y'
        -(a * y[1] + b * y[0])   # (y[1])' = y'' = -(a*y' + b*y)
    ])
time = np.linspace(0.0, 10.0, 1000)
yinit = np.array([0.0005, 0.2]) # initial values
y = odeint(deriv, yinit, time)
plt.figure()
# y[:,0] is the first column of y
plt.plot(time, y[:, 0], color='deeppink')
plt.xlabel("t")
plt.ylabel("y")
plt.show()
|
python
|
print('-='*20)
print('Triangle Analyzer')
print('-='*20)
r1 = float(input('First segment: '))
r2 = float(input('Second segment: '))
r3 = float(input('Third segment: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('The segments above CAN form a triangle.')
else:
    print('The segments above CANNOT form a triangle.')
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This command generates middleware_pb2.py and middleware_pb2_grpc.py files from middleware.proto.
These files are necessary for the execution of the gRPC client and the gRPC control server.
"""
from subprocess import call
call("python3 -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. cloud_controller/middleware/middleware.proto",
shell=True)
|
python
|
from .account import GroupSerializer, UserSerializer
from .resource import ResourceSerializer
__all__ = ["UserSerializer", "GroupSerializer", "ResourceSerializer"]
|
python
|
#!/usr/bin/python3.7
# Copyright 2020 Aragubas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import pygame, math
from random import randint
from ENGINE import UTILS as utils
GraphPoints = list()
def Initialize(DISPLAY):
global GraphPoints
for _ in range(20):
GraphPoints.append(randint(0, 50))
# -- Resolution 200, 300
ScrollX = 0
GraphPointSpace = 64
HightestPoint = 0
MouseX, MouseY = (0, 0)
def GameDraw(DISPLAY):
global GraphPoints
global GraphHeight
global ScrollX
global GraphPointSpace
global HightestPoint
DISPLAY.fill((100, 100, 100))
CurrentSelectedIndex = None
GraphSurface = pygame.Surface((DISPLAY.get_width(), DISPLAY.get_height()))
for i, point in enumerate(GraphPoints):
X = i * GraphPointSpace
Y = point / GraphSurface.get_height()
Y = (GraphSurface.get_height() - 5) - Y
if HightestPoint < point:
HightestPoint = point
debug.Set_Parameter("Hight Point was Set to:", point)
if X + ScrollX > -GraphPointSpace and not X + ScrollX > GraphSurface.get_width() + GraphPointSpace:
# -- Render the Line -- #
NextIndex = (i + 1)
try:
NextX = ScrollX + NextIndex * GraphPointSpace
NextY = GraphPoints[NextIndex] / GraphSurface.get_height()
CONTENT_MANAGER.Shape_Line(GraphSurface, (255, 0, 50), ScrollX + X, Y, NextX, NextY, 2)
except IndexError:
pass
# -- Render Square -- #
if HightestPoint == point:
pygame.draw.circle(GraphSurface, (255, 0, 255), (ScrollX + X, Y), 5)
else:
CONTENT_MANAGER.Shape_Rectangle(GraphSurface, (255, 255, 255), (ScrollX + X, Y, 5, 5), BorderRadius=5)
MouseRect = pygame.Rect(MouseX, MouseY, 12, 12)
PointRect = (ScrollX + X, Y, 5, 5)
if MouseRect.colliderect(PointRect):
CurrentSelectedIndex = i
CONTENT_MANAGER.Shape_Rectangle(GraphSurface, (150, 150, 150), (ScrollX + X, 0, 2, GraphSurface.get_height() - 5))
# -- Draw the Graph Peak -- #
peakY = HightestPoint / GraphSurface.get_height()
CONTENT_MANAGER.Shape_Rectangle(GraphSurface, (100, 100, 100), (0, peakY, GraphSurface.get_width(), 2))
CONTENT_MANAGER.FontRender(GraphSurface, "/PressStart2P.ttf", 10, "Peak: {0}".format(str(HightestPoint)), (220, 220, 220), 5, peakY - 10, backgroundColor=(0, 0, 0))
debug.Set_Parameter("HightestPoint", HightestPoint)
DISPLAY.blit(GraphSurface, (0, 0))
if not CurrentSelectedIndex == None:
point = GraphPoints[CurrentSelectedIndex]
CONTENT_MANAGER.FontRender(DISPLAY, "/PressStart2P.ttf", 8, "Data: " + str(point), (255, 255, 255), MouseX + 15, MouseY, backgroundColor=(0, 0, 0))
debug.Set_Parameter("point", point)
debug.Set_Parameter("MouseX", MouseX)
debug.Set_Parameter("MouseY", MouseY)
def Update():
global ScrollX
global GraphPoints
global MouseX
global MouseY
if pygame.key.get_pressed()[pygame.K_q]:
ScrollX -= 5
if pygame.key.get_pressed()[pygame.K_e]:
ScrollX += 5
if pygame.key.get_pressed()[pygame.K_h]:
Randomfy()
# -- Set Mouse Position -- #
MouseX, MouseY = pygame.mouse.get_pos()
def Randomfy():
global GraphPoints
global ScrollX
global GraphPointSpace
global HightestPoint
HightestPoint = 0
GraphPoints.clear()
for _ in range(12):
GraphPoints.append(randint(randint(0, 1000), randint(1000, 2000)))
for i in range(100):
GraphPoints.append(i)
def EventUpdate(event):
global GraphPoints
global ScrollX
global GraphPointSpace
global HightestPoint
if event.type == pygame.KEYUP and event.key == pygame.K_g:
Randomfy()
if event.type == pygame.KEYUP and event.key == pygame.K_r:
ScrollX = 0
if event.type == pygame.KEYUP and event.key == pygame.K_b:
GraphPointSpace = GraphPointSpace * 2
if event.type == pygame.KEYUP and event.key == pygame.K_n:
GraphPointSpace = GraphPointSpace / 2
|
python
|
import sys
sentence = ''
with open(sys.argv[1],'r') as file:
for i in file:
data = i.split()
if (len(data) != 0):
if(data[-1] =='E'):
sentence += data[0]+" "
else:
sentence += data[0]
else:
print(sentence)
sentence = ''
|
python
|
from functools import reduce
num = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'
digits = 13
max_prod = 0
for i in range(0, len(num)+1-digits):
#print(num[i:i+4])
prod = reduce((lambda x, y: int(x) * int(y)), num[i:i+digits])
if prod > max_prod:
max_prod = prod
print(max_prod)
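# Cross-check (optional): the same maximum computed in one line with math.prod (Python 3.8+).
import math
print(max(math.prod(int(c) for c in num[i:i + digits]) for i in range(len(num) + 1 - digits)))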
|
python
|
import numpy as np
import matplotlib.pyplot as plt
x = np.array([1., 2., 3., 4., 5.])
y = np.array([1., 3., 2., 3., 5.])
plt.scatter(x, y)
plt.axis([0, 6, 0, 6])
plt.show()
x_mean = np.mean(x)
y_mean = np.mean(y)
num = 0.0
d = 0.0
for x_i, y_i in zip(x, y):
num += (x_i - x_mean) * (y_i - y_mean)
d += (x_i - x_mean) ** 2
a = num/d
b = y_mean - a * x_mean
y_hat = a * x + b
plt.scatter(x, y)
plt.plot(x, y_hat, color='r')
plt.axis([0, 6, 0, 6])
plt.show()
x_predict = 6
y_predict = a * x_predict + b
print(y_predict)
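# Optional sanity check: numpy's degree-1 least-squares fit should give (approximately)
# the same slope and intercept as the closed-form solution above.
a_check, b_check = np.polyfit(x, y, 1)
print(a_check, b_check)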
|
python
|
import pika
from collections import deque
class Messaging():
def __init__(self, identity):
self._connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
self._channel = self._connection.channel()
self._queue = None
self._identity = identity
self._create_exchanges()
self._create_queue(identity)
self._setup_consumers()
self._messages = dict()
def _create_exchanges(self):
self._channel.exchange_declare(exchange='broadcast', type='fanout')
self._channel.exchange_declare(exchange='direct_message', type='topic')
def _create_queue(self, identity):
self._queue = self._channel.queue_declare(exclusive=True)
self._channel.queue_bind(exchange='broadcast', queue=self._queue.method.queue)
self._channel.queue_bind(exchange='direct_message',
queue=self._queue.method.queue,
routing_key=identity + '.*')
def _callback(self, ch, method, properties, body):
# Ignore broadcast sent by this node
if method.exchange == 'broadcast' and method.routing_key == self._identity:
ch.basic_ack(delivery_tag=method.delivery_tag)
return True
# Ignore broadcast for now
if method.exchange == 'broadcast':
ch.basic_ack(delivery_tag=method.delivery_tag)
return True
self.send(method.routing_key, body)
ch.basic_ack(delivery_tag=method.delivery_tag)
def _setup_consumers(self):
self._channel.basic_consume(self._callback,
queue=self._queue.method.queue)
def run(self):
self._channel.start_consuming()
def stop(self):
print('Stop messaging process')
self._channel.stop_consuming()
self._connection.close()
def send(self, to, message):
if to.find(self._identity) == 0:
if to not in self._messages:
self._messages[to] = deque()
self._messages[to].append(message)
else:
self._channel.basic_publish(exchange='direct_message',
routing_key=to,
body=message)
def broadcast(self, message):
self._channel.basic_publish(exchange='broadcast',
routing_key=self._identity,
body=message)
def has_message_for(self, to):
if to in self._messages:
return True
return False
def messages_for(self, client_id):
to = '%s.%s' % (self._identity , client_id)
if not self.has_message_for(to):
return None
return self._messages[to]
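if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): assumes a RabbitMQ broker on localhost;
    # the identities and routing keys below are made up for this example.
    node = Messaging('node1')
    node.send('node2.client1', b'hello')   # published via the direct_message exchange
    node.broadcast(b'hi everyone')         # fanned out via the broadcast exchange
    print(node.messages_for('client1'))    # None unless something arrived for node1.client1
    node.stop()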
|
python
|
# Build a multiplication table generator
n = int(input("Multiplication table for which number? "))
i = 0
while i < 10:
print("{} X {} = {}".format(n, i + 1, (n*(i + 1))))
i = i + 1
|
python
|
# Register your models here.
from django.contrib import admin
from .models import Photo, Metadata, Album
admin.site.register(Photo)
admin.site.register(Metadata)
admin.site.register(Album)
|
python
|
t = 5
sentences = []
while t:
    sentences.append(input("Enter a sentence: ") + "\n")
t -= 1
f = open("sentence.txt", "w")
for i in sentences:
f.write(i)
f.close()
f = open("sentence.txt", "a")
f.writelines(sentences)
f.close()
f = open("sentence.txt", "r")
for line in f:
print(line, end="")
f.close()
f = open("sentence.txt", "r")
x = f.readline()
print(x)
f.close()
with open("sentence.txt", "r") as f:
f.write("Hello from context")
|
python
|
import toml
output_file = ".streamlit/secrets.toml"
with open("project-327006-2314b3476b3a.json") as json_file:
json_text = json_file.read()
config = {"textkey": json_text}
toml_config = toml.dumps(config)
with open(output_file, "w") as target:
target.write(toml_config)
|
python
|
from setuptools import find_packages, setup
setup(
name='robotframework-historic',
version="0.2.9",
description='Custom report to display robotframework historical execution records',
long_description='Robotframework Historic is custom report to display historical execution records using MySQL + Flask',
classifiers=[
'Framework :: Robot Framework',
'Programming Language :: Python',
'Topic :: Software Development :: Testing',
],
keywords='robotframework historical execution report',
author='Shiva Prasad Adirala',
author_email='[email protected]',
url='https://github.com/adiralashiva8/robotframework-historic',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'robotframework',
'config',
'flask',
'flask-mysqldb'
],
entry_points={
'console_scripts': [
'rfhistoric=robotframework_historic.app:main',
'rfhistoricparser=robotframework_historic.parserargs:main',
'rfhistoricreparser=robotframework_historic.reparserargs:main',
'rfhistoricsetup=robotframework_historic.setupargs:main',
'rfhistoricupdate=robotframework_historic.updateargs:main',
]
},
)
|
python
|
import numpy as np
import random
import math
import cmath
import itertools
from tqdm import tqdm
from PIL import Image
from matplotlib import cm
def log_density_map(val, max_count):
brightness = math.log(val) / math.log(max_count)
gamma = 3.2 #7.2
brightness = math.pow(brightness, 1/gamma)
return brightness
def flip(t):
return t.real*1j + t.imag
def g1(t):
return cmath.sin(t)*1j + cmath.cos(t)
def g2(t):
return cmath.tan(t)
def popcorn(h):
sc = 2
iterationsN = 21
a = 3
mn = 0.05
counts = np.zeros((h, h))
for i in tqdm(range(h)):
for j in range(h):
col = 0
z = complex(sc/2 - sc*i/h, sc/2 - sc*j/h)
for k in range(iterationsN):
x = z.real
y = z.imag*1j
px = x
x = x - mn*( g1(y + g2(a*y) )).real - mn*(g1(x + g2(a*x))).imag*1j
y = y - mn*(g1(px + g2(a*px))).real - mn*(g1(y + g2(a*y))).imag*1j
z = x + flip(y)
try:
x = x / abs(z)
y = y / abs(z)
except Exception:
# x = 1000
# y = 1000
pass
z = x + flip(y)
#print(z)
if abs(z) > 2:
print('!!!')
break
                # Rotate z by 45 degrees and map it onto the h x h grid.
                # (Grid bounds below are an assumption: the sampling window of size `sc` centred on 0.)
                angle = 45. * math.pi / 180.
                xmin, ymax, deltap, deltaq = -sc / 2, sc / 2, sc / h, sc / h
                x0 = int((z.real * math.cos(angle) - z.imag * math.sin(angle) - xmin) / deltap)
                y0 = int((ymax - z.real * math.sin(angle) - z.imag * math.cos(angle)) / deltaq)
                if 0 <= x0 < h and 0 <= y0 < h:
                    counts[x0, y0] += 1
return counts
def colorize(counts, h):
cmap = cm.get_cmap("copper")
im_arr = np.zeros((h, h, 3), dtype=np.uint8)
max_count = np.max(counts)
for y in tqdm(range(h)):
for x in range(h):
if counts[y, x] > 0:
rgba = cmap( log_density_map(counts[y, x], max_count) )
#rgba = [counts[y, x]/max_count for i in range(3)]
im_arr[y, x, 0] = int(255 * rgba[0])
im_arr[y, x, 1] = int(255 * rgba[1])
im_arr[y, x, 2] = int(255 * rgba[2])
return im_arr
def run():
h = 400
counts = popcorn(h)
im_arr = colorize(counts, h)
print(f"Saving image...{h}")
name = f"img_{h}_{random.random()}.png"
if h >= 3000:
name = 'morethan4000/' + name
else:
name = 'examples2/' + name
im = Image.fromarray(im_arr)
im.save(name)
if __name__ == "__main__":
run()
|
python
|
from typing import *
# extmod/modtrezorcrypto/modtrezorcrypto-bip32.h
class HDNode:
'''
BIP0032 HD node structure.
'''
def __init__(self,
depth: int,
fingerprint: int,
child_num: int,
chain_code: bytes,
private_key: bytes = None,
public_key: bytes = None,
curve_name: str = None) -> None:
'''
'''
def derive(self, index: int, public: bool=False) -> None:
'''
Derive a BIP0032 child node in place.
'''
def derive_cardano(self, index: int) -> None:
'''
Derive a BIP0032 child node in place using Cardano algorithm.
'''
def derive_path(self, path: List[int]) -> None:
'''
Go through a list of indexes and iteratively derive a child node in place.
'''
def serialize_public(self, version: int) -> str:
'''
Serialize the public info from HD node to base58 string.
'''
def serialize_private(self, version: int) -> str:
'''
Serialize the private info HD node to base58 string.
'''
def clone(self) -> HDNode:
'''
Returns a copy of the HD node.
'''
def depth(self) -> int:
'''
Returns a depth of the HD node.
'''
def fingerprint(self) -> int:
'''
Returns a fingerprint of the HD node (hash of the parent public key).
'''
def child_num(self) -> int:
'''
Returns a child index of the HD node.
'''
def chain_code(self) -> bytes:
'''
Returns a chain code of the HD node.
'''
def private_key(self) -> bytes:
'''
Returns a private key of the HD node.
'''
def private_key_ext(self) -> bytes:
'''
Returns a private key extension of the HD node.
'''
def public_key(self) -> bytes:
'''
Returns a public key of the HD node.
'''
def address(self, version: int) -> str:
'''
Compute a base58-encoded address string from the HD node.
'''
def nem_address(self, network: int) -> str:
'''
Compute a NEM address string from the HD node.
'''
def nem_encrypt(self, transfer_public_key: bytes, iv: bytes, salt: bytes, payload: bytes) -> bytes:
'''
Encrypts payload using the transfer's public key
'''
def ethereum_pubkeyhash(self) -> bytes:
'''
Compute an Ethereum pubkeyhash (aka address) from the HD node.
'''
def deserialize(self, value: str, version_public: int, version_private: int) -> HDNode:
'''
Construct a BIP0032 HD node from a base58-serialized value.
'''
def from_seed(seed: bytes, curve_name: str) -> HDNode:
'''
Construct a BIP0032 HD node from a BIP0039 seed value.
'''
def from_mnemonic_cardano(mnemonic: str, passphrase: str) -> bytes:
'''
Convert mnemonic to hdnode
'''
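# Example usage (illustrative only; the import path and the version/path constants below
# are assumptions, not part of this stub):
#
#   from trezor.crypto import bip32
#   node = bip32.from_seed(seed, 'secp256k1')
#   node.derive_path([0x8000002C, 0x80000000, 0x80000000, 0, 0])  # hypothetical BIP44-like path
#   xpub = node.serialize_public(0x0488B21E)
#   addr = node.address(0)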
|
python
|
import django.core.management.base as djcmb
import anwesende.room.models as arm
import anwesende.users.models as aum
class Command(djcmb.BaseCommand):
help = "Silently creates group 'datenverwalter'"
def handle(self, *args, **options):
aum.User.get_datenverwalter_group() # so admin has it on first visit
arm.Seat.get_dummy_seat() # create now to make it nicely the first one
|
python
|
'''
English_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
Below is the Marathi digits list.
This program converts an input Marathi number into its English (Arabic) numeral form.
'''
marathi_digits = ['०', '१', '२', '३', '४', '५', '६', '७', '८', '९']
a = input("Enter marathi digit: ")
if a in marathi_digits:
print("English Digit: ", marathi_digits.index(a))
# It will go to this condition if marathi number is of more than one digit
else:
c = 0 # counter is to check input is valid or not
n1 = ''
for i in a:
if i in marathi_digits:
n1 += str(marathi_digits.index(i))
c = c + 1
    if c == len(a):
print("English Digit: ", n1)
else:
print("Enter marathi number only")
'''
OUTPUT-:Enter marathi digit:६७८
English Digit: 678
Enter marathi digit:०
English Digit: 0
Enter marathi digit: seven
Enter marathi number only
'''
|
python
|
# This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.
"""
storage hosts views
"""
from datatables import ColumnDT, DataTables
from flask import jsonify, redirect, render_template, request, url_for
from sqlalchemy import func, literal_column
from sqlalchemy_filters import apply_filters
from sner.server.auth.core import role_required
from sner.server.extensions import db
from sner.server.forms import ButtonForm
from sner.server.sqlafilter import FILTER_PARSER
from sner.server.storage.core import annotate_model, tag_model_multiid
from sner.server.storage.forms import HostForm
from sner.server.storage.models import Host, Note, Service, Vuln
from sner.server.storage.views import blueprint
from sner.server.utils import relative_referrer, valid_next_url
@blueprint.route('/host/list')
@role_required('operator')
def host_list_route():
"""list hosts"""
return render_template('storage/host/list.html')
@blueprint.route('/host/list.json', methods=['GET', 'POST'])
@role_required('operator')
def host_list_json_route():
"""list hosts, data endpoint"""
query_cnt_services = db.session.query(Service.host_id, func.count(Service.id).label('cnt')).group_by(Service.host_id).subquery()
query_cnt_vulns = db.session.query(Vuln.host_id, func.count(Vuln.id).label('cnt')).group_by(Vuln.host_id).subquery()
query_cnt_notes = db.session.query(Note.host_id, func.count(Note.id).label('cnt')).group_by(Note.host_id).subquery()
columns = [
ColumnDT(Host.id, mData='id'),
ColumnDT(Host.address, mData='address'),
ColumnDT(Host.hostname, mData='hostname'),
ColumnDT(Host.os, mData='os'),
ColumnDT(func.coalesce(query_cnt_services.c.cnt, 0), mData='cnt_s', global_search=False),
ColumnDT(func.coalesce(query_cnt_vulns.c.cnt, 0), mData='cnt_v', global_search=False),
ColumnDT(func.coalesce(query_cnt_notes.c.cnt, 0), mData='cnt_n', global_search=False),
ColumnDT(Host.tags, mData='tags'),
ColumnDT(Host.comment, mData='comment'),
ColumnDT(literal_column('1'), mData='_buttons', search_method='none', global_search=False)
]
query = db.session.query().select_from(Host) \
.outerjoin(query_cnt_services, Host.id == query_cnt_services.c.host_id) \
.outerjoin(query_cnt_vulns, Host.id == query_cnt_vulns.c.host_id) \
.outerjoin(query_cnt_notes, Host.id == query_cnt_notes.c.host_id)
if 'filter' in request.values:
query = apply_filters(query, FILTER_PARSER.parse(request.values.get('filter')), do_auto_join=False)
hosts = DataTables(request.values.to_dict(), query, columns).output_result()
return jsonify(hosts)
@blueprint.route('/host/add', methods=['GET', 'POST'])
@role_required('operator')
def host_add_route():
"""add host"""
form = HostForm()
if form.validate_on_submit():
host = Host()
form.populate_obj(host)
db.session.add(host)
db.session.commit()
return redirect(url_for('storage.host_view_route', host_id=host.id))
return render_template('storage/host/addedit.html', form=form)
@blueprint.route('/host/edit/<host_id>', methods=['GET', 'POST'])
@role_required('operator')
def host_edit_route(host_id):
"""edit host"""
host = Host.query.get(host_id)
form = HostForm(obj=host, return_url=relative_referrer())
if form.validate_on_submit():
form.populate_obj(host)
db.session.commit()
if valid_next_url(form.return_url.data):
return redirect(form.return_url.data)
return render_template('storage/host/addedit.html', form=form)
@blueprint.route('/host/delete/<host_id>', methods=['GET', 'POST'])
@role_required('operator')
def host_delete_route(host_id):
"""delete host"""
form = ButtonForm()
if form.validate_on_submit():
db.session.delete(Host.query.get(host_id))
db.session.commit()
return redirect(url_for('storage.host_list_route'))
return render_template('button-delete.html', form=form)
@blueprint.route('/host/annotate/<model_id>', methods=['GET', 'POST'])
@role_required('operator')
def host_annotate_route(model_id):
"""annotate vuln"""
return annotate_model(Host, model_id)
@blueprint.route('/host/view/<host_id>')
@role_required('operator')
def host_view_route(host_id):
"""view host"""
host = Host.query.get(host_id)
return render_template('storage/host/view.html', host=host, button_form=ButtonForm())
@blueprint.route('/host/tag_multiid', methods=['POST'])
@role_required('operator')
def host_tag_multiid_route():
"""tag multiple route"""
return tag_model_multiid(Host)
|
python
|
import platform
import sys
class AppMapPyVerException(Exception):
pass
# Library code uses these, so provide intermediate
# functions that can be stubbed when testing.
def _get_py_version():
return sys.version_info
def _get_platform_version():
return platform.python_version()
def check_py_version():
req = (3, 6)
actual = _get_platform_version()
if _get_py_version() < req:
raise AppMapPyVerException(
f'Minimum Python version supported is {req[0]}.{req[1]}, found {actual}'
)
|
python
|
'''
GaussGammaDistr.py
Joint Gaussian-Gamma distribution: D independent Gaussian-Gamma distributions
Attributes
--------
m : mean for Gaussian, length D
kappa : scalar precision parameter for Gaussian covariance
a : parameter for Gamma, vector length D
b : parameter for Gamma, vector length D
'''
import numpy as np
import scipy.linalg
from bnpy.util import MVgammaln, MVdigamma
from bnpy.util import LOGTWO, LOGPI, LOGTWOPI, EPS
from bnpy.util import gammaln, digamma
from .Distr import Distr
class GaussGammaDistr( Distr ):
######################################################### Constructor
#########################################################
def __init__(self, a=None, b=None, m=None, kappa=None, **kwargs):
''' Create new GaussGammaDistr object, with specified parameter values
Args
-------
a : numpy 1D array_like, length D
b : numpy 1D array_like, length D
m : numpy 1D array_like, length D
kappa : float
Returns
-------
D : bnpy GaussGammaDistr object, with provided parameters
'''
# Unpack
self.a = np.squeeze(np.asarray(a))
self.b = np.squeeze(np.asarray(b))
self.m = np.squeeze(np.asarray(m))
self.kappa = float(kappa)
# Dimension check
assert self.b.ndim <= 1
assert self.m.shape == self.b.shape
assert self.a.shape == self.m.shape
self.D = self.b.size
self.Cache = dict()
@classmethod
def CreateAsPrior( cls, argDict, Data):
''' Creates Gaussian-Gamma prior for params that generate Data.
Returns GaussGammaDistr object with same dimension as Data.
Provided argDict specifies prior's expected mean and variance.
'''
D = Data.dim
a0 = argDict['a0']
b0 = argDict['b0']
m0 = argDict['m0']
kappa = argDict['kappa']
m = m0 * np.ones(D)
a = a0 * np.ones(D)
b = b0 * np.ones(D)
return cls(a=a, b=b, m=m, kappa=kappa)
######################################################### Log Cond. Prob.
######################################################### E-step
def E_log_pdf( self, Data ):
''' Calculate E[ log p( x_n | theta ) ] for each x_n in Data.X
Args
-------
Data : bnpy XData object
with attribute Data.X, numpy 2D array of size nObs x D
Returns
-------
logp : numpy 1D array, length nObs
'''
logPDFConst = -0.5 * self.D * LOGTWOPI + 0.5 * np.sum(self.E_logLam())
logPDFData = -0.5 * self.E_distMahalanobis(Data.X)
return logPDFConst + logPDFData
def E_distMahalanobis(self, X):
''' Calculate E[ (x_n - \mu)^T diag(\lambda) (x_n - mu) ]
which has simple form due to diagonal structure.
Args
-------
X : numpy array, nObs x D
Returns
-------
dist : numpy 1D array, length nObs
dist[n] = E[ (X[n] - \mu)^T diag(\lambda) (X[n] - mu) ]
= expected mahalanobis distance to observation n
'''
Elambda = self.a / self.b
if X.ndim == 2:
weighted_SOS = np.sum( Elambda * np.square(X - self.m), axis=1)
else:
weighted_SOS = np.sum(Elambda * np.square(X - self.m))
weighted_SOS += self.D/self.kappa
return weighted_SOS
######################################################### Param updates
######################################################### (M step)
def get_post_distr( self, SS, k=None, kB=None, **kwargs):
''' Create new GaussGammaDistr as posterior given sufficient stats
for a particular component (or components)
Args
------
SS : bnpy SuffStatBag, with K components
k : int specifying component of SS to use.
Range {0, 1, ... K-1}.
kB : [optional] int specifying additional component of SS to use
if provided, k-th and kB-th entry of SS are *merged* additively
Range {0, 1, ... K-1}.
Returns
-------
D : bnpy.distr.GaussGammaDistr, with updated posterior parameters
'''
if k is None:
EN = SS.N
Ex = SS.x
Exx = SS.xx
elif kB is not None:
EN = float(SS.N[k] + SS.N[kB])
Ex = SS.x[k] + SS.x[kB]
Exx = SS.xx[k] + SS.xx[kB]
else:
EN = float(SS.N[k])
Ex = SS.x[k]
Exx = SS.xx[k]
kappa = self.kappa + EN
m = (self.kappa * self.m + Ex) / kappa
a = self.a + 0.5*EN
b = self.b + 0.5*(Exx + self.kappa*np.square(self.m) - kappa*np.square(m))
return GaussGammaDistr(a, b, m, kappa)
def post_update_soVB( self, rho, refDistr, **kwargs):
''' In-place update of this GaussGammaDistr's internal parameters,
via the stochastic online variational algorithm.
Updates via interpolation between self and reference.
self = self * (1-rho) + refDistr * rho
Args
-----
rho : float, learning rate to use for the update
refDistr : bnpy GaussGammaDistr, reference distribution for update
Returns
-------
None.
'''
etaCUR = self.get_natural_params()
etaSTAR = refDistr.get_natural_params()
etaNEW = list(etaCUR)
for i in xrange(len(etaCUR)):
etaNEW[i] = rho*etaSTAR[i] + (1-rho)*etaCUR[i]
self.set_natural_params(tuple(etaNEW))
######################################################### Required accessors
#########################################################
@classmethod
    def calc_log_norm_const(cls, a, b, m, kappa):
        D = np.asarray(m).size
        logNormConstNormal = 0.5 * D * (LOGTWOPI - np.log(kappa))
logNormConstGamma = np.sum(gammaln(a)) - np.inner(a, np.log(b))
return logNormConstNormal + logNormConstGamma
def get_log_norm_const(self):
''' Calculate log normalization constant (aka log partition function)
for this Gauss-Gamma distribution.
p(mu,Lam) = NormalGamma( mu, Lam | a, b, m, kappa)
= 1/Z f(mu|Lam) g(Lam), where Z is const w.r.t mu,Lam
Normalization constant = Z = \int f() g() dmu dLam
Returns
--------
logZ : float
'''
D = self.D
a = self.a
b = self.b
logNormConstNormal = 0.5 * D * (LOGTWOPI - np.log(self.kappa))
logNormConstGamma = np.sum(gammaln(a)) - np.inner(a, np.log(b))
return logNormConstNormal + logNormConstGamma
def E_log_pdf_Phi(self, Distr, doNormConst=True):
''' Evaluate expectation of log PDF for given GaussGammaDistr
Args
-------
Distr : bnpy GaussGammaDistr
doNormConst : boolean, if True then Distr's log norm const is included
Returns
-------
logPDF : float
'''
assert Distr.D == self.D
selfELam = self.a / self.b
logPDF = np.inner(Distr.a - 0.5, self.E_logLam()) \
- np.inner(Distr.b, selfELam) \
- 0.5 * Distr.kappa * self.E_distMahalanobis(Distr.m)
if doNormConst:
return logPDF - Distr.get_log_norm_const()
return logPDF
def get_entropy(self):
        ''' Calculate entropy of this Gauss-Gamma distribution.
'''
return -1.0 * self.E_log_pdf_Phi(self)
def get_natural_params(self):
'''
'''
t1 = self.a
t2 = self.b + 0.5 * self.kappa * np.square(self.m)
t3 = self.kappa * self.m
t4 = self.kappa
etatuple = t1, t2, t3, t4
return etatuple
def set_natural_params(self, etatuple):
self.a = etatuple[0]
self.kappa = etatuple[3]
self.m = etatuple[2]/self.kappa
self.b = etatuple[1] - 0.5 * self.kappa * np.square(self.m)
self.Cache = dict()
######################################################### Custom Accessors
#########################################################
def E_logLam(self):
''' E[ \log \lambda_d ]
Returns
-------
1D array, length D
'''
return digamma(self.a) - np.log(self.b)
def E_sumlogLam(self):
''' \sum_d E[ \log \lambda_d ]
Returns
-------
float, scalar
'''
return np.sum(digamma(self.a) - np.log(self.b))
def E_Lam(self):
        ''' E[ \lambda_d ]
Returns vector, length D
'''
return (self.a / self.b)
def E_LamMu(self):
''' E[ \lambda_d * \mu_d ]
Returns vector, length D
'''
return (self.a / self.b) * self.m
def E_LamMu2(self):
''' E[ \lambda_d * \mu_d * \mu_d ]
Returns vector, length D
'''
return (self.a / self.b) * np.square(self.m) + 1./self.kappa
############################################################## I/O
##############################################################
def to_dict(self):
''' Convert attributes of this GaussGammaDistr into a dict
useful for long-term storage to disk, pickling, etc.
Returns
-------
Dict with entries for each named parameter: a, b, m, kappa
'''
return dict(name=self.__class__.__name__, \
m=self.m, kappa=self.kappa, a=self.a, b=self.b)
def from_dict(self, Dict):
''' Internally set this GaussGammaDistr's parameters via provided dict
Returns
--------
None. This Distr's parameters set to new values.
'''
self.m = Dict['m']
self.a = Dict['a']
self.b = Dict['b']
self.kappa = Dict['kappa']
self.D = self.b.shape[0]
self.Cache = dict()
def to_string(self, offset=" "):
Elam = self.a[:2] / self.b[:2]
if self.D > 2:
sfx = '...\n'
else:
sfx = '\n'
np.set_printoptions(precision=3, suppress=False)
msg = offset + 'E[ mean \mu ] ' + str(self.m[:2]) + sfx
msg += offset + 'E[ precision \lambda ]' + str(Elam) + sfx
return msg
|
python
|
import torch
from .defaults import get_default_config
def update_config(config):
if config.dataset.name in ['CIFAR10', 'CIFAR100']:
dataset_dir = f'~/.torch/datasets/{config.dataset.name}'
config.dataset.dataset_dir = dataset_dir
config.dataset.image_size = 32
config.dataset.n_channels = 3
config.dataset.n_classes = int(config.dataset.name[5:])
elif config.dataset.name in ['MNIST', 'FashionMNIST', 'KMNIST']:
dataset_dir = '~/.torch/datasets'
config.dataset.dataset_dir = dataset_dir
config.dataset.image_size = 28
config.dataset.n_channels = 1
config.dataset.n_classes = 10
if not torch.cuda.is_available():
config.device = 'cpu'
return config
|
python
|
from time import sleep
import logging
import pytest
from common.utils import resize_browser
from common.asserts import assert_customer_logo, assert_customer_testimonial, assert_typography, assert_overflowing
from common.svb_form import assert_required_fields_top, assert_bad_email_top, assert_non_business_email_top, assert_success_form_top, assert_svb_contact_form_required_fields, assert_svb_contact_form_invalid_name, assert_svb_contact_form_invalid_phone, assert_svb_contact_form_invalid_phone_length_min, assert_svb_contact_form_invalid_phone_length_max, assert_svb_contact_form_success
logger = logging.getLogger(__name__)
@pytest.fixture(scope='function')
def browser(module_browser, base_url, request):
resize_browser(browser=module_browser, resolution=request.param)
module_browser.get(base_url + '/svb')
sleep(4)
return module_browser
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_page_overflow(browser):
assert_overflowing(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_customer_logo(browser):
assert_customer_logo(browser)
@pytest.mark.parametrize('browser', [('desktop_1')], indirect=True)
def test_customer_testimonial(browser):
assert_customer_testimonial(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_typography(browser):
assert_typography(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_required_fields_top(browser):
assert_required_fields_top(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_bad_email_top(browser):
assert_bad_email_top(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_non_business_email_top(browser):
assert_non_business_email_top(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_success_form_top(browser):
assert_success_form_top(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_required_fields(browser):
assert_svb_contact_form_required_fields(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_invalid_name(browser):
assert_svb_contact_form_invalid_name(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_invalid_phone(browser):
assert_svb_contact_form_invalid_phone(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_invalid_phone_length_min(browser):
assert_svb_contact_form_invalid_phone_length_min(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_invalid_phone_length_max(browser):
assert_svb_contact_form_invalid_phone_length_max(browser)
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_success(browser):
assert_svb_contact_form_success(browser)
|
python
|
# -*- coding: utf-8 -*-
import glob
import re
import json
import os
import shutil
from PIL import Image
import numpy as np
from keras.preprocessing.image import img_to_array, load_img
# Image size
IMAGE_SIZE = 224
# Number of channels
CHANNEL_SIZE = 3
# Build the label dictionary
def make_label_list():
    # Get the directory paths
    dir_path_list = glob.glob('image/*')
    # Prepare the dictionary
    label_dic = {}
    # Assign a label to each directory
for i, dir_path in enumerate(dir_path_list):
key = re.search(r'image/(.+)', dir_path)
key = key.group(1)
# label_dic[key] = i
label_dic[key] = 1
    # Save the dictionary as JSON
with open('./data/label_dic.json', 'w') as f:
label_json = json.dumps(label_dic)
json.dump(label_json, f)
return label_dic
# Function that converts an image into numeric data
def convert_image(img_path):
try:
img = load_img(img_path, target_size=(IMAGE_SIZE, IMAGE_SIZE))
x = img_to_array(img)
        # Normalize
x = x / 255.0
return x
except Exception as e:
shutil.move(img_path, 'noise')
x = None
print('[Error] {0} <{1}>'.format(img_path, e))
return x
# Function that returns the label data for an image
def get_label_data(img_path, label_dic):
    # Get the image's directory path
    key = re.search(r'image/(.+)/.+/.+/.+/.+', img_path)
    key = key.group(1)
    # Look up the label in the dictionary
    t = label_dic[key]
    # Convert the label to a numpy array
t = np.asarray(t, dtype=np.int32)
return t
# Function that extracts the image file name from the image path
def get_image_name(img_path):
image_name = re.search(r'image/.+/.+/.+/.+/(.+).jpg', img_path)
image_name = image_name.group(1)
return image_name
# Function that builds the dataset
def make_dataset(label_dic):
    # Get the list of directories for each person
person_path_list = glob.glob('image/*/*/*/*')
for person_path in person_path_list:
        # Get the name of the person in the photo
person_name = re.search(r'image/.+/.+/.+/(.+)', person_path)
person_name = person_name.group(1)
        # Get the name of the directory that contains the images
dir_name = re.search(r'image/(.+/.+/.+)/.+', person_path)
dir_name = dir_name.group(1)
        # Directory where the image and label files are saved
        save_dir = './data/' + dir_name + '/' + person_name
        # Create the directory if it does not exist yet
        if not os.path.exists(save_dir): os.makedirs(save_dir)
        # Get the list of images inside the person's directory
img_path_list = glob.glob(person_path+'/*.jpg')
if img_path_list == []:
shutil.move(person_path, 'noise')
print('[Remove] {0}'.format(person_path))
        # Prepare a list for the image data
        image_data = []
        # Prepare a list for the label data
label_data = []
for img_path in img_path_list:
            # Convert the image to numeric data
x = convert_image(img_path)
if x is None:
continue
            # Get the label data
t = get_label_data(img_path, label_dic)
image_name = get_image_name(img_path)
            # Path where the image data is saved
            save_image_path = save_dir + '/' + image_name + '_image.npy'
            # Path where the label data is saved
            save_label_path = save_dir + '/' + image_name + '_label.npy'
            # Save the image data to a file
            np.save(save_image_path, x)
            # Save the label data to a file
np.save(save_label_path, t)
print('[Save] {0}: {1}'.format(person_name, len(img_path_list)))
print()
occupation_path_list = glob.glob('image/*')
for occupation_path in occupation_path_list:
        # Get the occupation name
occupation_name = re.search(r'image/(.+)', occupation_path)
occupation_name = occupation_name.group(1)
        # Get the list of all images for this occupation
img_path_list = glob.glob(occupation_path + '/*/*/*')
print('{0}: {1}'.format(occupation_name, len(img_path_list)))
print()
    # Get the list of all images
img_path_list = glob.glob('image/*/*/*/*/*')
print('total: {0}'.format(len(img_path_list)))
if __name__ == '__main__':
    # Create the labels
label_dic = make_label_list()
    # Create the dataset
make_dataset(label_dic)
|
python
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
password1 = forms.CharField(
label= ("Password"),
strip=False,
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label= ("Password confirmation"),
widget=forms.PasswordInput,
strip=False,
)
class Meta(UserCreationForm):
model = CustomUser
fields = ('username', 'email')
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = CustomUser
fields = ('username', 'email')
|
python
|
'''
Created on Sep 20, 2013
@author: nshearer
'''
from ConsoleYesNoQuestion import ConsoleYesNoQuestion
class ConsoleActionPrompt(ConsoleYesNoQuestion):
'''Present an action prompt on the console'''
# def __init__(self, question):
# super(ConsoleActionPrompt, self).__init__(question)
def present_question(self):
print ""
print "-- ACTION --"
super(ConsoleActionPrompt, self).present_question()
if self.question.previous_answer is True:
print "* This task has already been completed *"
|
python
|
import sedate
from datetime import timedelta, time
from itertools import groupby
from sqlalchemy import types
from sqlalchemy.schema import Column
from sqlalchemy.schema import Index
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.orm import object_session
from sqlalchemy.orm.util import has_identity
from libres.modules import utils
from libres.modules.rasterizer import (
rasterize_start,
rasterize_span,
rasterize_end,
iterate_span,
MIN_RASTER
)
from libres.db.models import ORMBase
from libres.db.models.types import UUID, UTCDateTime, JSON
from libres.db.models.other import OtherModels
from libres.db.models.timestamp import TimestampMixin
from sqlalchemy.ext.hybrid import hybrid_property
class Allocation(TimestampMixin, ORMBase, OtherModels):
"""Describes a timespan within which one or many timeslots can be
reserved.
There's an important concept to understand before working with allocations.
    The resource uuid of an allocation does not always point to the actual
resource.
A resource may in fact be a real resource, or an imaginary resource with
a uuid derived from the real resource. This is a somewhat historical
artifact.
If you need to know which allocations belong to a real resource, the
mirror_of field is what's relevant. The originally created allocation
with the real_resource is also called the master-allocation and it is
the one allocation with mirror_of and resource being equal.
When in doubt look at the managed_* functions of the
:class:`.scheduler.Scheduler` class.
"""
__tablename__ = 'allocations'
#: the id of the allocation, autoincremented
id = Column(types.Integer(), primary_key=True, autoincrement=True)
#: the resource uuid of the allocation, may not be an actual resource
#: see :class:`.models.Allocation` for more information
resource = Column(UUID(), nullable=False)
#: the polymorphic type of the allocation
type = Column(types.Text(), nullable=True)
#: resource of which this allocation is a mirror. If the mirror_of
#: attribute equals the resource, this is a real resource
#: see :class:`.models.Allocation` for more information
mirror_of = Column(UUID(), nullable=False)
#: Group uuid to which this allocation belongs to. Every allocation has a
    #: group but some allocations may be the only one in their group.
group = Column(UUID(), nullable=False)
#: Number of times this allocation may be reserved
quota = Column(types.Integer(), default=1)
#: Maximum number of times this allocation may be reserved with one
#: single reservation.
quota_limit = Column(types.Integer(), default=0, nullable=False)
    #: Partly available allocations may be reserved partially. How they may
#: be partitioned is defined by the allocation's raster.
partly_available = Column(types.Boolean(), default=False)
#: True if reservations for this allocation must be approved manually.
approve_manually = Column(types.Boolean(), default=False)
#: The timezone this allocation resides in.
timezone = Column(types.String())
#: Custom data reserved for the user
data = Column(
JSON(),
nullable=True
)
_start = Column(UTCDateTime(timezone=False), nullable=False)
_end = Column(UTCDateTime(timezone=False), nullable=False)
_raster = Column(types.Integer(), nullable=False)
__table_args__ = (
Index('mirror_resource_ix', 'mirror_of', 'resource'),
UniqueConstraint('resource', '_start', name='resource_start_ix')
)
__mapper_args__ = {
'polymorphic_identity': None,
'polymorphic_on': type
}
def __eq__(self, other):
return self.resource == other.resource and self._start == other._start
def __hash__(self):
return id(self)
def copy(self):
""" Creates a new copy of this allocation. """
allocation = self.__class__()
allocation.resource = self.resource
allocation.mirror_of = self.mirror_of
allocation.group = self.group
allocation.quota = self.quota
allocation.partly_available = self.partly_available
allocation.approve_manually = self.approve_manually
allocation.timezone = self.timezone
allocation.data = self.data
allocation._start = self._start
allocation._end = self._end
allocation._raster = self._raster
return allocation
def get_start(self):
return self._start
def set_start(self, start):
if not start.tzinfo:
assert self.timezone
start = sedate.replace_timezone(start, self.timezone)
if self.raster is not None:
self._start = rasterize_start(start, self.raster)
else:
self._start = rasterize_start(start, MIN_RASTER)
#: The start of this allocation. Must be timezone aware.
#: This date is rastered by the allocation's raster.
start = property(get_start, set_start)
def get_end(self):
return self._end
def set_end(self, end):
if not end.tzinfo:
assert self.timezone
end = sedate.replace_timezone(end, self.timezone)
if self.raster is not None:
self._end = rasterize_end(end, self.raster)
else:
self._end = rasterize_end(end, MIN_RASTER)
#: The end of this allocation. Must be timezone aware.
#: This date is rastered by the allocation's raster.
    #: The end date is stored with an offset of minus one microsecond
#: to avoid overlaps with other allocations.
#: That is to say an allocation that ends at 15:00 really ends at
#: 14:59:59.999999
end = property(get_end, set_end)
def get_raster(self):
return self._raster
def set_raster(self, raster):
# the raster can only be set once!
assert not self._raster
self._raster = raster
# re-rasterize start/end - during initialization it's possible for
# them not to be setup correctly because that's done using
# kwargs which has a random order. So it might set start, end, raster
# in this order one time, then raster, start, end another time.
#
# this should of course only happen once - hence the assertion above
if self._start:
self._start = rasterize_start(self._start, self.raster)
if self._end:
self._end = rasterize_end(self._end, self.raster)
raster = property(get_raster, set_raster)
def display_start(self, timezone=None):
"""Returns the start in either the timezone given or the timezone
on the allocation."""
return sedate.to_timezone(self.start, timezone or self.timezone)
def display_end(self, timezone=None):
"""Returns the end plus one microsecond in either the timezone given
or the timezone on the allocation.
"""
end = self.end + timedelta(microseconds=1)
return sedate.to_timezone(end, timezone or self.timezone)
def _prepare_range(self, start, end):
if start:
start = sedate.standardize_date(start, self.timezone)
if end:
end = sedate.standardize_date(end, self.timezone)
return start, end
@property
def whole_day(self):
"""True if the allocation is a whole-day allocation.
A whole-day allocation is not really special. It's just an allocation
which starts at 0:00 and ends at 24:00 (or 23:59:59'999). Relative
to its timezone.
As such it can actually also span multiple days, only hours and minutes
count.
The use of this is to display allocations spanning days differently.
"""
s, e = self.display_start(), self.display_end()
assert s != e # this can never be, except when caused by cosmic rays
return sedate.is_whole_day(s, e, self.timezone)
def overlaps(self, start, end):
""" Returns true if the allocation overlaps with the given dates. """
start, end = self._prepare_range(start, end)
start, end = rasterize_span(start, end, self.raster)
return sedate.overlaps(start, end, self.start, self.end)
def contains(self, start, end):
""" Returns true if the the allocation contains the given dates. """
start, end = self._prepare_range(start, end)
start, end = rasterize_span(start, end, self.raster)
return self.start <= start and end <= self.end
def free_slots(self, start=None, end=None):
""" Returns the slots which are not yet reserved. """
reserved = [slot.start for slot in self.reserved_slots]
slots = []
for start, end in self.all_slots(start, end):
if start not in reserved:
slots.append((start, end))
return slots
def align_dates(self, start=None, end=None):
""" Aligns the given dates to the start and end date of the allocation.
"""
start, end = self._prepare_range(start, end)
start = start or self.start
start = start < self.start and self.start or start
end = end or self.end
end = end > self.end and self.end or end
return start, end
def all_slots(self, start=None, end=None):
""" Returns the slots which exist with this timespan. Reserved or free.
"""
start, end = self.align_dates(start, end)
if self.partly_available:
for start, end in iterate_span(start, end, self.raster):
yield start, end
else:
yield self.start, self.end
def count_slots(self, start=None, end=None):
""" Returns the number of slots which exist with this timespan.
Reserved or free.
"""
if not self.partly_available:
return 1
start, end = self.align_dates(start, end)
seconds = (end + timedelta(microseconds=1) - start).total_seconds()
return seconds // (self.raster * 60)
def is_available(self, start=None, end=None):
""" Returns true if the given daterange is completely available. """
if not (start and end):
start, end = self.start, self.end
assert self.overlaps(start, end)
reserved = {slot.start for slot in self.reserved_slots}
for start, end in self.all_slots(start, end):
if start in reserved:
return False
return True
def limit_timespan(self, start, end, timezone=None):
""" Takes the given timespan and moves the start/end date to
the closest reservable slot. So if 10:00 - 11:00 is requested it will
- on a partly available allocation return 10:00 - 11:00 if the raster
allows for that
- on a non-partly available allocation return the start/end date of
the allocation itself.
The resulting times are combined with the allocations start/end date
to form a datetime. (time in, datetime out -> maybe not the best idea)
"""
timezone = timezone or self.timezone
if self.partly_available:
assert isinstance(start, time)
assert isinstance(end, time)
s, e = sedate.get_date_range(
self.display_start(timezone), start, end
)
if self.display_end(timezone) < e:
e = self.display_end()
if self.display_start(timezone) > s:
s = self.display_start()
s, e = rasterize_span(s, e, self.raster)
return s, e + timedelta(microseconds=1)
else:
return self.display_start(timezone), self.display_end(timezone)
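    # Hedged usage sketch (the `allocation` instance is illustrative, not from
    # this file): on a partly available allocation, asking for 10:00 - 11:00
    #
    #   s, e = allocation.limit_timespan(time(10, 0), time(11, 0))
    #
    # yields timezone-aware datetimes on the allocation's day, snapped to the
    # raster and clamped to display_start()/display_end(); on a non-partly
    # available allocation the same call simply returns those display dates.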
@property
def pending_reservations(self):
""" Returns the pending reservations query for this allocation.
As the pending reservations target the group and not a specific
allocation this function returns the same value for masters and
mirrors.
"""
assert not self.is_transient, (
"Don't call if the allocation does not yet exist"
)
Reservation = self.models.Reservation
query = object_session(self).query(Reservation.id)
query = query.filter(Reservation.target == self.group)
query = query.filter(Reservation.status == u'pending')
return query
@property
def waitinglist_length(self):
return self.pending_reservations.count()
@property
def availability(self):
"""Returns the availability in percent."""
total = self.count_slots()
used = len(self.reserved_slots)
if total == used:
return 0.0
if used == 0:
return 100.0
return 100.0 - (float(used) / float(total) * 100.0)
@property
def in_group(self):
"""True if the event is in any group."""
query = object_session(self).query(Allocation.id)
query = query.filter(Allocation.resource == self.resource)
query = query.filter(Allocation.group == self.group)
query = query.limit(2)
return len(query.all()) > 1
@property
def quota_left(self):
# this can be done quickly if this is a master with a quota of 1
if self.is_master and self.quota == 1:
return 1 if self.is_available() else 0
# if not we need to go through the mirrors
free_quota = 0
for mirror in self.siblings():
if mirror.is_available():
free_quota += 1
return free_quota
def find_spot(self, start, end):
""" Returns the first free allocation spot amongst the master and the
mirrors. Honors the quota set on the master and will only try the
master if the quota is set to 1.
If no spot can be found, None is returned.
"""
master = self.get_master()
if master.is_available(start, end):
return master
if master.quota == 1:
return None
tries = master.quota - 1
for mirror in (m for m in self.siblings() if not m.is_master):
if mirror.is_available(start, end):
return mirror
if tries >= 1:
tries -= 1
else:
return None
@property
def is_separate(self):
"""True if available separately (as opposed to available only as
part of a group)."""
if self.partly_available:
return True
if self.in_group:
return False
return True
def availability_partitions(self):
"""Partitions the space between start and end into blocks of either
free or reserved time. Each block has a percentage representing the
space the block occupies compared to the size of the whole allocation.
The blocks are ordered from start to end. Each block is an item with
two values. The first being the percentage, the second being true if
the block is reserved.
So given an allocation that goes from 8 to 9 and a reservation that
goes from 8:15 until 8:30 we get the following blocks::
[
(25%, False),
(25%, True),
(50%, False)
]
This is useful to divide an allocation block into different divs on the
frontend, indicating to the user which parts of an allocation are
reserved.
"""
if (len(self.reserved_slots) == 0):
return [(100.0, False)]
reserved = {r.start for r in self.reserved_slots}
# Get the percentage one slot represents
slots = tuple(s[0] for s in self.all_slots())
step = 100.0 / float(len(slots))
# Create an entry for each slot with either True or False
pieces = tuple(s in reserved for s in slots)
# Group by the true/false values in the pieces and sum up the
# percentage
partitions = []
total = 0
for flag, group in groupby(pieces, key=lambda p: p):
percentage = sum(1 for item in group) * step
partitions.append([percentage, flag])
total += percentage
# Make sure to get rid of floating point rounding errors
diff = 100.0 - total
partitions[-1:][0][0] -= diff
return partitions
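    # Hedged example mirroring the docstring above: an allocation from 8:00 to
    # 9:00 with one reservation from 8:15 to 8:30 would yield roughly
    #
    #   allocation.availability_partitions()
    #   -> [[25.0, False], [25.0, True], [50.0, False]]
    #
    # i.e. a free quarter, a reserved quarter and a free half, in that order.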
@property
def is_transient(self):
"""True if the allocation does not exist in the database, and is not
about to be written to the database. If an allocation is transient it
means that the given instance only exists in memory.
See:
http://www.sqlalchemy.org/docs/orm/session.html
#quickie-intro-to-object-states
http://stackoverflow.com/questions/3885601/
sqlalchemy-get-object-instance-state
"""
return object_session(self) is None and not has_identity(self)
@hybrid_property
def is_master(self):
"""True if the allocation is a master allocation."""
return self.resource == self.mirror_of
def get_master(self):
if self.is_master:
return self
else:
query = object_session(self).query(Allocation)
query = query.filter(Allocation._start == self._start)
query = query.filter(Allocation.resource == self.mirror_of)
return query.one()
def siblings(self, imaginary=True):
"""Returns the master/mirrors group this allocation is part of.
        If 'imaginary' is true, nonexistent mirrors are created on the fly.
        Those mirrors are transient (see self.is_transient).
"""
# this function should always have itself in the result
if not imaginary and self.is_transient:
assert False, "the resulting list wouldn't contain this allocation"
if self.quota == 1:
assert self.is_master
return [self]
query = object_session(self).query(Allocation)
query = query.filter(Allocation.mirror_of == self.mirror_of)
query = query.filter(Allocation._start == self._start)
existing = dict(((e.resource, e) for e in query))
master = self.is_master and self or existing[self.mirror_of]
existing[master.resource] = master
uuids = utils.generate_uuids(master.resource, master.quota)
imaginary = imaginary and (master.quota - len(existing)) or 0
siblings = [master]
for uuid in uuids:
if uuid in existing:
siblings.append(existing[uuid])
elif imaginary > 0:
allocation = master.copy()
allocation.resource = uuid
siblings.append(allocation)
imaginary -= 1
return siblings
|
python
|
from collections.abc import Mapping
import shelve
import random
import time
class ConcurrentShelf(Mapping):
def __init__(self, file_name, time_out_seconds=60):
self._file_name = file_name
self._time_out_seconds = time_out_seconds
self._locked_shelf = None
shelf = self._open(write=True)
shelf.close()
def __del__(self):
if self._locked_shelf is not None:
self._locked_shelf.close()
def _open(self, write=False):
flag = 'c' if write else 'r'
start = time.time()
while True:
if time.time() - start > self._time_out_seconds:
raise RuntimeError('ConcurrentShelf time out, cannot gain access to shelf on disk')
try:
shelf = shelve.open(self._file_name, flag=flag)
return shelf
except Exception as e:
if '[Errno 11] Resource temporarily unavailable' in str(e):
# print('Shelf locked, waiting...')
time.sleep(random.uniform(0.01, 0.250))
                    continue
else:
raise e
def lock(self, write=True):
self._locked_shelf = self._open(write=write)
def unlock(self):
if self._locked_shelf is not None:
self._locked_shelf.close()
self._locked_shelf = None
def __getitem__(self, key):
if self._locked_shelf is not None:
return self._locked_shelf[key]
else:
shelf = self._open()
try:
value = shelf[key]
shelf.close()
except Exception as e:
shelf.close()
raise e
return value
def __setitem__(self, key, value):
if self._locked_shelf is not None:
self._locked_shelf[key] = value
else:
shelf = self._open(write=True)
try:
shelf[key] = value
shelf.close()
except Exception as e:
shelf.close()
raise e
def __iter__(self):
if self._locked_shelf is not None:
for value in self._locked_shelf:
yield value
else:
shelf = self._open()
try:
for value in shelf:
yield value
shelf.close()
except Exception as e:
shelf.close()
raise e
def __len__(self):
if self._locked_shelf is not None:
return len(self._locked_shelf)
else:
shelf = self._open()
try:
value = len(shelf)
shelf.close()
except Exception as e:
shelf.close()
raise e
return value
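# Hedged usage sketch (the 'example.db' file name is illustrative, not from the
# source): each access below opens and closes the shelf on its own, unless the
# shelf is explicitly locked for a batch of operations.
#
#   shelf = ConcurrentShelf('example.db')
#   shelf['answer'] = 42        # opens for writing, stores, closes
#   print(shelf['answer'])      # opens read-only, reads, closes
#
#   shelf.lock()                # keep a single writable handle open
#   try:
#       shelf['a'] = 1
#       shelf['b'] = 2
#   finally:
#       shelf.unlock()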
|
python
|
from flask import Blueprint, request, jsonify
from ortools.sat.python import cp_model
import numpy
bp = Blueprint('optimize', __name__)
@bp.route('/', methods=['POST'])
def recieve_data():
    model = cp_model.CpModel()
juniors = request.get_json()[0]
boat_parameters = request.get_json()[1]
constraints = request.get_json()[2]
maxTime = int(request.get_json()[3])
boat_parameters['minCrew'] = int(boat_parameters['minCrew']) if boat_parameters['minCrew'] else 0
boat_parameters['noBoats'] = int(boat_parameters['noBoats'])
boat_parameters['maxCrew'] = int(boat_parameters['maxCrew']) if boat_parameters['maxCrew'] else len(juniors)
boat_parameters['useAllBoats'] = bool(boat_parameters['useAllBoats'])
variables = {}
variables['x'] = create_x_var(model, juniors, boat_parameters['noBoats'])
variables['y'] = create_y_var(model, juniors, boat_parameters['noBoats'])
variables['boat_used'] = create_boat_var(model, boat_parameters['noBoats'])
variables['worst_boat'] = model.NewIntVar(0, 1, 'worst_boat')
pref_matrix = create_pref_matrix(juniors)
create_std_constraints(model, variables, boat_parameters, juniors, pref_matrix)
create_custom_constraints(model, variables, constraints, juniors, boat_parameters)
sum_exp = sum(pref_matrix[i][j] * variables['y'][i, j, b]
for i in range(len(juniors)) for j in range(len(juniors)) for b in range(boat_parameters['noBoats']) if j != i)
model.Maximize(variables['worst_boat'] + sum_exp)
    # Debug - print the entire model!
#print(solver.ExportModelAsLpFormat(False).replace('\\', '').replace(',_', ','), sep='\n')
# Check if there are hints, i.e. not first iteration
if len(request.get_json()) > 4:
hints = request.get_json()[4]
create_hints(variables, hints, juniors, boat_parameters['noBoats'], model)
solver = cp_model.CpSolver()
    # Abort the solver after maxTime seconds
solver.parameters.max_time_in_seconds = maxTime
status = solver.Solve(model)
print(solver.ResponseStats())
if status == cp_model.INFEASIBLE:
return dict(success=False, status='Infeasible')
if status == cp_model.MODEL_INVALID:
return dict(success=False, status='Model invalid')
if status == cp_model.UNKNOWN:
return dict(success=False, status='Unknown')
retval = create_retval(variables, juniors, boat_parameters['noBoats'], solver)
if status == cp_model.OPTIMAL:
retval['status'] = 'Optimal'
elif status == cp_model.FEASIBLE:
retval['status'] = 'Feasible'
retval['hints'] = get_current_variable_values(variables, juniors, boat_parameters['noBoats'], solver)
retval['solver_response'] = solver.ResponseStats()
retval['objective_value'] = solver.ObjectiveValue()
return jsonify(retval)
# x[i, b] is 1 if junior i sits in boat b, 0 otherwise
def create_x_var(model, juniors, no_boats):
x = {}
for i in range(len(juniors)):
for b in range(no_boats):
x[i, b] = model.NewIntVar(0, 1, 'x[Junior {}, Boat {}]'.format(i, b))
return x
# y[i, j, b] is 1 if junior i sits with junior j in boat b, 0 otherwise
def create_y_var(model, juniors, no_boats):
y = {}
for i in range(len(juniors)):
for j in range(len(juniors)):
for b in range(no_boats):
if i != j:
y[i, j, b] = model.NewIntVar(0, 1, 'y[Jun i {}, Jun j {}, Boat {}]'.format(i, j, b))
return y
# boat_used[b] = 1 if boat b is used, 0 otherwise
def create_boat_var(model, no_boats):
boat_used = {}
for b in range(no_boats):
boat_used[b] = model.NewIntVar(0, 1, 'boat_used[Boat {}]'.format(b))
return boat_used
# p[i, j] = 1 if junior i wished to sail with junior j
def create_pref_matrix(juniors):
p = numpy.zeros(shape=(len(juniors), len(juniors)), dtype=int)
i = 0
for junior_i in juniors:
j = 0
for junior_j in juniors:
if junior_j['name'] in junior_i['wishes']:
p[i][j] = 1
j = j + 1
i = i + 1
return p
def create_std_constraints(model, variables, boat_parameters, juniors, pref_matrix):
    # Each junior sits in exactly one boat
for i in range(len(juniors)):
model.Add(sum(variables['x'][i, b] for b in range(boat_parameters['noBoats'])) == 1)
#min capacity
for b in range(boat_parameters['noBoats']):
sum_exp = sum(variables['x'][i, b] for i in range(len(juniors)))
if boat_parameters['useAllBoats']:
model.Add(sum_exp >= max(boat_parameters['minCrew'], 1))
else:
model.Add(sum_exp >= boat_parameters['minCrew'] * variables['boat_used'][b])
#max capacity
if boat_parameters['noBoats']:
for b in range(boat_parameters['noBoats']):
sum_exp = sum(variables['x'][i, b] for i in range(len(juniors)))
if boat_parameters['useAllBoats']:
model.Add(sum_exp <= boat_parameters['maxCrew'])
else:
model.Add(sum_exp <= boat_parameters['maxCrew'] * variables['boat_used'][b])
    # Link x to y (junior i together with junior j) -->
for i in range(len(juniors)):
for j in range(len(juniors)):
for b in range(boat_parameters['noBoats']):
if i != j:
model.Add(2 * variables['y'][i, j, b] <= variables['x'][i, b] + variables['x'][j, b])
    # Worst boat --> objective function
for b in range(boat_parameters['noBoats']):
sum_exp = sum(pref_matrix[i][j] * variables['y'][i, j, b] for i in range(len(juniors)) for j in range(len(juniors)) if j!= i)
model.Add(variables['worst_boat'] <= sum_exp)
#constraint_exp = [pref_matrix[i][j] * variables['y'][i, j, b] for i in range(len(juniors)) for j in range(len(juniors)) if j!= i]
#solver.Add(variables['worst_boat'] <= solver.Sum(constraint_exp))
# Handles the constraints that force juniors to sail together or not sail together
def create_custom_constraints(model, variables, constraints, juniors, boat_parameters):
for c in constraints:
name1 = c['name1']
name2 = c['name2']
i = list(filter(lambda j: juniors[j]['name'] == name1, range(len(juniors))))[0]
j = list(filter(lambda j: juniors[j]['name'] == name2, range(len(juniors))))[0]
if c['mustSail']:
            # Must sail together --> sum over b of y[i,j,b] for (i,j) == 1
sum_exp = sum(variables['y'][i, j, b] for b in range(boat_parameters['noBoats']))
model.Add(sum_exp == 1)
#constraint_exp = [variables['y'][i, j, b] for b in range(boat_parameters['noBoats'])]
#solver.Add(solver.Sum(constraint_exp) == 1)
        else:
            # Must not sail together: in every boat, at most one of the two juniors may sit
            for b in range(boat_parameters['noBoats']):
                model.Add(variables['x'][i, b] + variables['x'][j, b] <= 1)
def create_hints(variables, hints, juniors, no_boats, model):
# Hints for X
for i in range(len(juniors)):
for b in range(no_boats):
model.AddHint(variables['x'][i, b], hints['x'][str(i)][str(b)])
# Hints for Y
for i in range(len(juniors)):
for j in range(len(juniors)):
for b in range(no_boats):
if i != j:
model.AddHint(variables['y'][i, j, b], hints['y'][str(i)][str(j)][str(b)])
def create_retval(variables, juniors, no_boats, solver):
retval = {}
retval['boats'] = {}
for b in range(no_boats):
retval['boats'][b] = []
for i in range(len(juniors)):
if solver.Value(variables['x'][i, b]) == 1:
#if variables['x'][i, b].solution_value() == 1:
retval['boats'][b].append(juniors[i]['name'])
retval['success'] = True
return retval
def get_current_variable_values(variables, juniors, no_boats, solver):
current_values = {}
current_values['x'] = {}
for i in range(len(juniors)):
current_values['x'][i] = {}
for b in range(no_boats):
current_values['x'][i][b] = solver.Value(variables['x'][i, b])
current_values['y'] = {}
for i in range(len(juniors)):
current_values['y'][i] = {}
for j in range(len(juniors)):
current_values['y'][i][j] = {}
for b in range(no_boats):
if i != j:
current_values['y'][i][j][b] = solver.Value(variables['y'][i, j, b])
return current_values
# In Python, you can also set the constraints as follows.
# for i in range(number_of_constraints):
# constraint_expr = coeffs[i][j] * x[j] for j in range(data['number_of_variables'])]
# solver.Add(sum(constraint_expr) <= data['bounds'][i])
# serialize (create a dict of itself) for every index in the array
#jsonify
|
python
|
import os
import re
import datetime
from mod_python import apache
NOW = str(datetime.datetime.utcnow().strftime("%s"))
DUMP_DIR="/var/www/html/dump"
if not os.path.exists(DUMP_DIR):
os.makedirs(DUMP_DIR)
def index(req):
if not 'file' in req.form or not req.form['file'].filename:
return "Error: Please upload a file"
ethmac = "unset"
password = "unset"
if 'ethmac' in req.form: ethmac = req.form['ethmac']
if 'password' in req.form: password = req.form['password']
user = auth_user(ethmac,password)
# Record which IP send this file
ip = req.get_remote_host(apache.REMOTE_NOLOOKUP)
return save_file(req.form['file'], ip, user)
def auth_user(mac, password):
    # should check if this really is the user's MAC address
# check Eth MAC is well-formed
if not re.match("([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})",mac):
return "unset"
return mac
def save_file(fileitem, ip, user=None):
# strip leading path from file name to avoid directory traversal attacks
filename = NOW+".tgz"
if user!=None:
filename = user+"-"+filename
#os.path.basename(fileitem.filename)
# build absolute path to files directory
host_dir = os.path.join(DUMP_DIR, ip)
ensure_dir(host_dir)
filepath = os.path.join(host_dir, filename)
if os.path.exists(filepath):
return "Error: file already exists"
    fd = open(filepath, 'wb')
    while True:
        chunk = fileitem.file.read(100000)
        if not chunk:
            break
        fd.write(chunk)
    fd.close()
return 'The file "%s" was uploaded successfully from %s' % (filepath, ip)
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
|
python
|
# Ryan McCarthy, [email protected]
# ITP 115, Fall 2020
# Assignment 4
# Description:
# Part 1 takes a sentence from the user and counts the number of times each letter or special character appears
# this info is returned to the user
# Part 1: this gets the sentence
sentence = input('PART 1 - Character Counter\nPlease enter a sentence:')
# this removes all of the white space so I don't have to worry about spaces when iterating and counting characters
sentence = sentence.replace(" ", "")
# changes sentence to lower so it doesn't count caps as special chars
sentence = sentence.lower()
# defines all of the characters counts
ac = "a: NONE"
bc = "b: NONE"
cc = "c: NONE"
dc = "d: NONE"
ec = "e: NONE"
fc = "f: NONE"
gc = "g: NONE"
hc = "h: NONE"
ic = "i: NONE"
jc = "j: NONE"
kc = "k: NONE"
lc = "l: NONE"
mc = "m: NONE"
nc = "n: NONE"
oc = "o: NONE"
pc = "p: NONE"
qc = "q: NONE"
rc = "r: NONE"
sc = "s: NONE"
tc = "t: NONE"
uc = "u: NONE"
vc = "v: NONE"
wc = "w: NONE"
xc = "x: NONE"
yc = "y: NONE"
zc = "z: NONE"
special_c = "Special Characters: NONE"
# for each loop to add an asterisk to the count if the letter is found
for character in sentence:
char = character
if char == "a":
ac = ac.replace("NONE", "")
ac += "*"
elif char == "b":
bc = bc.replace("NONE", "")
bc += "*"
elif char == "c":
cc = cc.replace("NONE", "")
cc += "*"
elif char == "d":
dc = dc.replace("NONE", "")
dc += "*"
elif char == "e":
ec = ec.replace("NONE", "")
ec += "*"
elif char == "f":
fc = fc.replace("NONE", "")
fc += "*"
elif char == "g":
gc = gc.replace("NONE", "")
gc += "*"
elif char == "h":
hc = hc.replace("NONE", "")
hc += "*"
elif char == "i":
ic = ic.replace("NONE", "")
ic += "*"
elif char == "j":
jc = jc.replace("NONE", "")
jc += "*"
elif char == "k":
kc = kc.replace("NONE", "")
kc += "*"
elif char == "l":
lc = lc.replace("NONE", "")
lc += "*"
elif char == "m":
mc = mc.replace("NONE", "")
mc += "*"
elif char == "n":
nc = nc.replace("NONE", "")
nc += "*"
elif char == "o":
oc = oc.replace("NONE", "")
oc += "*"
elif char == "p":
pc = pc.replace("NONE", "")
pc += "*"
elif char == "q":
qc = qc.replace("NONE", "")
qc += "*"
elif char == "r":
rc = rc.replace("NONE", "")
rc += "*"
elif char == "s":
sc = sc.replace("NONE", "")
sc += "*"
elif char == "t":
tc = tc.replace("NONE", "")
tc += "*"
elif char == "u":
uc = uc.replace("NONE", "")
uc += "*"
elif char == "v":
vc = vc.replace("NONE", "")
vc += "*"
elif char == "w":
wc = wc.replace("NONE", "")
wc += "*"
elif char == "x":
xc = xc.replace("NONE", "")
xc += "*"
elif char == "y":
yc = yc.replace("NONE", "")
yc += "*"
elif char == "z":
zc = zc.replace("NONE", "")
zc += "*"
else:
special_c = special_c.replace("NONE", "")
special_c += "*"
print("Here is the character distribution:\n\n " + ac, "\n", bc, "\n", cc, "\n", dc, "\n", ec, "\n", fc, "\n", gc, "\n",
hc, "\n", ic, "\n", jc, "\n", kc, "\n", lc, "\n", mc, "\n", nc, "\n", oc, "\n", pc, "\n", qc, "\n", rc, "\n",
sc, "\n", tc, "\n", uc, "\n", vc, "\n", wc, "\n", xc, "\n", yc, "\n", zc, "\n", special_c)
|
python
|
import peewee as pw
from core.model.base import BaseModel
from playhouse.shortcuts import model_to_dict
class Activity(BaseModel):
name = pw.CharField(null=False)
url_image = pw.CharField(null=False)
def to_dict(self, recurse=False, backrefs=False):
return model_to_dict(self, recurse=recurse, backrefs=backrefs, exclude=[Activity.created_at, Activity.updated_at])
class Meta:
db_table = "activities"
|
python
|
# [M / F] while not strip upper
sexo = str(input('Digite seu sexo: [M/F] ')).strip().upper()[0]
while sexo not in 'MmFf':
sexo = str(input('Dados inválidos. Por favor, informe corretamente: ')).strip().upper()[0]
print(sexo)
|
python
|
import json
import jsonschema
import os
import re
from urllib.request import urlopen, Request
show_descriptions = True # If False, don't include 'name' as the description of 'licenseId'
repo = 'https://github.com/spdx/license-list-data/tree/master/json'
files = ['licenses.json', 'exceptions.json']
outfile = 'spdx-license-enums'
"""
Fetch current SPDX license list from repo
"""
def github_contents(web_url):
"""
Convert a GitHub repo web page URL to the corresponding API directory URL
:param web_url: https://github.com/spdx/license-list-data/tree/master/json
:return: dir_url: https://api.github.com/repos/spdx/license-list-data/contents/json
"""
m = re.match(r'^(https://)(github.com/)(.*?)/tree/master/(.*)$', web_url)
if m:
return m.group(1) + 'api.' + m.group(2) + 'repos/' + m.group(3) + '/contents/' + m.group(4)
data = {}
auth = {'Authorization': 'token ' + os.environ['GitHubToken']}
with urlopen(Request(github_contents(repo), headers=auth)) as d:
dir = json.loads(d.read().decode())
for n, f in enumerate(dir):
if f['name'] in files:
with urlopen(Request(f['download_url'], headers=auth)) as file:
data[os.path.splitext(f['name'])[0]] = json.loads(file.read().decode())
"""
Validate license list files
"""
llversion = data['licenses']['licenseListVersion']
print(f'License List Version {llversion}, {data["licenses"]["releaseDate"]}')
assert llversion == data['exceptions']['licenseListVersion']
with open('license_list_source.json') as f:
jschema = json.load(f)
jsonschema.Draft7Validator(jschema).validate({'licenselist': data['licenses']})
jsonschema.Draft7Validator(jschema).validate({'exceptionlist': data['exceptions']})
"""
Generate license and exception enumerations
"""
def item(license, le, desc=True):
id = {'l': 'licenseId', 'e': 'licenseExceptionId'}
return [int(license['referenceNumber']), license[id[le]], license['name'].strip() if desc else '']
license_items = [item(k, 'l', show_descriptions) for k in data['licenses']['licenses']]
exception_items = [item(k, 'e', show_descriptions) for k in data['exceptions']['exceptions']]
le_schema = {
'meta': {
'module': 'http://spdx.org/license-list/v3.0',
'patch': llversion,
'description': f'SPDX License List Enumerations, Version {llversion}, Released {data["licenses"]["releaseDate"]}',
'exports': ["LicenseList", "ExceptionList"],
'config': {'$MaxElements': 1000} # Default is 100, 2020-07-21 license list has 441
},
'types': [
['LicenseList', 'Enumerated', [], '', license_items],
['ExceptionList', 'Enumerated', [], '', exception_items]
]
}
print(f'{len(license_items)} licenses, {len(exception_items)} exceptions')
fname = os.path.join('data', f'{outfile}-{llversion}.jadn')
with open(fname, 'w') as f:
json.dump(le_schema, f)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains implementation for type editor
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "[email protected]"
import logging
from functools import partial
from Qt.QtCore import *
from Qt.QtWidgets import *
import tpDcc as tp
from tpDcc.libs.qt.core import base
from tpDcc.libs.qt.widgets import grid
import artellapipe
from artellapipe.tools.tagger.widgets import taggereditor
LOGGER = logging.getLogger()
class TypeEditor(taggereditor.TaggerEditor, object):
EDITOR_TYPE = 'Type'
def __init__(self, project, parent=None):
super(TypeEditor, self).__init__(project=project, parent=parent)
def ui(self):
super(TypeEditor, self).ui()
self._type_grid = grid.GridWidget()
self._type_grid.setShowGrid(False)
self._type_grid.setColumnCount(4)
self._type_grid.horizontalHeader().hide()
self._type_grid.verticalHeader().hide()
self._type_grid.resizeRowsToContents()
self._type_grid.resizeColumnsToContents()
self.main_layout.addWidget(self._type_grid)
def initialize(self):
self._type_grid.clear()
tag_types = artellapipe.TagsMgr().get_tag_types()
if not tag_types:
LOGGER.warning('No Tag Types defined in current project!')
return
for tag_type in tag_types:
tag_widget = TaggerTypeWidget(type_title=tag_type)
tag_widget._btn.toggled.connect(partial(self.update_data, tag_widget.get_name()))
self._type_grid.add_widget_first_empty_cell(tag_widget)
def reset(self):
"""
Function that resets all editor information
"""
try:
self._type_grid.blockSignals(True)
finally:
self._type_grid.blockSignals(False)
def update_tag_buttons_state(self, sel=None):
"""
        Updates the checked state of the type tag buttons based on the tag data node
        :param sel: current selection used to look up the tag data node
"""
tag_data_node = artellapipe.TagsMgr().get_tag_data_node_from_current_selection(sel)
if tag_data_node is None:
return
self.set_tag_widgets_state(False)
attr_exists = tp.Dcc.attribute_exists(node=tag_data_node, attribute_name='types')
if attr_exists:
types = tp.Dcc.get_attribute_value(node=tag_data_node, attribute_name='types')
if types is not None and types != '':
types = types.split()
for t in types:
for i in range(self._type_grid.columnCount()):
for j in range(self._type_grid.rowCount()):
container_w = self._type_grid.cellWidget(j, i)
if container_w is not None:
tag_w = container_w.containedWidget
tag_name = tag_w.get_name()
if tag_name == t:
tag_w._btn.blockSignals(True)
tag_w._btn.setChecked(True)
tag_w._btn.blockSignals(False)
def fill_tag_node(self, tag_data_node, *args, **kwargs):
"""
Fills given tag node with the data managed by this editor
:param tag_data_node: str
"""
attr_exists = tp.Dcc.attribute_exists(node=tag_data_node, attribute_name='types')
if not attr_exists:
tp.Dcc.add_string_attribute(node=tag_data_node, attribute_name='types')
data = kwargs.get('data', None)
types = tp.Dcc.get_attribute_value(node=tag_data_node, attribute_name='types')
if args and args[0]:
if types is None or types == '':
types = data
else:
types_split = types.split()
if data in types_split:
return
types_split.append(data)
types = ''.join(str(e) + ' ' for e in types_split)
tp.Dcc.unlock_attribute(node=tag_data_node, attribute_name='types')
tp.Dcc.set_string_attribute_value(node=tag_data_node, attribute_name='types', attribute_value=types)
tp.Dcc.lock_attribute(node=tag_data_node, attribute_name='types')
else:
if types is None or types == '':
return
types_split = types.split()
if data in types_split:
types_split.remove(data)
else:
return
types = ''.join(str(e) + ' ' for e in types_split)
tp.Dcc.unlock_attribute(node=tag_data_node, attribute_name='types')
tp.Dcc.set_string_attribute_value(node=tag_data_node, attribute_name='types', attribute_value=types)
tp.Dcc.lock_attribute(node=tag_data_node, attribute_name='types')
def set_tag_widgets_state(self, state=False):
"""
Disables/Enables all tag buttons on the grid layout
:param state: bool
"""
for i in range(self._type_grid.columnCount()):
for j in range(self._type_grid.rowCount()):
container_w = self._type_grid.cellWidget(j, i)
if container_w is not None:
tag_w = container_w.containedWidget
tag_w._btn.blockSignals(True)
tag_w._btn.setChecked(state)
tag_w._btn.blockSignals(False)
class TaggerTypeWidget(base.BaseWidget, object):
def __init__(self, type_title, parent=None):
self._type_title_name = type_title
self._type_name = type_title.replace(' ', '_').lower()
super(TaggerTypeWidget, self).__init__(parent=parent)
def ui(self):
super(TaggerTypeWidget, self).ui()
self._btn = QPushButton(self._type_title_name)
self._btn.setCheckable(True)
self.main_layout.addWidget(self._btn)
type_lbl = QLabel(self._type_title_name)
type_lbl.setAlignment(Qt.AlignCenter)
# main_layout.addWidget(type_lbl)
def get_name(self):
"""
Returns type name of the tagger widget
:return: str
"""
return self._type_name
|
python
|
import random
def n_list(n):
nl = [] #int list to be returned
#creating a list of integers from 1 to n
    for i in range(1, n + 1):
nl.extend([i])
#shuffle the list of integers into random order
#to the best ability of python prng
while n > 1:
choice = int(random.random()*n)
pick = nl.pop(choice)
nl.extend([pick])
n -= 1
return nl
|
python
|
"""
--- Day 18: Operation Order ---
As you look out the window and notice a heavily-forested continent slowly appear over the horizon, you are interrupted by the child sitting next to you. They're curious if you could help them with their math homework.
Unfortunately, it seems like this "math" follows different rules than you remember.
The homework (your puzzle input) consists of a series of expressions that consist of addition (+), multiplication (*), and parentheses ((...)). Just like normal math, parentheses indicate that the expression inside must be evaluated before it can be used by the surrounding expression. Addition still finds the sum of the numbers on both sides of the operator, and multiplication still finds the product.
However, the rules of operator precedence have changed. Rather than evaluating multiplication before addition, the operators have the same precedence, and are evaluated left-to-right regardless of the order in which they appear.
For example, the steps to evaluate the expression 1 + 2 * 3 + 4 * 5 + 6 are as follows:
1 + 2 * 3 + 4 * 5 + 6
3 * 3 + 4 * 5 + 6
9 + 4 * 5 + 6
13 * 5 + 6
65 + 6
71
Parentheses can override this order; for example, here is what happens if parentheses are added to form 1 + (2 * 3) + (4 * (5 + 6)):
1 + (2 * 3) + (4 * (5 + 6))
1 + 6 + (4 * (5 + 6))
7 + (4 * (5 + 6))
7 + (4 * 11 )
7 + 44
51
Here are a few more examples:
2 * 3 + (4 * 5) becomes 26.
5 + (8 * 3 + 9 + 3 * 4 * 3) becomes 437.
5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4)) becomes 12240.
((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2 becomes 13632.
Before you can help with the homework, you need to understand it yourself. Evaluate the expression on each line of the homework; what is the sum of the resulting values?
"""
import re
f = open("challenges\data\day18data.txt", "r")
def processData(file):
data = []
for x in f:
x=x.strip().replace('\n', '')
data.append(x)
return data
def evalEquationString(equation):
operations = []
stack = []
def apply_top_op():
if operations[-1] == '+':
stack[-2:] = [stack[-2] + stack[-1]]
elif operations[-1] == '*':
stack[-2:] = [stack[-2] * stack[-1]]
else:
raise Exception(f"Bad state; opstack {operations} stack {stack}")
operations.pop()
for m in re.finditer(r'([()])|(\d+)|([+*])', equation):
if m.group(1) == '(':
operations.append('(')
elif m.group(1) == ')':
while operations[-1] != '(':
apply_top_op()
operations.pop()
elif m.group(2):
stack.append(int(m.group(2)))
else:
while operations and operations[-1] != '(':
apply_top_op()
operations.append(m.group(3))
while operations:
apply_top_op()
assert len(stack) == 1, f"operations {operations} stack {stack}"
return stack[0]
def sumResultingValues(arr, evalMethod):
count = 0
for equationString in arr:
count += evalMethod(equationString)
return count
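# Sanity checks taken from the worked examples in the puzzle text above; they
# run with the rest of the script and fail loudly if the evaluator regresses.
assert evalEquationString("1 + 2 * 3 + 4 * 5 + 6") == 71
assert evalEquationString("2 * 3 + (4 * 5)") == 26
assert evalEquationString("5 + (8 * 3 + 9 + 3 * 4 * 3)") == 437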
"""
--- Part Two ---
You manage to answer the child's questions and they finish part 1 of their homework, but get stuck when they reach the next section: advanced math.
Now, addition and multiplication have different precedence levels, but they're not the ones you're familiar with. Instead, addition is evaluated before multiplication.
For example, the steps to evaluate the expression 1 + 2 * 3 + 4 * 5 + 6 are now as follows:
1 + 2 * 3 + 4 * 5 + 6
3 * 3 + 4 * 5 + 6
3 * 7 * 5 + 6
3 * 7 * 11
21 * 11
231
Here are the other examples from above:
1 + (2 * 3) + (4 * (5 + 6)) still becomes 51.
2 * 3 + (4 * 5) becomes 46.
5 + (8 * 3 + 9 + 3 * 4 * 3) becomes 1445.
5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4)) becomes 669060.
((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2 becomes 23340.
What do you get if you add up the results of evaluating the homework problems using these new rules?
"""
def evalAdvEquationString(equation):
operations = []
stack = []
def apply_top_op():
if operations[-1] == '+':
stack[-2:] = [stack[-2] + stack[-1]]
elif operations[-1] == '*':
stack[-2:] = [stack[-2] * stack[-1]]
else:
raise Exception(f"Bad state; opstack {operations} stack {stack}")
operations.pop()
for m in re.finditer(r'([()])|(\d+)|([+*])', equation):
if m.group(1) == '(':
operations.append('(')
elif m.group(1) == ')':
while operations[-1] != '(':
apply_top_op()
operations.pop()
elif m.group(2):
stack.append(int(m.group(2)))
else:
while operations and m.group(3) == '*' and operations[-1] == '+':
apply_top_op()
operations.append(m.group(3))
while operations:
apply_top_op()
assert len(stack) == 1, f"operations {operations} stack {stack}"
return stack[0]
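# Sanity checks for the advanced rules, again taken from the examples above.
assert evalAdvEquationString("1 + 2 * 3 + 4 * 5 + 6") == 231
assert evalAdvEquationString("2 * 3 + (4 * 5)") == 46
assert evalAdvEquationString("5 + (8 * 3 + 9 + 3 * 4 * 3)") == 1445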
data = processData(f)
print(sumResultingValues(data, evalEquationString))
print(sumResultingValues(data, evalAdvEquationString))
|
python
|
"""Exceptions raised by the s3control service."""
from moto.core.exceptions import RESTError
ERROR_WITH_ACCESS_POINT_NAME = """{% extends 'wrapped_single_error' %}
{% block extra %}<AccessPointName>{{ name }}</AccessPointName>{% endblock %}
"""
ERROR_WITH_ACCESS_POINT_POLICY = """{% extends 'wrapped_single_error' %}
{% block extra %}<AccessPointName>{{ name }}</AccessPointName>{% endblock %}
"""
class S3ControlError(RESTError):
def __init__(self, *args, **kwargs):
kwargs.setdefault("template", "single_error")
super().__init__(*args, **kwargs)
class AccessPointNotFound(S3ControlError):
code = 404
def __init__(self, name, **kwargs):
kwargs.setdefault("template", "ap_not_found")
kwargs["name"] = name
self.templates["ap_not_found"] = ERROR_WITH_ACCESS_POINT_NAME
super().__init__(
"NoSuchAccessPoint", "The specified accesspoint does not exist", **kwargs
)
class AccessPointPolicyNotFound(S3ControlError):
code = 404
def __init__(self, name, **kwargs):
kwargs.setdefault("template", "apf_not_found")
kwargs["name"] = name
self.templates["apf_not_found"] = ERROR_WITH_ACCESS_POINT_POLICY
super().__init__(
"NoSuchAccessPointPolicy",
"The specified accesspoint policy does not exist",
**kwargs
)
|
python
|
# encoding: utf-8
from __future__ import unicode_literals
import os
from django.test import TestCase
from data_importer.core.descriptor import ReadDescriptor
from data_importer.core.descriptor import InvalidDescriptor
from data_importer.core.descriptor import InvalidModel
from data_importer.importers.base import BaseImporter
BASEDIR = os.path.dirname(__file__)
JSON_FILE = os.path.abspath(os.path.join(BASEDIR, 'data/test_json_descriptor.json'))
class ReadDescriptorTestCase(TestCase):
def setUp(self):
self.descriptor = ReadDescriptor(file_name=JSON_FILE, model_name='Contact')
def test_readed_file(self):
self.assertTrue(self.descriptor.source)
def test_get_fields(self):
self.assertEquals(self.descriptor.get_fields(), ["name", "year", "last"])
def test_invalid_model(self):
descriptor = ReadDescriptor(file_name=JSON_FILE, model_name='TestInvalidModel')
self.assertRaises(InvalidModel, lambda: descriptor.get_model())
def test_invalid_file(self):
self.assertRaises(InvalidDescriptor, lambda: ReadDescriptor(file_name='invalid_file.er',
model_name='TestInvalidModel'))
class MyBaseImport(BaseImporter):
class Meta:
delimiter = ';'
ignore_first_line = True
descriptor = JSON_FILE
descriptor_model = "Contact"
def set_reader(self):
return
class TestDescriptionUsingBaseImporter(TestCase):
def setUp(self):
self.importer = MyBaseImport(source=None)
def test_get_fields(self):
self.assertEquals(self.importer.fields, ['name', 'year', 'last'])
|
python
|
#---------------------------------------------
# Set up Trick executive parameters.
#---------------------------------------------
#instruments.echo_jobs.echo_jobs_on()
trick.exec_set_trap_sigfpe(True)
#trick.checkpoint_pre_init(1)
trick.checkpoint_post_init(1)
#trick.add_read(0.0 , '''trick.checkpoint('chkpnt_point')''')
# NOTE: You must set this to be the same as the master federate's frame for IMSim freezing
trick.exec_set_software_frame(0.25)
trick.exec_set_stack_trace(False)
# Trick config
trick.exec_set_enable_freeze(False)
trick.exec_set_freeze_command(False)
trick.sim_control_panel_set_enabled(False)
trick.exec_set_stack_trace(False)
#trick.exec_set_thread_amf_cycle_time( 1 , 0.250 )
#trick.exec_set_thread_process_type( 1 , trick.PROCESS_TYPE_AMF_CHILD )
run_duration = 15.0
#---------------------------------------------
# Set up data to record.
#---------------------------------------------
exec(open( "Log_data/log_sine_states.py" ).read())
log_sine_states( 'A', 0.250 )
log_sine_states( 'P', 0.250 )
#---------------------------------------------
# Set up the initial Sine states
#---------------------------------------------
exec(open( "Modified_data/sine_init.py" ).read())
#TODO: setup integration
#P.integ.option = Runge_Kutta_Fehlberg_45
#P.integ.first_step_deriv = Yes
# Example of a 1-dimensional dynamic array.
A.packing.buff_size = 10
A.packing.buff = trick.sim_services.alloc_type( A.packing.buff_size, 'unsigned char' )
P.packing.buff_size = 10
P.packing.buff = trick.sim_services.alloc_type( P.packing.buff_size, 'unsigned char' )
# We are taking advantage of the input file to specify a unique name for the
# sim-data name field for the P-side federate.
A.sim_data.name = 'A.name.P-side'
P.sim_data.name = 'P.name.P-side'
# We are taking advantage of the input file to specify a unique name and
# message for the P-side federate interaction handler.
A.interaction_handler.name = 'P-side: A.interaction_hdlr.name'
P.interaction_handler.name = 'P-side: P.interaction_hdlr.name'
A.interaction_handler.message = 'P-side: A.interaction_hdlr.message'
P.interaction_handler.message = 'P-side: P.interaction_hdlr.message'
# =========================================================================
# Set up HLA interoperability.
# =========================================================================
# Show or hide the TrickHLA debug messages.
# Use Level-3 to show the ownership transfer debug messages.
THLA.federate.debug_level = trick.DEBUG_LEVEL_6_TRACE
# Configure the CRC.
# Pitch specific local settings designator:
THLA.federate.local_settings = 'crcHost = localhost\n crcPort = 8989'
# Mak specific local settings designator, which is anything from the rid.mtl file:
#THLA.federate.local_settings = '(setqb RTI_tcpForwarderAddr \'192.168.15.3\') (setqb RTI_distributedForwarderPort 5000)'
THLA.federate.lookahead_time = 0.250
# Configure the federate.
THLA.federate.name = 'P-side-Federate'
THLA.federate.FOM_modules = 'FOMs/S_FOMfile.xml,FOMs/TrickHLAFreezeInteraction.xml'
THLA.federate.federation_name = 'SineWaveSim'
THLA.federate.time_regulating = True
THLA.federate.time_constrained = True
# Configure ExecutionControl.
# Set the multiphase initialization synchronization points.
THLA.execution_control.multiphase_init_sync_points = 'Phase1, Phase2'
# Set the simulation timeline to be used for time computations.
THLA.execution_control.sim_timeline = THLA_INIT.sim_timeline
# Set the scenario timeline to be used for configuring federation freeze times.
THLA.execution_control.scenario_timeline = THLA_INIT.scenario_timeline
# The list of Federates known to be in our Federation. The simulation will
# wait for all Federates marked as required to join the Federation before
# continuing on.
THLA.federate.enable_known_feds = True
THLA.federate.known_feds_count = 2
THLA.federate.known_feds = trick.sim_services.alloc_type( THLA.federate.known_feds_count, 'TrickHLA::KnownFederate' )
THLA.federate.known_feds[0].name = 'A-side-Federate'
THLA.federate.known_feds[0].required = True
THLA.federate.known_feds[1].name = 'P-side-Federate'
THLA.federate.known_feds[1].required = True
#---------------------------------------------
# Set up for simulation configuration.
#---------------------------------------------
THLA.simple_sim_config.owner = 'P-side-Federate'
THLA.simple_sim_config.run_duration = run_duration
# TrickHLA Interactions and Parameters.
THLA.manager.inter_count = 1
THLA.manager.interactions = trick.alloc_type( THLA.manager.inter_count, 'TrickHLA::Interaction' )
THLA.manager.interactions[0].FOM_name = 'Communication'
THLA.manager.interactions[0].publish = False
THLA.manager.interactions[0].subscribe = True
THLA.manager.interactions[0].handler = P.interaction_handler
THLA.manager.interactions[0].param_count = 3
THLA.manager.interactions[0].parameters = trick.sim_services.alloc_type( THLA.manager.interactions[0].param_count, 'TrickHLA::Parameter' )
THLA.manager.interactions[0].parameters[0].FOM_name = 'Message'
THLA.manager.interactions[0].parameters[0].trick_name = 'P.interaction_handler.message'
THLA.manager.interactions[0].parameters[0].rti_encoding = trick.ENCODING_UNICODE_STRING
THLA.manager.interactions[0].parameters[1].FOM_name = 'time'
THLA.manager.interactions[0].parameters[1].trick_name = 'P.interaction_handler.time'
THLA.manager.interactions[0].parameters[1].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.interactions[0].parameters[2].FOM_name = 'year'
THLA.manager.interactions[0].parameters[2].trick_name = 'P.interaction_handler.year'
THLA.manager.interactions[0].parameters[2].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
# The Federate has two objects, it publishes one and subscribes to another.
THLA.manager.obj_count = 2
THLA.manager.objects = trick.sim_services.alloc_type( THLA.manager.obj_count, 'TrickHLA::Object' )
# Configure the object this federate subscribes to but will not create an
# HLA instance for.
THLA.manager.objects[0].FOM_name = 'Test'
THLA.manager.objects[0].name = 'A-side-Federate.Test'
THLA.manager.objects[0].create_HLA_instance = False
THLA.manager.objects[0].packing = A.packing
THLA.manager.objects[0].deleted = A.obj_deleted_callback
THLA.manager.objects[0].attr_count = 8
THLA.manager.objects[0].attributes = trick.sim_services.alloc_type( THLA.manager.objects[0].attr_count, 'TrickHLA::Attribute' )
THLA.manager.objects[0].attributes[0].FOM_name = 'Time'
THLA.manager.objects[0].attributes[0].trick_name = 'A.sim_data.time'
THLA.manager.objects[0].attributes[0].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[0].publish = True
THLA.manager.objects[0].attributes[0].subscribe = True
THLA.manager.objects[0].attributes[0].locally_owned = False
THLA.manager.objects[0].attributes[0].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[1].FOM_name = 'Value'
THLA.manager.objects[0].attributes[1].trick_name = 'A.sim_data.value'
THLA.manager.objects[0].attributes[1].config = trick.CONFIG_INITIALIZE + trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[1].publish = True
THLA.manager.objects[0].attributes[1].subscribe = True
THLA.manager.objects[0].attributes[1].locally_owned = False
THLA.manager.objects[0].attributes[1].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[2].FOM_name = 'dvdt'
THLA.manager.objects[0].attributes[2].trick_name = 'A.sim_data.dvdt'
THLA.manager.objects[0].attributes[2].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[2].publish = True
THLA.manager.objects[0].attributes[2].subscribe = True
THLA.manager.objects[0].attributes[2].locally_owned = False
THLA.manager.objects[0].attributes[2].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[3].FOM_name = 'Phase'
THLA.manager.objects[0].attributes[3].trick_name = 'A.packing.phase_deg' # using packed data instead of 'A.sim_data.phase'
THLA.manager.objects[0].attributes[3].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[3].publish = True
THLA.manager.objects[0].attributes[3].subscribe = True
THLA.manager.objects[0].attributes[3].locally_owned = False
THLA.manager.objects[0].attributes[3].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[4].FOM_name = 'Frequency'
THLA.manager.objects[0].attributes[4].trick_name = 'A.sim_data.freq'
THLA.manager.objects[0].attributes[4].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[4].publish = True
THLA.manager.objects[0].attributes[4].subscribe = True
THLA.manager.objects[0].attributes[4].locally_owned = False
THLA.manager.objects[0].attributes[4].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[5].FOM_name = 'Amplitude'
THLA.manager.objects[0].attributes[5].trick_name = 'A.sim_data.amp'
THLA.manager.objects[0].attributes[5].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[5].publish = True
THLA.manager.objects[0].attributes[5].subscribe = True
THLA.manager.objects[0].attributes[5].locally_owned = False
THLA.manager.objects[0].attributes[5].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[6].FOM_name = 'Tolerance'
THLA.manager.objects[0].attributes[6].trick_name = 'A.sim_data.tol'
THLA.manager.objects[0].attributes[6].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[6].publish = True
THLA.manager.objects[0].attributes[6].subscribe = True
THLA.manager.objects[0].attributes[6].locally_owned = False
THLA.manager.objects[0].attributes[6].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[7].FOM_name = 'Name'
THLA.manager.objects[0].attributes[7].trick_name = 'A.sim_data.name'
THLA.manager.objects[0].attributes[7].config = trick.CONFIG_INITIALIZE + trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[7].publish = True
THLA.manager.objects[0].attributes[7].subscribe = True
THLA.manager.objects[0].attributes[7].locally_owned = False
THLA.manager.objects[0].attributes[7].rti_encoding = trick.ENCODING_UNICODE_STRING
# Configure the object this federate will create an HLA instance and
# publish data for.
THLA.manager.objects[1].FOM_name = 'Test'
THLA.manager.objects[1].name = 'P-side-Federate.Test'
THLA.manager.objects[1].create_HLA_instance = True
THLA.manager.objects[1].packing = P.packing
THLA.manager.objects[1].deleted = P.obj_deleted_callback
THLA.manager.objects[1].attr_count = 8
THLA.manager.objects[1].attributes = trick.sim_services.alloc_type( THLA.manager.objects[1].attr_count, 'TrickHLA::Attribute' )
THLA.manager.objects[1].attributes[0].FOM_name = 'Time'
THLA.manager.objects[1].attributes[0].trick_name = 'P.sim_data.time'
THLA.manager.objects[1].attributes[0].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[0].publish = True
THLA.manager.objects[1].attributes[0].locally_owned = True
THLA.manager.objects[1].attributes[0].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[1].FOM_name = 'Value'
THLA.manager.objects[1].attributes[1].trick_name = 'P.sim_data.value'
THLA.manager.objects[1].attributes[1].config = trick.CONFIG_INITIALIZE + trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[1].publish = True
THLA.manager.objects[1].attributes[1].subscribe = True
THLA.manager.objects[1].attributes[1].locally_owned = True
THLA.manager.objects[1].attributes[1].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[2].FOM_name = 'dvdt'
THLA.manager.objects[1].attributes[2].trick_name = 'P.sim_data.dvdt'
THLA.manager.objects[1].attributes[2].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[2].publish = True
THLA.manager.objects[1].attributes[2].locally_owned = True
THLA.manager.objects[1].attributes[2].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[3].FOM_name = 'Phase'
THLA.manager.objects[1].attributes[3].trick_name = 'P.packing.phase_deg' # using packed data instead of 'P.sim_data.phase'
THLA.manager.objects[1].attributes[3].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[3].publish = True
THLA.manager.objects[1].attributes[3].locally_owned = True
THLA.manager.objects[1].attributes[3].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[4].FOM_name = 'Frequency'
THLA.manager.objects[1].attributes[4].trick_name = 'P.sim_data.freq'
THLA.manager.objects[1].attributes[4].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[4].publish = True
THLA.manager.objects[1].attributes[4].locally_owned = True
THLA.manager.objects[1].attributes[4].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[5].FOM_name = 'Amplitude'
THLA.manager.objects[1].attributes[5].trick_name = 'P.sim_data.amp'
THLA.manager.objects[1].attributes[5].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[5].publish = True
THLA.manager.objects[1].attributes[5].locally_owned = True
THLA.manager.objects[1].attributes[5].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[6].FOM_name = 'Tolerance'
THLA.manager.objects[1].attributes[6].trick_name = 'P.sim_data.tol'
THLA.manager.objects[1].attributes[6].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[6].publish = True
THLA.manager.objects[1].attributes[6].locally_owned = True
THLA.manager.objects[1].attributes[6].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[7].FOM_name = 'Name'
THLA.manager.objects[1].attributes[7].trick_name = 'P.sim_data.name'
THLA.manager.objects[1].attributes[7].config = trick.CONFIG_INITIALIZE + trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[7].publish = True
THLA.manager.objects[1].attributes[7].locally_owned = True
THLA.manager.objects[1].attributes[7].rti_encoding = trick.ENCODING_UNICODE_STRING
#---------------------------------------------
# Set up simulation termination time.
#---------------------------------------------
trick.sim_services.exec_set_terminate_time( run_duration )
|
python
|
import hglib
import os
__all__ = ['HGState']
class HGState(object):
def __init__(self, path):
self.client, self.hg_root_path = self.find_hg_root(path)
def find_hg_root(self, path):
input_path = path
found_root = False
while not found_root:
try:
client = hglib.open(path)
found_root = True
except hglib.error.ServerError:
ppath = os.path.abspath(os.path.join(path, os.pardir))
if ppath == path:
raise ValueError('No hg repo at %s' % input_path)
path = ppath
return client, os.path.abspath(path)
@property
def has_addmodr(self):
return '+' in self.client.identify()
@property
def has_untracked(self):
return len(self.client.status()) > 0
@property
def is_clean(self):
return not self.has_untracked and not self.has_addmodr
@property
def id_str(self):
return '-'.join([self.client.branch()] + self.client.identify().split())
def get_state(self):
return {'version': self.client.identify(),
'status':self.client.status(),
'branch':self.client.branch()}
|
python
|
def isPalindrome(word):
for i in range(len(word)//2):
if word[i]!=word[-(i+1)]:
return False
return True
for t in range(10):
N=int(input())
L=[]
for i in range(8):
L.append(input())
ans=0
for i in range(8):
for j in range(9-N):
if isPalindrome(L[i][j:j+N])==True:
ans+=1
for j in range(8):
for i in range(9-N):
if isPalindrome([a[j] for a in L[i:i+N]])==True:
ans+=1
print(f"#{t+1} {ans}")
|
python
|
"""
pyStatic_problem
"""
# =============================================================================
# Imports
# =============================================================================
import warnings
import os
import numpy as np
from collections import OrderedDict
import time
from .base import TACSProblem
import tacs.TACS
class StaticProblem(TACSProblem):
def __init__(self, name, assembler, comm, outputViewer=None, meshLoader=None, options={}):
"""
The main purpose of this class is to represent all relevant
information for a static analysis. This will include
information defining the loading condition as well as various
other pieces of information.
Parameters
----------
name : str
Name of this tacs problem
assembler : assembler
Cython object responsible for creating and setting tacs objects used to solve problem
comm : MPI Intracomm
The comm object on which to create the pyTACS object.
outputViewer : TACSToFH5 object
Cython object used to write out f5 files that can be converted and used for postprocessing.
meshLoader : pyMeshLoader object
pyMeshLoader object used to create the assembler.
options : dict
Dictionary holding problem-specific option parameters.
"""
# python object name
self.objectName = 'StaticProblem'
# Problem name
self.name = name
        # Default setup for common problem class objects
super().__init__(assembler, comm, outputViewer, meshLoader)
# Default Option List
defOpts = {
'outputdir': [str, './'],
# Solution Options
'KSMSolver': [str, 'GMRES'],
'orderingType': [str, 'ND'],
'PCFillLevel': [int, 1000],
'PCFillRatio': [float, 20.0],
'subSpaceSize': [int, 10],
'nRestarts': [int, 15],
'flexible': [int, 1],
'L2Convergence': [float, 1e-12],
'L2ConvergenceRel': [float, 1e-12],
'useMonitor': [bool, False],
'monitorFrequency': [int, 10],
'resNormUB': [float, 1e20],
# Output Options
'writeSolution': [bool, True],
'numberSolutions': [bool, True],
'printTiming': [bool, False],
'printIterations': [bool, False],
}
# Process the default options which are added to self.options
# under the 'defaults' key. Make sure the key are lower case
self.options = {}
def_keys = defOpts.keys()
self.options['defaults'] = {}
for key in def_keys:
self.options['defaults'][key.lower()] = defOpts[key]
self.options[key.lower()] = defOpts[key]
# Set user-defined options
for key in options:
self.setOption(key, options[key])
# Linear solver factor flag
self._factorOnNext = True
# Create problem-specific variables
self._createVariables()
def _createVariables(self):
"""Internal to create the variable required by TACS"""
# Generic residual vector
self.res = self.assembler.createVec()
self.rhs = self.assembler.createVec()
# Dictionaries to hold adjoint/sens vectors for each evalFunc
self.adjointList = OrderedDict()
self.dIduList = OrderedDict()
self.dvSensList = OrderedDict()
self.xptSensList = OrderedDict()
# Temporary vector for adjoint solve
self.phi = self.assembler.createVec()
self.adjRHS = self.assembler.createVec()
# Load vector
self.F = self.assembler.createVec()
self.F_array = self.F.getArray()
# State variable vector
self.u = self.assembler.createVec()
self.u_array = self.u.getArray()
        # Auxiliary element object for applying tractions/pressure
self.auxElems = tacs.TACS.AuxElements()
self.callCounter = -1
# Norms
self.initNorm = 0.0
self.startNorm = 0.0
self.finalNorm = 0.0
opt = self.getOption
# Tangent Stiffness --- process the ordering option here:
tmp = opt('orderingType').lower()
if tmp == 'natural':
ordering = tacs.TACS.NATURAL_ORDER
elif tmp == 'nd':
ordering = tacs.TACS.ND_ORDER
elif tmp == 'rcm':
ordering = tacs.TACS.RCM_ORDER
elif tmp == 'tacs_amd':
ordering = tacs.TACS.TACS_AMD_ORDER
elif tmp == 'multicolor':
ordering = tacs.TACS.MULTICOLOR_ORDER
else:
raise self.TACSError("Unrecognized 'orderingType' option value: "
"'%s'. Valid values are: 'natural', 'nd', 'rcm', "
"'tacs_amd', or 'multicolor'." % tmp)
self.K = self.assembler.createSchurMat(ordering)
# Additional Vecs for updates
self.update = self.assembler.createVec()
# Setup PCScMat and KSM solver
self.alpha = 1.0
self.beta = 0.0
self.gamma = 0.0
self.assembler.assembleJacobian(self.alpha, self.beta, self.gamma, self.res, self.K)
reorderSchur = 1
self.PC = tacs.TACS.Pc(self.K, lev_fill=opt('PCFillLevel'),
ratio_fill=opt('PCFillRatio'), reorder=reorderSchur)
# Operator, fill level, fill ratio, msub, rtol, ataol
if opt('KSMSolver').upper() == 'GMRES':
self.KSM = tacs.TACS.KSM(
self.K, self.PC, opt('subSpaceSize'), opt('nRestarts'),
opt('flexible'))
# TODO: Fix this
# elif opt('KSMSolver').upper() == 'GCROT':
# self.KSM = tacs.TACS.GCROT(
# self.K, self.PC, opt('subSpaceSize'), opt('subSpaceSize'),
# opt('nRestarts'), opt('flexible'))
else:
raise self.TACSError("Unknown KSMSolver option. Valid options are "
"'GMRES' or 'GCROT'")
self.KSM.setTolerances(self.getOption('L2ConvergenceRel'),
self.getOption('L2Convergence'))
if opt('useMonitor'):
self.KSM.setMonitor(tacs.TACS.KSMPrintStdout(
opt('KSMSolver'), self.comm.rank, opt('monitorFrequency')))
def addFunction(self, funcName, funcHandle, compIDs=None, **kwargs):
"""
Generic function to add a function for TACS. It is intended to
be reasonably generic since the user supplies the actual
function handle to use. The following functions can be used:
KSFailure, KSTemperature, AverageTemperature, Compliance,
KSDisplacement, StructuralMass, HeatFlux.
Parameters
----------
funcName : str
The user-supplied name for the function. This will
            typically be a string that is meaningful to the user
        funcHandle : tacs.functions
            The function handle to use for creation. This must come
from the functions module in tacs.
compIDs: list
List of compIDs to select. Alternative to selectCompIDs
arguments.
"""
success = super().addFunction(funcName, funcHandle, compIDs, **kwargs)
if success:
# Create additional tacs BVecs to hold adjoint and sens info
self.adjointList[funcName] = self.assembler.createVec()
self.dIduList[funcName] = self.assembler.createVec()
self.dvSensList[funcName] = self.assembler.createDesignVec()
self.xptSensList[funcName] = self.assembler.createNodeVec()
return success
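# Hedged usage sketch (not part of the original source): addFunction is
# typically called with a handle from the tacs.functions module. The problem
# variable name and the ksWeight value below are illustrative assumptions.
#   problem.addFunction('mass', tacs.functions.StructuralMass)
#   problem.addFunction('ks_failure', tacs.functions.KSFailure, ksWeight=100.0)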
def setDesignVars(self, x):
"""
Update the design variables used by tacs.
Parameters
----------
x : ndarray
The variables (typically from the optimizer) to set. It
looks for variable in the ``self.varName`` attribute.
"""
super().setDesignVars(x)
self._factorOnNext = True
def setNodes(self, coords):
"""
Set the mesh coordinates of the structure.
Parameters
----------
coords : ndarray
Structural coordinate in array of size (N * 3) where N is
the number of structural nodes on this processor.
"""
super().setNodes(coords)
self._factorOnNext = True
####### Load adding methods ########
def addLoadToComponents(self, compIDs, F, averageLoad=False):
""""
The function is used to add a *FIXED TOTAL LOAD* on one or more
components, defined by COMPIDs. The purpose of this routine is to add loads that
remain fixed throughout an optimization. An example would be an engine load.
This routine determines all the unqiue nodes in the FE model that are part of the
the requested components, then takes the total 'force' by F and divides by the
number of nodes. This average load is then applied to the nodes.
Parameters
----------
compIDs : list[int] or int
The components with added loads. Use pyTACS selectCompIDs method
to determine this.
F : Numpy 1d or 2d array length (varsPerNodes) or (numNodeIDs, varsPerNodes)
Vector(s) of 'force' to apply to each components. If only one force vector is provided,
force will be copied uniformly across all components.
averageLoad : bool
Flag to determine whether load should be split evenly across all components (True)
or copied and applied individually to each component (False). Defaults to False.
Notes
----------
The units of the entries of the 'force' vector F are not
necessarily physical forces and their interpretation depends
on the physics problem being solved and the dofs included
in the model.
A couple of examples of force vector components for common problems are listed below:
In Heat Conduction with varsPerNode = 1
F = [Qdot] # heat rate
In Elasticity with varsPerNode = 3,
F = [fx, fy, fz] # forces
In Elasticity with varsPerNode = 6,
F = [fx, fy, fz, mx, my, mz] # forces + moments
In Thermoelasticity with varsPerNode = 4,
F = [fx, fy, fz, Qdot] # forces + heat rate
In Thermoelasticity with varsPerNode = 7,
F = [fx, fy, fz, mx, my, mz, Qdot] # forces + moments + heat rate
"""
self._addLoadToComponents(self.F, compIDs, F, averageLoad)
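# Hedged example (illustrative only, not from the original source): adding a
# fixed total load to a couple of components. It assumes a problem with
# varsPerNode = 6 and component IDs obtained from selectCompIDs; the values
# are placeholders.
#   compIDs = [0, 1]
#   F = np.array([0.0, 0.0, -9.81e3, 0.0, 0.0, 0.0])  # fx, fy, fz, mx, my, mz
#   problem.addLoadToComponents(compIDs, F, averageLoad=True)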
def addLoadToNodes(self, nodeIDs, F, nastranOrdering=False):
"""
The function is used to add a fixed point load of F to the
selected node IDs.
Parameters
----------
nodeIDs : list[int]
The nodes IDs with added loads.
F : Numpy 1d or 2d array length (varsPerNodes) or (numNodeIDs, varsPerNodes)
Array of force vectors, one for each node. If only one force vector is provided,
force will be copied uniformly across all nodes.
nastranOrdering : bool
Flag signaling whether nodeIDs are in TACS (default)
or NASTRAN (grid IDs in bdf file) ordering
Notes
----------
The units of the entries of the 'force' vector F are not
necessarily physical forces and their interpretation depends
on the physics problem being solved and the dofs included
in the model.
A couple of examples of force vector components for common problems are listed below:
In Heat Conduction with varsPerNode = 1
F = [Qdot] # heat rate
In Elasticity with varsPerNode = 3,
F = [fx, fy, fz] # forces
In Elasticity with varsPerNode = 6,
F = [fx, fy, fz, mx, my, mz] # forces + moments
In Thermoelasticity with varsPerNode = 4,
F = [fx, fy, fz, Qdot] # forces + heat rate
In Thermoelasticity with varsPerNode = 7,
F = [fx, fy, fz, mx, my, mz, Qdot] # forces + moments + heat rate
"""
self._addLoadToNodes(self.F, nodeIDs, F, nastranOrdering)
def addLoadToRHS(self, Fapplied):
""""
The function is used to add a *FIXED TOTAL LOAD* directly to the
right hand side vector given the equation below:
K*u = f
Where:
K : Stiffness matrix for problem
u : State variables for problem
f : Right-hand side vector to add loads to
Parameters
----------
Fapplied : ndarray or BVec
Distributed array containing loads to apply to the RHS of the problem.
"""
self._addLoadToRHS(self.F, Fapplied)
def addTractionToComponents(self, compIDs, tractions,
faceIndex=0):
"""
The function is used to add a *FIXED TOTAL TRACTION* on one or more
components, defined by COMPIDs. The purpose of this routine is
to add loads that remain fixed throughout an optimization.
Parameters
----------
compIDs : list[int] or int
The components with added loads. Use pyTACS selectCompIDs method
to determine this.
tractions : Numpy array length 1 or compIDs
Array of traction vectors for each components
faceIndex : int
Indicates which face (side) of element to apply traction to.
Note: not required for certain elements (i.e. shells)
"""
self._addTractionToComponents(self.auxElems, compIDs, tractions, faceIndex)
def addTractionToElements(self, elemIDs, tractions,
faceIndex=0, nastranOrdering=False):
"""
The function is used to add a fixed traction to the
selected element IDs. Tractions can be specified on an
element by element basis (if tractions is a 2d array) or
set to a uniform value (if tractions is a 1d array)
Parameters
----------
elemIDs : list[int]
The global element ID numbers for which to apply the traction.
tractions : Numpy 1d or 2d array length varsPerNodes or (elemIDs, varsPerNodes)
Array of traction vectors for each element
faceIndex : int
Indicates which face (side) of element to apply traction to.
Note: not required for certain elements (i.e. shells)
nastranOrdering : bool
Flag signaling whether elemIDs are in TACS (default)
or NASTRAN ordering
"""
self._addTractionToElements(self.auxElems, elemIDs, tractions, faceIndex, nastranOrdering)
def addPressureToComponents(self, compIDs, pressures,
faceIndex=0):
"""
The function is used to add a *FIXED TOTAL PRESSURE* on one or more
components, defined by COMPIds. The purpose of this routine is
to add loads that remain fixed throughout an optimization. An example
would be a fuel load.
Parameters
----------
compIDs : list[int] or int
The components with added loads. Use pyTACS selectCompIDs method
to determine this.
pressures : Numpy array length 1 or compIDs
Array of pressure values for each components
faceIndex : int
Indicates which face (side) of element to apply pressure to.
Note: not required for certain elements (i.e. shells)
"""
self._addPressureToComponents(self.auxElems, compIDs, pressures, faceIndex)
def addPressureToElements(self, elemIDs, pressures,
faceIndex=0, nastranOrdering=False):
"""
The function is used to add a fixed pressure to the
selected element IDs. Pressures can be specified on an
element by element basis (if pressures is an array) or
set to a uniform value (if pressures is a scalar)
Parameters
----------
elemIDs : list[int]
The global element ID numbers for which to apply the pressure.
pressures : Numpy array length 1 or elemIDs
Array of pressure values for each element
faceIndex : int
Indicates which face (side) of element to apply pressure to.
Note: not required for certain elements (i.e. shells)
nastranOrdering : bool
Flag signaling whether elemIDs are in TACS (default)
or NASTRAN ordering
"""
self._addPressureToElements(self.auxElems, elemIDs, pressures,
faceIndex, nastranOrdering)
####### Static solver methods ########
def _updateAssemblerVars(self):
"""
Make sure that the assembler is using
the input variables associated with this problem
"""
self.assembler.setDesignVars(self.x)
self.assembler.setNodes(self.Xpts)
self.assembler.setAuxElements(self.auxElems)
# Set state variables
self.assembler.setVariables(self.u)
# Zero any time derivative terms
self.assembler.zeroDotVariables()
self.assembler.zeroDDotVariables()
def _initializeSolve(self):
"""
Initialize the solution of the structural system for the
loadCase. The stiffness matrix is assembled and factored.
"""
if self._factorOnNext:
self.assembler.assembleJacobian(self.alpha, self.beta, self.gamma, self.res, self.K)
self.PC.factor()
self._factorOnNext = False
def solve(self, Fext=None):
"""
Solution of the static problem for current load set. The
forces must already be set.
Parameters
----------
Fext : ndarray or BVec, optional
Distributed array containing additional loads (ex. aerodynamic forces for aerostructural coupling)
to apply to the RHS of the static problem.
"""
startTime = time.time()
self.callCounter += 1
setupProblemTime = time.time()
# Set problem vars to assembler
self._updateAssemblerVars()
# Check if we need to initialize
self._initializeSolve()
initSolveTime = time.time()
# Compute the RHS
self.assembler.assembleRes(self.res)
# Add force terms from rhs
self.rhs.copyValues(self.F) # Fixed loads
# Add external loads, if specified
if Fext is not None:
if isinstance(Fext, tacs.TACS.Vec):
self.rhs.axpy(1.0, Fext)
elif isinstance(Fext, np.ndarray):
rhsArray = self.rhs.getArray()
rhsArray[:] = rhsArray[:] + Fext[:]
# Zero out bc terms in rhs
self.assembler.applyBCs(self.rhs)
# Add the -F
self.res.axpy(-1.0, self.rhs)
# Set initnorm as the norm of F
self.initNorm = np.real(self.F.norm())
# Starting norm for this computation
self.startNorm = np.real(self.res.norm())
initNormTime = time.time()
# Solve Linear System for the update
self.KSM.solve(self.res, self.update)
self.update.scale(-1.0)
solveTime = time.time()
# Update State Variables
self.assembler.getVariables(self.u)
self.u.axpy(1.0, self.update)
self.assembler.setVariables(self.u)
stateUpdateTime = time.time()
# Compute final FEA Norm
self.assembler.assembleRes(self.res)
self.res.axpy(-1.0, self.F) # Add the -F
self.finalNorm = np.real(self.res.norm())
finalNormTime = time.time()
# If timing was requested, print it. If the solution is nonlinear,
# print this information automatically if printIterations was requested.
if self.getOption('printTiming') or self.getOption('printIterations'):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Solve Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Setup Time', setupProblemTime - startTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Solve Init Time', initSolveTime - setupProblemTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Init Norm Time', initNormTime - initSolveTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Solve Time', solveTime - initNormTime))
self.pp('| %-30s: %10.3f sec' % ('TACS State Update Time', stateUpdateTime - solveTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Final Norm Time', finalNormTime - stateUpdateTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Total Solution Time', finalNormTime - startTime))
self.pp('+--------------------------------------------------+')
return
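# Hedged sketch of a typical solve/post-processing sequence (illustrative,
# assumes a fully set-up problem instance named `problem`):
#   problem.solve()                              # factor K if needed, solve K*u = f
#   funcs = {}
#   problem.evalFunctions(funcs, evalFuncs=['mass'])
#   problem.writeSolution()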
####### Function eval/sensitivity methods ########
def evalFunctions(self, funcs, evalFuncs=None,
ignoreMissing=False):
"""
This is the main routine for returning useful information from
pytacs. The functions corresponding to the strings in
EVAL_FUNCS are evaluated and updated into the provided
dictionary.
Parameters
----------
funcs : dict
Dictionary into which the functions are saved.
evalFuncs : iterable object containing strings.
If not None, use these functions to evaluate.
ignoreMissing : bool
Flag to suppress checking for a valid function. Please use
this option with caution.
Examples
--------
>>> funcs = {}
>>> staticProblem.solve()
>>> staticProblem.evalFunctions(funcs, ['mass'])
>>> funcs
>>> # Result will look like (if StaticProblem has name of 'c1'):
>>> # {'c1_mass':12354.10}
"""
startTime = time.time()
# Set problem vars to assembler
self._updateAssemblerVars()
if evalFuncs is None:
evalFuncs = sorted(list(self.functionList))
else:
evalFuncs = sorted(list(evalFuncs))
if not ignoreMissing:
for f in evalFuncs:
if f not in self.functionList:
raise Error("Supplied function '%s' has not been added "
"using addFunction()." % f)
setupProblemTime = time.time()
# Fast parallel function evaluation of structural funcs:
handles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
funcVals = self.assembler.evalFunctions(handles)
functionEvalTime = time.time()
# Assign function values to appropriate dictionary
i = 0
for f in evalFuncs:
if f in self.functionList:
key = self.name + '_%s' % f
funcs[key] = funcVals[i]
i += 1
dictAssignTime = time.time()
if self.getOption('printTiming'):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Function Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Function Setup Time', setupProblemTime - startTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Function Eval Time', functionEvalTime - setupProblemTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Dict Time', dictAssignTime - functionEvalTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Function Time', dictAssignTime - startTime))
self.pp('+--------------------------------------------------+')
def evalFunctionsSens(self, funcsSens, evalFuncs=None):
"""
This is the main routine for returning useful (sensitivity)
information from problem. The derivatives of the functions
corresponding to the strings in EVAL_FUNCS are evaluated and
updated into the provided dictionary.
Parameters
----------
funcsSens : dict
Dictionary into which the derivatives are saved.
evalFuncs : iterable object containing strings
The functions the user wants returned
Examples
--------
>>> funcsSens = {}
>>> staticProblem.evalFunctionsSens(funcsSens, ['mass'])
>>> funcsSens
>>> # Result will look like (if StaticProblem has name of 'c1'):
>>> # {'c1_mass':{'struct':[1.234, ..., 7.89]}}
"""
startTime = time.time()
# Set problem vars to assembler
self._updateAssemblerVars()
if evalFuncs is None:
evalFuncs = sorted(list(self.functionList))
else:
evalFuncs = sorted(list(evalFuncs))
# Check that the functions are all ok.
# and prepare tacs vecs for adjoint procedure
dvSenses = []
xptSenses = []
dIdus = []
adjoints = []
for f in evalFuncs:
if f not in self.functionList:
raise Error("Supplied function has not beed added "
"using addFunction()")
else:
# Populate the lists with the tacs bvecs
# we'll need for each adjoint/sens calculation
dvSens = self.dvSensList[f]
dvSens.zeroEntries()
dvSenses.append(dvSens)
xptSens = self.xptSensList[f]
xptSens.zeroEntries()
xptSenses.append(xptSens)
dIdu = self.dIduList[f]
dIdu.zeroEntries()
dIdus.append(dIdu)
adjoint = self.adjointList[f]
adjoint.zeroEntries()
adjoints.append(adjoint)
setupProblemTime = time.time()
adjointStartTime = {}
adjointEndTime = {}
# Next we will solve all the adjoints
# Set adjoint rhs
self.addSVSens(evalFuncs, dIdus)
adjointRHSTime = time.time()
for i, f in enumerate(evalFuncs):
adjointStartTime[f] = time.time()
self.solveAdjoint(dIdus[i], adjoints[i])
adjointEndTime[f] = time.time()
adjointFinishedTime = time.time()
# Evaluate all the adjoint residual products at the same time for
# efficiency:
self.addDVSens(evalFuncs, dvSenses)
self.addAdjointResProducts(adjoints, dvSenses)
self.addXptSens(evalFuncs, xptSenses)
self.addAdjointResXptSensProducts(adjoints, xptSenses)
# Recast sensitivities into dict for user
for i, f in enumerate(evalFuncs):
key = self.name + '_%s' % f
# Return sensitivities as array in sens dict
funcsSens[key] = {self.varName: dvSenses[i].getArray().copy(),
self.coordName: xptSenses[i].getArray().copy()}
totalSensitivityTime = time.time()
if self.getOption('printTiming'):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Adjoint Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Sens Setup Problem Time', setupProblemTime - startTime))
self.pp('| %-30s: %10.3f sec' % (
'TACS Adjoint RHS Time', adjointRHSTime - setupProblemTime))
for f in evalFuncs:
self.pp('| %-30s: %10.3f sec' % (
'TACS Adjoint Solve Time - %s' % (f), adjointEndTime[f] - adjointStartTime[f]))
self.pp('| %-30s: %10.3f sec' % ('Total Sensitivity Time', totalSensitivityTime - adjointFinishedTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('Complete Sensitivity Time', totalSensitivityTime - startTime))
self.pp('+--------------------------------------------------+')
def addSVSens(self, evalFuncs, svSensList):
"""
Add the state variable partial sensitivity to the adjoint RHS for given evalFuncs
Parameters
----------
evalFuncs : list[str]
The functions the user wants returned
svSensList : list[BVec] or list[ndarray]
List of sensitivity vectors to add partial sensitivity to
"""
# Set problem vars to assembler
self._updateAssemblerVars()
# Get list of TACS function handles from evalFuncs
funcHandles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
# Create a tacs BVec copy for the operation if the output is a numpy array
if isinstance(svSensList[0], np.ndarray):
svSensBVecList = [self._arrayToVec(svSensArray) for svSensArray in svSensList]
# Otherwise the input is already a BVec and we can do the operation in place
else:
svSensBVecList = svSensList
self.assembler.addSVSens(funcHandles, svSensBVecList, self.alpha, self.beta, self.gamma)
# Update from the BVec values, if the input was a numpy array
if isinstance(svSensList[0], np.ndarray):
for svSensArray, svSensBVec in zip(svSensList, svSensBVecList):
svSensArray[:] = svSensBVec.getArray()
def addDVSens(self, evalFuncs, dvSensList, scale=1.0):
"""
Add partial sensitivity contribution due to design vars for evalFuncs
Parameters
----------
evalFuncs : list[str]
The functions the user wants returned
dvSensList : list[BVec] or list[ndarray]
List of sensitivity vectors to add partial sensitivity to
scale : float
Scalar to multiply partial sensitivity by. Defaults to 1.0
"""
# Set problem vars to assembler
self._updateAssemblerVars()
# Get list of TACS function handles from evalFuncs
funcHandles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
# Create a tacs BVec copy for the operation if the output is a numpy array
if isinstance(dvSensList[0], np.ndarray):
dvSensBVecList = [self._arrayToDesignVec(dvSensArray) for dvSensArray in dvSensList]
# Otherwise the input is already a BVec and we can do the operation in place
else:
dvSensBVecList = dvSensList
self.assembler.addDVSens(funcHandles, dvSensBVecList, scale)
# Finalize sensitivity arrays across all procs
for dvSensBVec in dvSensBVecList:
dvSensBVec.beginSetValues()
dvSensBVec.endSetValues()
# Update the BVec values, if the input was a numpy array
if isinstance(dvSensList[0], np.ndarray):
for dvSensArray, dvSensBVec in zip(dvSensList, dvSensBVecList):
# Copy values to numpy array
dvSensArray[:] = dvSensBVec.getArray()
def addAdjointResProducts(self, adjointlist, dvSensList, scale=-1.0):
"""
Add the adjoint product contribution to the design variable sensitivity arrays
Parameters
----------
adjointlist : list[BVec] or list[ndarray]
List of adjoint vectors for residual sensitivity product
dvSensList : list[BVec] or list[ndarray]
List of sensitivity vectors to add product to
scale : float
Scalar to multiply product by. Defaults to -1.0
"""
# Set problem vars to assembler
self._updateAssemblerVars()
# Create a tacs BVec copy for the operation if the output is a numpy array
if isinstance(adjointlist[0], np.ndarray):
adjointBVeclist = [self._arrayToVec(adjointArray) for adjointArray in adjointlist]
# Otherwise the input is already a BVec and we can do the operation in place
else:
adjointBVeclist = adjointlist
# Make sure BC terms are zeroed out in adjoint
for adjoint in adjointBVeclist:
self.assembler.applyBCs(adjoint)
# Create a tacs BVec copy for the operation if the output is a numpy array
if isinstance(dvSensList[0], np.ndarray):
dvSensBVecList = [self._arrayToDesignVec(dvSensArray) for dvSensArray in dvSensList]
# Otherwise the input is already a BVec and we can do the operation in place
else:
dvSensBVecList = dvSensList
self.assembler.addAdjointResProducts(adjointBVeclist, dvSensBVecList, scale)
# Finalize sensitivity arrays across all procs
for dvSensBVec in dvSensBVecList:
dvSensBVec.beginSetValues()
dvSensBVec.endSetValues()
# Update the BVec values, if the input was a numpy array
if isinstance(dvSensList[0], np.ndarray):
for dvSensArray, dvSensBVec in zip(dvSensList, dvSensBVecList):
# Copy values to numpy array
dvSensArray[:] = dvSensBVec.getArray()
def addXptSens(self, evalFuncs, xptSensList, scale=1.0):
"""
Add partial sensitivity contribution due to nodal coordinates for evalFuncs
Parameters
----------
evalFuncs : list[str]
The functions the user wants returned
xptSensList : list[BVec] or list[ndarray]
List of sensitivity vectors to add partial sensitivity to
scale : float
Scalar to multiply partial sensitivity by. Defaults to 1.0
"""
# Set problem vars to assembler
self._updateAssemblerVars()
# Get list of TACS function handles from evalFuncs
funcHandles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
# Create a tacs BVec copy for the operation if the output is a numpy array
if isinstance(xptSensList[0], np.ndarray):
xptSensBVecList = [self._arrayToNodeVec(xptSensArray) for xptSensArray in xptSensList]
# Otherwise the input is already a BVec and we can do the operation in place
else:
xptSensBVecList = xptSensList
self.assembler.addXptSens(funcHandles, xptSensBVecList, scale)
# Finalize sensitivity arrays across all procs
for xptSensBVec in xptSensBVecList:
xptSensBVec.beginSetValues()
xptSensBVec.endSetValues()
# Update from the BVec values, if the input was a numpy array
if isinstance(xptSensList[0], np.ndarray):
for xptSensArray, xptSensBVec in zip(xptSensList, xptSensBVecList):
# Copy values to numpy array
xptSensArray[:] = xptSensBVec.getArray()
def addAdjointResXptSensProducts(self, adjointlist, xptSensList, scale=-1.0):
"""
Add the adjoint product contribution to the nodal coordinates sensitivity arrays
Parameters
----------
adjointlist : list[BVec] or list[ndarray]
List of adjoint vectors for residual sensitivity product
xptSensList : list[BVec] or list[ndarray]
List of sensitivity vectors to add product to
scale : float
Scalar to multiply product by. Defaults to -1.0
"""
# Set problem vars to assembler
self._updateAssemblerVars()
# Create a tacs BVec copy for the operation if the output is a numpy array
if isinstance(adjointlist[0], np.ndarray):
adjointBVeclist = [self._arrayToVec(adjointArray) for adjointArray in adjointlist]
# Otherwise the input is already a BVec and we can do the operation in place
else:
adjointBVeclist = adjointlist
# Make sure BC terms are zeroed out in adjoint
for adjoint in adjointBVeclist:
self.assembler.applyBCs(adjoint)
# Create a tacs BVec copy for the operation if the output is a numpy array
if isinstance(xptSensList[0], np.ndarray):
xptSensBVecList = [self._arrayToNodeVec(xptSensArray) for xptSensArray in xptSensList]
# Otherwise the input is already a BVec and we can do the operation in place
else:
xptSensBVecList = xptSensList
self.assembler.addAdjointResXptSensProducts(adjointBVeclist, xptSensBVecList, scale)
# Finalize sensitivity arrays across all procs
for xptSensBVec in xptSensBVecList:
xptSensBVec.beginSetValues()
xptSensBVec.endSetValues()
if isinstance(xptSensList[0], np.ndarray):
for xptSensArray, xptSensBVec in zip(xptSensList, xptSensBVecList):
# Copy values to numpy array
xptSensArray[:] = xptSensBVec.getArray()
def getResidual(self, res, Fext=None):
"""
This routine is used to evaluate directly the structural
residual. Only typically used with aerostructural analysis.
Parameters
----------
res : TACS BVec or numpy array
If res is not None, place the residuals into this array.
Fext : TACS BVec or numpy array
Distributed array containing additional loads (ex. aerodynamic forces for aerostructural coupling)
to apply to the RHS of the static problem.
"""
# Make sure assembler variables are up to date
self._updateAssemblerVars()
# Assemble the residual of the internal forces
self.assembler.assembleRes(self.res)
# Add force terms from rhs
self.rhs.copyValues(self.F) # Fixed loads
# Add external loads, if specified
if Fext is not None:
if isinstance(Fext, tacs.TACS.Vec):
self.rhs.axpy(1.0, Fext)
elif isinstance(Fext, np.ndarray):
rhsArray = self.rhs.getArray()
rhsArray[:] = rhsArray[:] + Fext[:]
# Zero out bc terms in rhs
self.assembler.applyBCs(self.rhs)
# Add the -F
self.res.axpy(-1.0, self.rhs)
# Output residual
if isinstance(res, tacs.TACS.Vec):
res.copyValues(self.res)
else:
res[:] = self.res.getArray()
def addTransposeJacVecProduct(self, phi, prod, scale=1.0):
"""
Adds product of transpose Jacobian and input vector into output vector as shown below:
prod += scale * J^T . phi
Parameters
----------
phi : TACS BVec or numpy array
Input vector to product with the transpose Jacobian.
prod : TACS BVec or numpy array
Output vector to add Jacobian product to.
scale : float
Scalar used to scale Jacobian product by.
"""
# Create a tacs bvec copy of the adjoint vector
if isinstance(phi, tacs.TACS.Vec):
self.phi.copyValues(phi)
elif isinstance(phi, np.ndarray):
self.phi.getArray()[:] = phi
# Tacs doesn't actually transpose the matrix here so keep track of
# RHS entries that TACS zeros out for BCs.
bcTerms = self.update
bcTerms.copyValues(self.phi)
self.assembler.applyBCs(self.phi)
bcTerms.axpy(-1.0, self.phi)
# Set problem vars to assembler
self._updateAssemblerVars()
self.K.mult(self.phi, self.res)
# Add bc terms back in
self.res.axpy(1.0, bcTerms)
# Output residual
if isinstance(prod, tacs.TACS.Vec):
prod.axpy(scale, self.res)
else:
prod[:] = prod + scale * self.res.getArray()
def zeroVariables(self):
"""
Zero all the tacs solution b-vecs
"""
self.res.zeroEntries()
self.u.zeroEntries()
self.assembler.setVariables(self.u)
self.update.zeroEntries()
def solveAdjoint(self, rhs, phi):
"""
Solve the structural adjoint.
Parameters
----------
rhs : TACS BVec or numpy array
right hand side vector for adjoint solve
phi : TACS BVec or numpy array
BVec or numpy array into which the adjoint is saved
"""
# Set problem vars to assembler
self._updateAssemblerVars()
# Check if we need to initialize
self._initializeSolve()
# Create a copy of the adjoint/rhs guess
if isinstance(phi, tacs.TACS.Vec):
self.phi.copyValues(phi)
elif isinstance(phi, np.ndarray):
self.phi.getArray()[:] = phi
if isinstance(rhs, tacs.TACS.Vec):
self.adjRHS.copyValues(rhs)
elif isinstance(rhs, np.ndarray):
self.adjRHS.getArray()[:] = rhs
# Tacs doesn't actually transpose the matrix here so keep track of
# RHS entries that TACS zeros out for BCs.
bcTerms = self.update
bcTerms.copyValues(self.adjRHS)
self.assembler.applyBCs(self.adjRHS)
bcTerms.axpy(-1.0, self.adjRHS)
# Solve Linear System
self.KSM.solve(self.adjRHS, self.phi)
self.assembler.applyBCs(self.phi)
# Add bc terms back in
self.phi.axpy(1.0, bcTerms)
# Copy output values back to user vectors
if isinstance(phi, tacs.TACS.Vec):
phi.copyValues(self.phi)
elif isinstance(phi, np.ndarray):
phi[:] = self.phi.getArray()
def getVariables(self, states=None):
"""
Return the current state values for the
problem
Parameters
----------
states : TACS BVec or numpy array
Vector to place current state variables into (optional)
Returns
----------
states : numpy array
current state vector
"""
if isinstance(states, tacs.TACS.Vec):
states.copyValues(self.u)
elif isinstance(states, np.ndarray):
states[:] = self.u_array[:]
return self.u_array.copy()
def setVariables(self, states):
"""
Set the structural states for current load case.
Parameters
----------
states : ndarray
Values to set. Must be the size of getNumVariables()
"""
# Copy array values
if isinstance(states, tacs.TACS.Vec):
self.u.copyValues(states)
elif isinstance(states, np.ndarray):
self.u_array[:] = states[:]
# Apply boundary conditions
self.assembler.applyBCs(self.u)
# Set states to assembler
self.assembler.setVariables(self.u)
def writeSolution(self, outputDir=None, baseName=None, number=None):
"""
This is a generic shell function that writes the output
file(s). The intent is that the user or calling program can
call this function and pyTACS writes all the files that the
user has defined. It is recommended that this function is used
along with the associated logical flags in the options to
determine the desired writing procedure
Parameters
----------
outputDir : str or None
Use the supplied output directory
baseName : str or None
Use this supplied string for the base filename. Typically
only used from an external solver.
number : int or None
Use the user supplied number to index solution. Again, only
typically used from an external solver
"""
# Make sure assembler variables are up to date
self._updateAssemblerVars()
# Check input
if outputDir is None:
outputDir = self.getOption('outputDir')
if baseName is None:
baseName = self.name
# If we are numbering solutions, i.e. saving the sequence of
# calls, add the call number
if number is not None:
# We need number based on the provided number:
baseName = baseName + '_%3.3d' % number
else:
# if number is none, i.e. standalone, but we need to
# number solutions, use internal counter
if self.getOption('numberSolutions'):
baseName = baseName + '_%3.3d' % self.callCounter
# Unless the writeSolution option is off write actual file:
if self.getOption('writeSolution'):
base = os.path.join(outputDir, baseName) + '.f5'
self.outputViewer.writeToFile(base)
|
python
|
# A collection of functions for loading the esm2m perturbation experiments
import xarray as xr
from gfdl_utils.core import get_pathspp
def get_path(variable=None,
ppname=None,
override=False,
experiments=None,
timespan=None):
"""Returns a dictionary of paths relevant to the specified
experiments, variables, and timespans
Parameters
----------
variable : str
Name of variable, or None (for all variables)
ppname : str
Name of postprocess directory from
ocean_bling_tracers
ocean_bling_ocn_flux
bling_atm_flux
ocean_gat_dic
Note that if variable is specified exactly, ppname is not needed.
If variable is None or has a wildcard character, ppname is required.
override : bool
Get variables from the override experiment.
experiments : str
List of perturbation experiments from which to grab data.
If none, get for all experiments.
timespan : str
Specify the time string if a subset of years is required.
Returns
-------
path : dict
Dictionary mapping each experiment string to its expanded path(s)
"""
config = 'MOM5_SIS_BLING_CORE2-gat'
if override:
config = config+'-override-po4'
pp = '/archive/Richard.Slater/Siena/siena_201308_rds-c3-gat-slurm/'+config+'/gfdl.ncrc3-intel16-prod-openmp/pp/'
out = 'ts'
local = 'monthly/10yr'
# Unless experiment is specified, set to all
if experiments is None:
experiments = ['','_gat','_zero','_double']
if type(experiments)==str: # Force into a list
experiments = [experiments]
# Unless variable is specified, set to all
if variable is None:
variable = '*'
# Unless timespan is specified, set to all
if timespan is None:
timespan = '*'
# If ppname is not specified, derive from variable
if ppname is None:
d = get_variable_dict()
if variable in d:
ppname = d[variable]
else:
raise NameError('If ppname is not specified, must give exact variable name.'+
' To specify wildcard variable, specify ppname.')
# Configure correct ppname
if ppname == 'ocean_bling_tracers':
ppname_pre = 'ocean_bling'
ppname_suf = '_tracers'
elif ppname in ['ocean_bling_ocn_flux','bling_atm_flux','ocean_gat_dic']:
ppname_pre = ppname
ppname_suf = ''
# ocean_gat_dic has no non-gat control
if (ppname == 'ocean_gat_dic') & ('' in experiments):
experiments.remove('')
path = {}
for e in experiments:
pathDict = {'pp':pp,
'ppname':ppname_pre+e+ppname_suf,
'out':out,
'local':local,
'time':timespan,
'add':variable}
path[e] = get_pathspp(**pathDict)
return path
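# Hedged usage sketch (not in the original module): expand paths for a single
# tracer from the control and 'gat' experiments. The variable name below is
# taken from get_variable_dict() but the call itself is illustrative.
#   paths = get_path(variable='pco2_surf', experiments=['', '_gat'])
#   print(paths['_gat'])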
def load_exps(variable=None,
ppname=None,
override=False,
experiments=None,
timespan=None,
verbose=False):
"""Returns a dictionary of datasets for each of the specified
experiments, variables, and timespans
Parameters
----------
variable : str
Name of variable, or None (for all variables)
ppname : str
Name of postprocess directory from
ocean_bling_tracers
ocean_bling_ocn_flux
bling_atm_flux
ocean_gat_dic
Note that if variable is specified exactly, ppname is not needed.
If variable is None or has a wildcard character, ppname is required.
override : bool
Get variables from the override experiment.
experiments : str
List of perturbation experiments from which to grab data.
If none, get for all experiments.
timespan : str
Specify the time string if a subset of years is required.
verbose : bool
Print paths to page
Returns
-------
dd : dict
Dictionary of {xarray.Dataset}'s with each entry corresponding to
each experiment.
"""
paths = get_path(variable=variable,ppname=ppname,override=override,experiments=experiments,timespan=timespan)
dd = {}
for p,path in paths.items():
if verbose:
print(path)
dd[p] = xr.open_mfdataset(path)
if len(dd)==1:
dd=dd[experiments]
return dd
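# Hedged usage sketch (illustrative, assumes access to the archive paths):
# open DIC from the 'double' perturbation only, printing the resolved paths.
#   ds_double = load_exps(variable='dic', experiments='_double', verbose=True)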
def load_grid(fromwork=True,z=None,z_i=None):
if fromwork:
# Load augmented grid saved to work
# See notebook save_grid.ipynb
gridpath = '/work/gam/projects/bio-pump-timescales/data/esm2m/raw/grid.nc'
grid = xr.open_dataset(gridpath)
else:
pp = '/archive/Richard.Slater/Siena/siena_201308_rds-c3-gat-slurm/MOM5_SIS_BLING_CORE2-gat/gfdl.ncrc3-intel16-prod-openmp/pp/'
gridpath = pp+'static.nc'
grid = xr.open_dataset(gridpath)
if z is not None:
grid['dz'] = (z_i.diff('st_edges_ocean')
.rename({'st_edges_ocean':'st_ocean'})
.assign_coords({'st_ocean':z}))
grid['volume_t'] = grid['area_t']*grid['dz']
return grid
def calc_anom(dd):
ddanom = {}
ddanom['zero'] = dd['_zero']-dd['_gat']
ddanom['double'] = dd['_double']-dd['_gat']
ddanom['noneq'] = dd['']-dd['_gat']
return ddanom
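# Hedged end-to-end sketch combining the helpers above (illustrative only;
# variable and dimension names depend on the model output and are assumptions):
#   dd = load_exps(variable='po4')
#   ddanom = calc_anom(dd)
#   po4_zero_anom = ddanom['zero']['po4'].mean('time')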
def get_variable_dict():
return {'alk':'ocean_bling_tracers',
'alpha':'ocean_bling_tracers',
'biomass_p':'ocean_bling_tracers',
'chl':'ocean_bling_tracers',
'co2_alpha':'ocean_bling_tracers',
'co3_ion':'ocean_bling_tracers',
'delta_csurf':'ocean_bling_tracers',
'delta_pco2':'ocean_bling_tracers',
'dic_area_integral':'ocean_bling_tracers',
'dic':'ocean_bling_tracers',
'dic_stf':'ocean_bling_tracers',
'dic_volume_integral':'ocean_bling_tracers',
'dop_area_integral':'ocean_bling_tracers',
'dop':'ocean_bling_tracers',
'dop_volume_integral':'ocean_bling_tracers',
'fed':'ocean_bling_tracers',
'fed_stf':'ocean_bling_tracers',
'htotal':'ocean_bling_tracers',
'integral_dic':'ocean_bling_tracers',
'integral_dic_stf':'ocean_bling_tracers',
'irr_mem':'ocean_bling_tracers',
'jdic_100':'ocean_bling_tracers',
'o2':'ocean_bling_tracers',
'pco2_surf':'ocean_bling_tracers',
'po4_area_integral':'ocean_bling_tracers',
'po4':'ocean_bling_tracers',
'po4_volume_integral':'ocean_bling_tracers',
'co2_flux_alpha_ocn':'ocean_bling_ocn_flux',
'co2_flux_cair_ice_ocn':'ocean_bling_ocn_flux',
'co2_flux_csurf_ocn':'ocean_bling_ocn_flux',
'co2_flux_flux_ice_ocn':'ocean_bling_ocn_flux',
'co2_flux_kw_ice_ocn':'ocean_bling_ocn_flux',
'co2_flux_schmidt_ocn':'ocean_bling_ocn_flux',
'o2_flux_alpha_ocn':'ocean_bling_ocn_flux',
'o2_flux_csurf_ocn':'ocean_bling_ocn_flux',
'o2_flux_flux_ice_ocn':'ocean_bling_ocn_flux',
'o2_flux_schmidt_ocn':'ocean_bling_ocn_flux',
'atm_gas_flux':'ocean_gat_dic',
'atm_gas_input':'ocean_gat_dic',
'atm_mol_wgt':'ocean_gat_dic',
'base_mix_ratio':'ocean_gat_dic',
'gas_mol_wgt':'ocean_gat_dic',
'mix_ratio':'ocean_gat_dic',
'total_atm_mass':'ocean_gat_dic',
'total_gas_mass':'ocean_gat_dic'}
def disp_variables():
return list(get_variable_dict().keys())
def add_override_suffix(directory,override):
if override:
directory = directory+'override-po4/'
else:
directory = directory+'no-override/'
return directory
|
python
|
import imports.dataHandler as jdata
import imports.passwordToKey as keys
import imports.randomText as rand_text
import pyperclip as clipboard
import imports.CONSTS as CONSTS
import os
from cryptography.fernet import Fernet
from getpass import getpass
import json
protected = ["key", "state"]
MAIN_MENU = 0
RECORDS = 1
VERSION = "v1.0.1"
class passwordManager:
def __init__(self, pathDir):
while(True):
value = input("Enter username : ")
print("Enter password")
password = getpass()
key = keys.passwordToKey(value, password)
self.state = MAIN_MENU
self.database = ""
try:
self.pathName = pathDir + value
self.data = jdata.dataBase(pathDir + value, key)
self.key = key
self.user = value
self.checkUpdate()
break
# print(self.data.json)
# Do something with the file
except:
print("Wrong username or password, try again!")
self.main_menu()
def update_ver(self):
print("Initiating update...")
orecords = {}
for record in self.data.json:
if(not self.isProtected(record, False)):
orecords[record] = self.data.json[record]
ndata = {}
ndata["state"] = self.data.json["state"]
ndata["key"] = self.data.json["key"]
ndata["version"] = VERSION
ndata["orecords"] = orecords
self.data.json = ndata
os.rename(self.pathName + "_data" + CONSTS.SED,
self.pathName + "_data_orecords" + CONSTS.SED)
self.data.save()
print("update done...")
def loadFile(self, record_name):
key = self.data.json["key"].encode()
self.kdata = jdata.dataBase(
self.pathName + "_data_" + record_name, key)
def checkUpdate(self):
if("state" not in self.data.json):
self.confirmPass()
self.init()
else:
if "version" not in self.data.json:
self.update_ver()
# key = self.data.json["key"].encode()
# self.kdata = jdata.dataBase(self.pathName + "_data", key)
def confirm(self) -> bool:
value = input("confirm to procced! y/n : ")
if(value.lower() == "y" or value.lower() == "yes"):
print("Enter password to confirm :")
password = getpass()
key = keys.passwordToKey(self.user, password)
if(key == self.key):
return True
print("wrong password try again!")
return self.confirm()
return False
def isProtected(self, value, log=True) -> bool:
pr = False
for name in protected:
if(value == name):
pr = True
break
if(pr and log):
print("this name is protected, try different name")
return pr
def confirmPass(self):
while(True):
print("Enter password to confirm :")
password = getpass()
key = keys.passwordToKey(self.user, password)
if(key == self.key):
return True
print("wrong password try again!")
def getHelpRecords(self):
print("commands : ")
print("add example_unqiue_name example_username/-n example_password/random/-r")
print("load unqiue_name or load unique_name -n (copy username then password)")
print("remove or delete -n")
print("records : show all records")
print("back : Go back")
print("help")
def main_menu(self):
while(True):
if self.state == MAIN_MENU:
value = input("Enter command : ").split()
try:
if value[0] == "help":
print("Main menu commands:")
print("load -n, where n is the name of the db, load database.")
print("adddb -n, where n is the name of the new database.")
print("records - prints all databases.")
print("version - print version.")
print("quit - exist.")
print()
if value[0] == "quit":
return
if value[0] == "records":
i = 0
for record in self.data.json:
if(not self.isProtected(record, False)):
i += 1
print("(", i, ") : ", record)
if value[0] == "version":
print("version", self.data.json["version"])
if value[0] == "load":
if value[1] in self.data.json:
self.loadFile(value[1])
self.state = RECORDS
self.database = value[1]
if value[0] == "adddb":
if value[1] in self.data.json:
print("db already exist!")
continue
self.data.json[value[1]] = {}
self.loadFile(value[1])
self.state = RECORDS
self.database = value[1]
except:
print("soemthing went wrong!")
elif self.state == RECORDS:
self.records_menu()
def records_menu(self):
run = True
self.getHelpRecords()
while(run):
value = input("Enter command : ").split()
if(value[0] == "back"):
run = False
self.database = ""
self.state = MAIN_MENU
elif(value[0] == "add"):
if(len(value) > 3 and not self.isProtected(value[1])):
add = True
if(value[1] in self.data.json[self.database]):
print(
"there already exists a record with the same unique_name!")
add = self.confirm()
if(add):
if(value[3] == "-r" or value[3] == "random"):
value[3] = rand_text.randomStringDigits(12)
key = Fernet.generate_key()
self.data.json[self.database][value[1]] = key.decode()
jdata = json.loads("{}")
jdata["name"] = value[1]
if(value[2] == "-n"):
jdata["username"] = value[1]
else:
jdata["username"] = value[2]
jdata["pass"] = value[3]
jdata = json.dumps(jdata)
fkey = Fernet(key)
jdata = fkey.encrypt(jdata.encode())
self.kdata.json[value[1]] = jdata.decode()
clipboard.copy(value[3])
self.kdata.save()
self.data.save()
print("copied the password to clipboard!")
else:
print("invalid syntax : valid syntax example")
print(
"add example_unqiue_name example_username/-n example_password/random/-r")
elif(value[0] == "load"):
if(len(value) > 1 and not self.isProtected(value[1])):
if(value[1] in self.data.json[self.database]):
key = self.data.json[self.database][value[1]].encode()
data = self.kdata.json[value[1]]
fkey = Fernet(key)
data = fkey.decrypt(data.encode())
data = json.loads(data.decode())
# print(data["pass"])
# print(clipboard.paste())
if(len(value) > 2 and value[2] == "-n"):
clipboard.copy(data["username"])
print("copied username to clipboard!")
value = input("copy password y/n : ")
if(value.lower() == "y" or value.lower() == "yes"):
clipboard.copy(data["pass"])
print("copied the password to clipboard!")
else:
clipboard.copy(data["pass"])
print("username : ", data["username"])
print("copied the password to clipboard!")
else:
print(value[1], " record doesn't exist!")
else:
print("invalid syntax : valid syntax example")
print(
"load unqiue_name or load unique_name -n (copy username then password)")
elif(value[0] == "clear"):
os.system('cls' if os.name == 'nt' else 'clear')
elif(value[0] == "help"):
self.getHelpRecords()
elif(value[0] == "remove" or value[0] == "delete"):
self.delete(value)
elif(value[0] == "records"):
i = 0
for record in self.data.json[self.database]:
if(not self.isProtected(record, False)):
i += 1
print("(", i, ") : ", record)
def delete(self, user_input):
if(self.isProtected(user_input[1])):
# a.k.a values we dont want to delete
return
if(user_input[1] in self.data.json[self.database]):
value = input("you sure you want to delete this record y/n : ")
if(value.lower() == "y" or value.lower() == "yes"):
del self.data.json[self.database][user_input[1]]
self.data.save()
def init(self):
self.data.json["state"] = "initialized"
key = Fernet.generate_key()
self.data.json["key"] = key.decode()
self.data.json["version"] = VERSION
# self.kdata = jdata.dataBase(self.pathName + "_data", key)
# self.kdata.save()
self.data.save()
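# Hedged usage sketch (not part of the original file): constructing the manager
# prompts for a username/password and enters the interactive main menu. The
# data directory below is an assumption.
#   manager = passwordManager("./data/")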
|
python
|
# -*- coding: utf-8 -*-
from doodle.config import CONFIG
from doodle.core.models.article import Article, ArticleHitCount
from doodle.core.models.comment import ArticleComments
from ..base_handler import BaseHandler
class HomeHandler(BaseHandler):
def get(self):
articles, next_cursor = Article.get_articles_for_homepage(self.cursor)
if articles:
article_ids = [article.id for article in articles]
hit_counts = ArticleHitCount.get_by_ids(article_ids)
replies_dict = ArticleComments.get_comment_count_of_articles(article_ids)
else:
hit_counts = replies_dict = {}
self.set_cache(CONFIG.DEFAULT_CACHE_TIME, is_public=True)
self.render('web/home.html', {
'title': CONFIG.BLOG_TITLE,
'page': 'home',
'articles': articles,
'hit_counts': hit_counts,
'replies_dict': replies_dict,
'next_cursor': next_cursor
})
|
python
|
# NOTICE
# This software was produced for the U.S. Government under contract FA8702-21-C-0001,
# and is subject to the Rights in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
# ©2021 The MITRE Corporation. All Rights Reserved.
'''
A PropertyConstraints object describes type and cardinality constraints
for a single property of a single ontology class.
This module implements the PropertyConstraints object
'''
from message import OntologyError
from context import Context
class PropertyConstraints:
'''
Instances of this class describe the type and cardinality constraints
for a property of an ontology class.
This class facilitates collecting the constraints and checking them
for consistency.
Attributes:
onto_class_uri An rdflib.term.URIRef object for the ontology class or None
property_uri An rdflib.term.URIRef object or None
min_cardinality An integer or None
max_cardinality An integer or None
value_range An rdflib.term.URIRef object
_qualified A boolean, or None
Two PropertyConstraint instances are equal if their attributes,
not including _qualified, are equal.
'''
def __init__(self, onto_class_uri=None, property_uri=None):
'''
Create and initialize an instance of this class
The arguments are for error messages and self-description.
Arguments:
onto_class_uri An rdflib.term.URIRef for the ontology class whose
property is constrained by these PropertyConstraints
property_uri An rdflib.term.URIRef object for the property constrained by these PropertyConstraints
'''
self.onto_class_uri = onto_class_uri
self.property_uri = property_uri
self.min_cardinality = None
self.max_cardinality = None
self.value_range = None
self._qualified = None # True/False/None where None means unset
def add_min_cardinality(self, min_cardinality):
'''
"Add" specified min_cardinality value if possible.
Arguments:
min_cardinality The minimum cardinality, an integer
Return:
List of ErrorMessages on failure, empty list on success.
'''
error_messages = []
if self._qualified is None:
self._qualified = False
elif self._qualified is True:
error_messages.append(self._get_ontology_error('unqualified min_cardinality specified for a qualified constraint'))
if self.min_cardinality is None:
if self.max_cardinality is not None and min_cardinality > self.max_cardinality:
error_messages.append(self._get_ontology_error('min_cardinality exceeds max_cardinality'))
else:
self.min_cardinality = min_cardinality
elif self.min_cardinality != min_cardinality:
error_messages.append(self._get_ontology_error('multiple min_cardinality values specified'))
return error_messages
def add_max_cardinality(self, max_cardinality):
'''
"Add" specified max_cardinality value if possible.
Arguments:
max_cardinality The maximum cardinality, an integer
Return:
List of ErrorMessages on failure, empty list on success.
'''
error_messages = []
if self._qualified is None:
self._qualified = False
elif self._qualified is True:
error_messages.append(self._get_ontology_error('unqualified max_cardinality specified for a qualified constraint'))
if self.max_cardinality is None:
if self.min_cardinality is not None and max_cardinality < self.min_cardinality:
error_messages.append(self._get_ontology_error('min_cardinality exceeds max_cardinality'))
else:
self.max_cardinality = max_cardinality
elif self.max_cardinality != max_cardinality:
error_messages.append(self._get_ontology_error('multiple max_cardinality values specified'))
return error_messages
def add_cardinality(self, cardinality):
'''
"Add" specified cardinaltiy to self if possible.
Arguments:
cardinality The (minimum and maximum) cardinality, an integer
Return:
List of ErrorMessages on failure, empty list on success.
'''
error_messages = []
if self._qualified is None:
self._qualified = False
elif self._qualified is True:
error_messages.append(self._get_ontology_error('unqualified cardinality specified for a qualified constraint'))
if self.min_cardinality is None and self.max_cardinality is None:
self.min_cardinality = cardinality
self.max_cardinality = cardinality
elif self.min_cardinality != cardinality or self.max_cardinality != cardinality:
error_messages.append(self._get_ontology_error('multiple cardinality values specified'))
return error_messages
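# Hedged illustration (not part of the original module) of how the add_*
# methods accumulate constraints and report conflicts; no real URIs are used.
#   pc = PropertyConstraints()
#   assert pc.add_min_cardinality(1) == []   # ok: min set to 1
#   assert pc.add_max_cardinality(0) != []   # conflict: min exceeds max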
def add_qualified_min_cardinality(self, min_cardinality):
'''
"Add" specified min_qualified_cardinaltiy value if possible.
Arguments:
min_cardinality The minimum qualified cardinality, an integer
Return:
List of ErrorMessages on failure, empty list on success.
'''
error_messages = []
if self._qualified is None:
self._qualified = True
elif self._qualified is False:
error_messages.append(self._get_ontology_error('qualified min_cardinality specified for an unqualified constraint'))
if self.min_cardinality is None:
if self.max_cardinality is not None and min_cardinality > self.max_cardinality:
error_messages.append(self._get_ontology_error('min_cardinality exceeds max_cardinality'))
else:
self.min_cardinality = min_cardinality
elif self.min_cardinality != min_cardinality:
error_messages.append(self._get_ontology_error('multiple min_cardinality values specified'))
return error_messages
def add_qualified_max_cardinality(self, max_cardinality):
'''
"Add" specified max_qualified_cardinaltiy value if possible.
Arguments:
max_cardinality The maximum qualified cardinality, an integer
Return:
List of ErrorMessages on failure, empty list on success.
'''
error_messages = []
if self._qualified is None:
self._qualified = True
elif self._qualified is False:
error_messages.append(self._get_ontology_error('qualified max_cardinality specified for an unqualified constraint'))
if self.max_cardinality is None:
if self.min_cardinality is not None and max_cardinality < self.min_cardinality:
error_messages.append(self._get_ontology_error('min_cardinality exceeds max_cardinality'))
else:
self.max_cardinality = max_cardinality
elif self.max_cardinality != max_cardinality:
error_messages.append(self._get_ontology_error('multiple max_cardinality values specified'))
return error_messages
def add_qualified_cardinality(self, cardinality):
'''
"Add" specified qualified_cardinality to self if possible.
Arguments:
cardinality The (minimum and maximum) qualified cardinality, an integer
Return:
List of ErrorMessages on failure, empty list on success.
'''
error_messages = []
if self._qualified is None:
self._qualified = True
elif self._qualified is False:
error_messages.append(self._get_ontology_error('qualified cardinality specified for an unqualified constraint'))
if self.min_cardinality is None and self.max_cardinality is None:
self.min_cardinality = cardinality
self.max_cardinality = cardinality
elif self.min_cardinality != cardinality or self.max_cardinality != cardinality:
error_messages.append(self._get_ontology_error('multiple cardinality values specified'))
return error_messages
def add_value_range(self, value_range):
'''
Add specified value_range (usually xsd) value if possible.
Arguments:
value_range The rdflib.term.URIRef of the range type
Return:
List of ErrorMessages on failure, empty list on success.
'''
error_messages = []
if self.value_range is None:
self.value_range = value_range
else:
error_messages.append(self._get_ontology_error('multiple ranges specified'))
return error_messages
def merge_parent(self, parent):
'''
Arguments:
parent Another PropertyConstraints object, presumably belonging to a parent class
Return:
A tuple consisting of two items:
A new PropertyConstraints object containing the result of the merger
A (hopefully empty) list of ErrorMessage objects
Side effects:
None. Self is not changed.
Definitions and Algorithm:
Each PropertyConstraint object has three attributes of interest: min_cardinality, max_cardinality, and range.
If any of these attributes is None, that means it has not been set; there is no default value.
Before the merge operation, self's constraints must be equal to or tighter than parent's.
If the parent's constraint is tighter, the offending attributes are ignored and we get an ErrorMessage.
The merge operation builds the new merged PropertyConstraints object as follows:
1. Start with an empty merged PropertyConstraints object and an empty list of ErrorMessages
2. Copy attributes from self or parent to merged as follows:
a. If an attribute is unset (None) in self
merged attribute is parent's value (which could also be None)
b. If an attribute has a value in self and is unset (None) in parent,
merged attribute is self's value
c. If an attribute has a value in self and a value in parent,
merged attribute is self's value (because self's value should be at least as tight as parent's)
if parent's constraint is strictly tighter than self's, add ErrorMessage
3. If both min_cardinality and max_cardinality have values in merged PropertyConstraints,
If min_cardinality > max_cardinality, add ErrorMessage and revert to unmerged values.
The _qualified attribute in the merged constraints is not used.
'''
# Step 1. Initialize.
merged = PropertyConstraints(onto_class_uri=self.onto_class_uri, property_uri=self.property_uri)
error_messages = []
# Step 2 for min_cardinality.
merged.min_cardinality = parent.min_cardinality if self.min_cardinality is None else self.min_cardinality
if parent.min_cardinality is not None and self.min_cardinality is not None:
if parent.min_cardinality > self.min_cardinality: # parent has tighter constraint
error_messages.append(self._get_ontology_error(
'cannot merge min_cardinality {} with {} from {}'.format(
self.min_cardinality, parent.min_cardinality, parent.onto_class_uri)))
# Step 2 for max_cardinality.
merged.max_cardinality = parent.max_cardinality if self.max_cardinality is None else self.max_cardinality
if parent.max_cardinality is not None and self.max_cardinality is not None:
if parent.max_cardinality < self.max_cardinality: # parent has tighter constraint
error_messages.append(self._get_ontology_error(
'cannot merge max_cardinality {} with {} from {}'.format(
self.max_cardinality, parent.max_cardinality, parent.onto_class_uri)))
# Step 2 for range.
merged.value_range = parent.value_range if self.value_range is None else self.value_range
if parent.value_range is not None and self.value_range is not None:
if parent.value_range != self.value_range: # inconsistent ranges (we don't check for subclass yet)
error_messages.append(self._get_ontology_error(
'cannot merge value_range {} with {} from {}'.format(
self.value_range, parent.value_range, parent.onto_class_uri)))
# Step 3. Make sure min <= max
if merged.min_cardinality is not None and merged.max_cardinality is not None:
if merged.min_cardinality > merged.max_cardinality:
error_messages.append(self._get_ontology_error(
'cannot merge cardinalities from {} because min_cardinality exceeds max_cardinality'.format(parent.onto_class_uri)))
merged.min_cardinality = self.min_cardinality
merged.max_cardinality = self.max_cardinality
# Return merged PropertyConstraints and List of error messages
#print('child {}\nparent {}\nmerged {} {}'.format(self, parent, merged, error_messages))
return merged, error_messages
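# Hedged sketch of merging a child's constraints with a parent's (illustrative,
# no real ontology URIs): the child keeps its tighter bounds and no errors arise.
#   child, parent = PropertyConstraints(), PropertyConstraints()
#   child.add_min_cardinality(1); child.add_max_cardinality(2)
#   parent.add_min_cardinality(0)
#   merged, errors = child.merge_parent(parent)
#   # merged.min_cardinality == 1, merged.max_cardinality == 2, errors == []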
def check_consistency(self):
'''
Check this PropertyConstraints object for global inconsistencies
that could not be determined when adding items one at a time.
Return:
List of ErrorMessages if inconsistencies were found, empty list if not.
'''
error_messages = []
if self._qualified is True:
if self.value_range is None:
error_messages.append(self._get_ontology_error('qualified constraint has no range'))
if self._qualified is False:
if self.value_range is not None:
error_messages.append(self._get_ontology_error('unqualified constraint has range'))
# if self._qualified is None:
# pass
return error_messages
def describe(self, context=None):
'''
Assemble and return a plain-text description of these PropertyConstraints
Return:
A single-line string describing this PropertyConstraints object.
'''
value = lambda n: 'value' if n == 1 else 'values'
phrases = []
if context is None:
context = Context()
if self.onto_class_uri:
phrases.append('Class {}'.format(context.format(self.onto_class_uri)))
if self.property_uri:
phrases.append('Property {}'.format(context.format(self.property_uri)))
else:
phrases.append('Property')
if self.max_cardinality == 0:
phrases.append('may have no values')
elif self.min_cardinality in (None, 0):
if self.max_cardinality is None:
phrases.append('may have any number of values')
else: # self.max_cardinality > 0
phrases.append('may have at most {} {}'.format(self.max_cardinality, value(self.max_cardinality)))
else: # self.min_cardinality > 0
if self.max_cardinality is None:
phrases.append('must have at least {} {}'.format(self.min_cardinality, value(self.min_cardinality)))
elif self.min_cardinality == self.max_cardinality:
phrases.append('must have exactly {} {}'.format(self.min_cardinality, value(self.min_cardinality)))
else: # self.max_cardinality > 0
phrases.append('must have between {} and {} values'.format(self.min_cardinality, self.max_cardinality))
if self.value_range:
phrases.append('of type {}'.format(self.value_range))
return ' '.join(phrases)
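# Hedged example of the one-line description produced here (wording assumes an
# unset class/property URI and no value range):
#   pc = PropertyConstraints()
#   pc.add_cardinality(1)
#   pc.describe()   # -> 'Property must have exactly 1 value'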
def _get_ontology_error(self, message):
'''
Arguments:
message A message string describing some kind of error condition
Return:
An OntologyError object using self.onto_class_uri and self.property_uri with the message
'''
return OntologyError(
message='constraint violation: ' + message,
onto_class_uri=self.onto_class_uri,
property_uri=self.property_uri)
def __members(self):
'''
Two instances of this class are equal if the __member attributes are equal
'''
return (self.onto_class_uri, self.property_uri, self.min_cardinality, self.max_cardinality, self.value_range)
def __str__(self):
return '<{} {} [{}-{}] {}>'.format(
self.onto_class_uri if self.onto_class_uri else None,
self.property_uri if self.property_uri else 'DATATYPE',
'?' if self.min_cardinality is None else self.min_cardinality,
'?' if self.max_cardinality is None else self.max_cardinality,
self.value_range if self.value_range else '?')
def __eq__(self, other):
'''
Two instances of this class are equal if the __member attributes are equal
'''
if type(other) is type(self):
return self.__members() == other.__members()
else:
return False
def __hash__(self):
'''
Two instances of this class are equal if the __member attributes are equal
'''
return hash(self.__members())
|
python
|
from mock import Mock
from flows.simulacra.youtube_dl.factory import youtube_dl_flow_factory
from flows.simulacra.youtube_dl.post import download_videos
from tests.testcase import TestCase
class TestDownloadVideos(TestCase):
def setUp(self):
self.open = self.set_up_patch(
'flows.simulacra.youtube_dl.post.open'
)
self.open.return_value.__exit__ = lambda a, b, c, d: None
self.file_handle = Mock()
self.file_handle.readlines.return_value = iter([
'some_channel1',
'some_other_channel2'
])
self.open.return_value.__enter__ = lambda x: self.file_handle
self.post_job = self.set_up_patch(
'flows.simulacra.youtube_dl.post.post_job'
)
def test_download_videos_opens_channels_file(self):
download_videos('/tmp/some_list_of_yt_channels.txt')
self.open.assert_called_once_with(
'/tmp/some_list_of_yt_channels.txt'
)
def test_download_videos_reads_lines_from_channels_file(self):
download_videos('/tmp/some_list_of_yt_channels.txt')
self.file_handle.readlines.assert_called_once_with()
def test_download_videos_posts_job_to_download_yt_videos(self):
download_videos('/tmp/some_list_of_yt_channels.txt')
expected_channels = ['some_channel1', 'some_other_channel2']
self.post_job.assert_called_once_with(
youtube_dl_flow_factory,
hierarchy=False,
factory_args=[expected_channels]
)
def test_download_videos_draws_hierarchy_of_download_yt_videos(self):
download_videos('/tmp/some_list_of_yt_channels.txt', hierarchy=True)
expected_channels = ['some_channel1', 'some_other_channel2']
self.post_job.assert_called_once_with(
youtube_dl_flow_factory,
hierarchy=True,
factory_args=[expected_channels]
)
|
python
|
# Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from mistral import exceptions as exc
from mistral import expressions as expr
from mistral import utils
from mistral.workflow import base
from mistral.workflow import commands
from mistral.workflow import data_flow
from mistral.workflow import states
from mistral.workflow import utils as wf_utils
LOG = logging.getLogger(__name__)
class DirectWorkflowController(base.WorkflowController):
"""'Direct workflow' handler.
This handler implements the workflow pattern which is based on
direct transitions between tasks, i.e. after each task completion
a decision should be made which tasks should run next based on
result of task execution.
    Note that tasks can run in parallel. For example, if there's a workflow
    consisting of three tasks 'A', 'B' and 'C' where 'A' starts first then
    'B' and 'C' can start second if certain conditions associated with the
    transitions 'A'->'B' and 'A'->'C' evaluate to true.
"""
__workflow_type__ = "direct"
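    # Illustrative sketch of a direct workflow matching the class docstring above,
    # assuming the standard Mistral v2 DSL (not part of this module):
    #
    #   version: '2.0'
    #   wf:
    #     type: direct
    #     tasks:
    #       A:
    #         action: std.noop
    #         on-success:
    #           - B
    #           - C
    #       B:
    #         action: std.noop
    #       C:
    #         action: std.noop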
def _get_upstream_task_executions(self, task_spec):
return list(
filter(
lambda t_e: self._is_upstream_task_execution(task_spec, t_e),
wf_utils.find_task_executions_by_specs(
self.wf_ex,
self.wf_spec.find_inbound_task_specs(task_spec)
)
)
)
def _is_upstream_task_execution(self, t_spec, t_ex_candidate):
if not states.is_completed(t_ex_candidate.state):
return False
if not t_spec.get_join():
return not t_ex_candidate.processed
return self._triggers_join(
t_spec,
self.wf_spec.get_tasks()[t_ex_candidate.name]
)
def _find_next_commands(self, env=None):
cmds = super(DirectWorkflowController, self)._find_next_commands(
env=env
)
if not self.wf_ex.task_executions:
return self._find_start_commands()
task_execs = [
t_ex for t_ex in self.wf_ex.task_executions
if states.is_completed(t_ex.state) and not t_ex.processed
]
for t_ex in task_execs:
cmds.extend(self._find_next_commands_for_task(t_ex))
return cmds
def _find_start_commands(self):
return [
commands.RunTask(
self.wf_ex,
t_s,
self._get_task_inbound_context(t_s)
)
for t_s in self.wf_spec.find_start_tasks()
]
def _find_next_commands_for_task(self, task_ex):
"""Finds next commands based on the state of the given task.
:param task_ex: Task execution for which next commands need
to be found.
:return: List of workflow commands.
"""
cmds = []
for t_n in self._find_next_task_names(task_ex):
t_s = self.wf_spec.get_tasks()[t_n]
if not (t_s or t_n in commands.RESERVED_CMDS):
raise exc.WorkflowException("Task '%s' not found." % t_n)
elif not t_s:
t_s = self.wf_spec.get_tasks()[task_ex.name]
cmd = commands.create_command(
t_n,
self.wf_ex,
t_s,
self._get_task_inbound_context(t_s)
)
# NOTE(xylan): Decide whether or not a join task should run
# immediately.
if self._is_unsatisfied_join(cmd):
cmd.wait_flag = True
cmds.append(cmd)
# We need to remove all "join" tasks that have already started
# (or even completed) to prevent running "join" tasks more than
# once.
cmds = self._remove_started_joins(cmds)
LOG.debug("Found commands: %s" % cmds)
return cmds
# TODO(rakhmerov): Need to refactor this method to be able to pass tasks
# whose contexts need to be merged.
def evaluate_workflow_final_context(self):
ctx = {}
for t_ex in self._find_end_tasks():
ctx = utils.merge_dicts(
ctx,
data_flow.evaluate_task_outbound_context(t_ex)
)
return ctx
def is_error_handled_for(self, task_ex):
return bool(self.wf_spec.get_on_error_clause(task_ex.name))
def all_errors_handled(self):
for t_ex in wf_utils.find_error_task_executions(self.wf_ex):
tasks_on_error = self._find_next_task_names_for_clause(
self.wf_spec.get_on_error_clause(t_ex.name),
data_flow.evaluate_task_outbound_context(t_ex)
)
if not tasks_on_error:
return False
return True
def _find_end_tasks(self):
return list(
filter(
lambda t_ex: not self._has_outbound_tasks(t_ex),
wf_utils.find_successful_task_executions(self.wf_ex)
)
)
def _has_outbound_tasks(self, task_ex):
# In order to determine if there are outbound tasks we just need
# to calculate next task names (based on task outbound context)
# and remove all engine commands. To do the latter it's enough to
# check if there's a corresponding task specification for a task name.
return bool([
t_name for t_name in self._find_next_task_names(task_ex)
if self.wf_spec.get_tasks()[t_name]
])
def _find_next_task_names(self, task_ex):
t_state = task_ex.state
t_name = task_ex.name
ctx = data_flow.evaluate_task_outbound_context(task_ex)
t_names = []
if states.is_completed(t_state):
t_names += self._find_next_task_names_for_clause(
self.wf_spec.get_on_complete_clause(t_name),
ctx
)
if t_state == states.ERROR:
t_names += self._find_next_task_names_for_clause(
self.wf_spec.get_on_error_clause(t_name),
ctx
)
elif t_state == states.SUCCESS:
t_names += self._find_next_task_names_for_clause(
self.wf_spec.get_on_success_clause(t_name),
ctx
)
return t_names
@staticmethod
def _find_next_task_names_for_clause(clause, ctx):
"""Finds next tasks names.
        This method finds the next task (command) names based on the given {name: condition}
dictionary.
:param clause: Dictionary {task_name: condition} taken from
'on-complete', 'on-success' or 'on-error' clause.
        :param ctx: Context that clause expressions should be evaluated
            against.
:return: List of task(command) names.
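        Illustrative clause (hypothetical), as iterated below: a sequence of
        (task_name, condition) pairs, e.g. [('task2', '<% $.status = "ok" %>'), ('task3', None)];
        entries with an empty condition are always included.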
"""
if not clause:
return []
return [
t_name
for t_name, condition in clause
if not condition or expr.evaluate(condition, ctx)
]
def _remove_started_joins(self, cmds):
return list(
filter(lambda cmd: not self._is_started_join(cmd), cmds)
)
def _is_started_join(self, cmd):
if not (isinstance(cmd, commands.RunTask) and
cmd.task_spec.get_join()):
return False
return wf_utils.find_task_execution_not_state(
self.wf_ex,
cmd.task_spec,
states.WAITING
)
def _is_unsatisfied_join(self, cmd):
if not isinstance(cmd, commands.RunTask):
return False
task_spec = cmd.task_spec
join_expr = task_spec.get_join()
if not join_expr:
return False
in_task_specs = self.wf_spec.find_inbound_task_specs(task_spec)
if not in_task_specs:
return False
# We need to count a number of triggering inbound transitions.
num = len([1 for in_t_s in in_task_specs
if self._triggers_join(task_spec, in_t_s)])
# If "join" is configured as a number.
if isinstance(join_expr, int) and num < join_expr:
return True
if join_expr == 'all' and len(in_task_specs) > num:
return True
if join_expr == 'one' and num == 0:
return True
return False
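    # Illustrative: with 'join: 2' and three inbound transitions of which only one
    # has fired so far, num == 1 < 2, so the join is unsatisfied and the caller
    # marks the command with wait_flag = True.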
# TODO(rakhmerov): Method signature is incorrect given that
# we may have multiple task executions for a task. It should
# accept inbound task execution rather than a spec.
def _triggers_join(self, join_task_spec, inbound_task_spec):
in_t_execs = wf_utils.find_task_executions_by_spec(
self.wf_ex,
inbound_task_spec
)
# TODO(rakhmerov): Temporary hack. See the previous comment.
in_t_ex = in_t_execs[-1] if in_t_execs else None
if not in_t_ex or not states.is_completed(in_t_ex.state):
return False
return list(
filter(
lambda t_name: join_task_spec.get_name() == t_name,
self._find_next_task_names(in_t_ex)
)
)
|
python
|
# Copyright 2021 Prayas Energy Group(https://www.prayaspune.org/peg/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""module which has all loaders for io layer
this module can depend only on python modules and functionstore,
filemanager, config
"""
import os
import csv
import importlib
import yaml
import click
import pandas as pd
from rumi.io import config
from rumi.io import filemanager
from rumi.io.functionstore import transpose, column, unique, concat, x_in_y
from rumi.io.functionstore import circular, valid_date
import functools
import logging
from rumi.io.logger import init_logger, get_event
from rumi.io.multiprocessutils import execute_in_process_pool
from rumi.io.multiprocessutils import execute_in_thread_pool
logger = logging.getLogger(__name__)
class LoaderError(Exception):
pass
def eval_(validation, g=None, l=None):
statement = validation['code']
if not g:
g = {}
if not l:
l = {}
try:
return eval(statement, g, l)
except Exception as e:
logger.error("Failed to evaluate statement " + statement)
logger.exception(e)
def load_param(param_name: str):
"""load parameter from file in RAW format
Parameters
----------
param_name : str
parameter name
Returns
-------
Parameter data from file
"""
specs = filemanager.get_specs(param_name)
nested = specs.get('nested')
if nested and '$' not in nested:
subfolder = specs.get('nested')
filepath = filemanager.find_filepath(param_name, subfolder)
else:
filepath = filemanager.find_filepath(param_name)
logger.debug(f"Reading {param_name} from file {filepath}")
if specs.get("optional") and not os.path.exists(filepath):
logger.warning(
f"Unable to find file for optional parameter {param_name}")
return None
if specs.get("noheader"):
return read_headerless_csv(param_name, filepath)
else:
return read_csv(param_name, filepath)
def load_dataframe(param, filepath, specs):
with open(filepath) as f:
csvf = csv.DictReader(f)
filecols = csvf.fieldnames
data = {}
for row in csvf:
columns = {k: v for k,
v in specs['columns'].items() if k in filecols}
for key, col in columns.items():
if key not in row:
if col.get("optional"):
continue
else:
raise LoaderError(
f"column {key} expected in {param}, but not found.")
else:
convert = eval(columns[key]['type'])
try:
data.setdefault(key, []).append(
convert(row[key]))
except ValueError as v:
logger.error(
f"In {param}, could not convert {row[key]} for {key}")
logger.exception(v)
data.setdefault(key, []).append(
convert(col['default']))
cols = [c for c in filecols if c in specs['columns']]
return pd.DataFrame(data)[cols]
def param_env(data):
env = {c: data[c] for c in data.columns}
#env['rows'] = data.to_dict(orient='records')
return env
def validate_each_item(param: str, spec: dict, data):
"""check if min/max boundaries are satisfied
evaluates whether data is in min/max limits
in yaml specifications
Parameters
----------
param : str
Name of parameter
spec : dict
        dictionary of specifications for param
data : pd.DataFrame/[]
data for the parameters
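    Illustrative (hypothetical) column specification consumed by this check:
        columns:
          GDP:
            type: float
            min: 0
    Values below 'min' (other than the declared default) or above 'max' fail the check.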
Returns
-------
        True if data is within min/max boundaries
"""
if not isinstance(data, pd.DataFrame) and not data:
if spec.get('optional'):
logger.warning(f"No data found for optional {param}")
return True
else:
logger.error(f"No data found for {param}")
return False
if not spec.get("noheader"):
for column_, metadata in spec['columns'].items():
if column_ not in data.columns:
if metadata.get('optional'):
continue
else:
logger.error(
f"Expected column {column_} not found in {param}")
raise Exception(
f"Expected column {column_} not found in {param}")
c = data[column_]
if 'min' in spec['columns'].get(column_, {}):
m = spec['columns'][column_]['min']
default = spec['columns'][column_].get('default')
if (c < m).any() and (c[c < m] != default).any():
logger.error(
f"for {param}, {column_} should be >= {m}")
return False
if 'max' in spec['columns'].get(column_, {}):
m = spec['columns'][column_]['max']
if (c > m).any():
logger.error(
f"For {param}, {column_} should be <= {m}")
return False
return True
def validate_param(param: str, spec: dict, data, module, **kwargs):
"""validate individual parameter data
    evaluates every condition from the validations given
in yaml specifications
Parameters
----------
param : str
Name of parameter
spec : dict
        dictionary of specifications for param
data : pd.DataFrame/[]
data for the parameters
module: string
        definitions from this module will be available in validation code
**kwargs:
any additional item that should be available in validation code
"""
logger.info(f"Validating {param}")
valid = validate_each_item(param, spec, data)
for validation in spec.get('validation', []):
if spec.get('noheader'):
env = {param: data}
else:
env = param_env(data)
env[param] = data
env.update({p: get_parameter(p) for p in spec.get("dependencies", [])})
load_module("rumi.io.functionstore", env)
load_module(module, env)
env.update(kwargs)
env.update(globals())
if not eval_(validation, env):
logger.error(f"Invalid data for {param}")
logger.error("{} failed".format(validation['code']))
# print(validation['message'].format(**env))
message = validation['message']
print(eval(f"f'{message}'", env))
logger.error(eval(f"f'{message}'", env))
valid = False
return valid
def load_module(module, env):
"""loads functions from given module in env
"""
m = importlib.import_module(module)
for function in dir(m):
env[function] = getattr(m, function)
def load_namespace(namespace_defs, env):
for key, value in namespace_defs.items():
env[key] = eval(value, env)
def get_params(specs, threaded=False):
def get_param(param):
try:
return get_parameter(param, validation=True)
except FileNotFoundError as fn:
logger.exception(fn)
raise fn
except filemanager.FolderStructureError as fse:
logger.exception(fse)
raise fse
except TypeError as tpe:
logger.debug(f"Automatic loading of {param} failed.")
raise tpe
param_names = [p for p in specs.keys() if p != 'global_validation']
if threaded:
values = execute_in_thread_pool(get_param, param_names)
else:
values = [get_param(p) for p in param_names]
return {p: v for p, v in zip(param_names, values)}
def global_validation(data, global_validation_):
"""validations that depend on multiple parameters
"""
validations = global_validation_['validation']
# valid = execute_in_process_pool(validate_,
# [(data, global_validation_, v) for v in validations])
valid = [validate_(data, global_validation_, v) for v in validations]
return all(valid)
def validate_(data, global_validation_, validation):
env = {}
env.update(data)
env.update(globals())
load_module(global_validation_['module'], env)
load_module("rumi.io.functionstore", env)
include = global_validation_.get("include", [])
for type_ in include:
s = filemanager.get_type_specs(type_)
env.update(get_params(s))
load_namespace(s['global_validation'].get('namespace', {}), env)
load_namespace(global_validation_.get('namespace', {}), env)
if eval_(validation, env):
return True
else:
print(validation['message'])
logger.error(f"Global validation failed for {validation['code']}")
logger.error(validation['message'])
return False
def validate_param_(param,
specs,
d,
module):
try:
if isinstance(d, type(None)):
            # for complicated parameters with variable nested folders
# skip individual validation
return True
else:
return validate_param(
param, specs, d, module)
except Exception as e:
        print(f"Error occurred while validating {param}")
        logger.error(f"Error occurred while validating {param}")
logger.exception(e)
raise e
def validate_params(param_type):
""" validate all prameters
Parameters
----------
param_type: str
one of Common, Demand, Supply
specs_file: str
yaml file path
Returns
-------
    returns True if all parameters are valid, else returns False
"""
logger.info(f"Validating {param_type}")
print(f"Validating {param_type}")
allspecs = dict(filemanager.get_type_specs(param_type))
gvalidation = allspecs['global_validation']
del allspecs['global_validation']
data = get_params(allspecs, threaded=True)
valid = True
module = gvalidation['module']
valid = execute_in_process_pool(validate_param_,
[(p,
allspecs[p],
v,
module) for p, v in data.items()])
return global_validation(data, gvalidation) and all(valid)
def call_loader(loaderstring, **kwargs):
functionname = loaderstring.split(".")[-1]
module = ".".join(loaderstring.split(".")[:-1])
m = importlib.import_module(module)
loader_function = getattr(m, functionname)
return loader_function(**kwargs)
def get_config_parameter(param_name):
path = filemanager.get_config_parameter_path(param_name)
return pd.read_csv(path)
def call_loader_(specs, param_name, **kwargs):
try:
if not specs.get('nested'):
d = call_loader(specs.get('loader'))
elif '$' not in specs.get('nested'):
d = call_loader(specs['loader'],
param_name=param_name,
subfolder=specs.get('nested'))
elif "$" in specs.get('nested'):
if "validation" in kwargs and kwargs['validation'] == True:
d = None
else:
d = call_loader(specs.get('loader'), **kwargs)
except FileNotFoundError as fne:
if specs.get('optional'):
d = None
else:
raise fne
return d
@functools.lru_cache(maxsize=None)
def get_parameter(param_name, **kwargs):
""" returns data for given parameter. It returns final expanded data.
    Except for noheader parameters, everything it returns is a pandas
    DataFrame.
    For headerless parameters, it returns a dictionary with the first item
    of every row as key and a list of the remaining items as value.
examples
--------
::
get_parameter('GDP') -> will return GDP parameter as a DataFrame
get_parameter('SubGeography1') -> will return SubGeography1 parameter as a list
get_parameter('SubGeography2') -> will return SubGeography2 parameter as a dictionary, keys are regions and values are list of states
get_parameter('BaseYearDemand',
demand_sector='D_AGRI') -> BaseYearDemand parameter for 'D_AGRI' as DataFrame
get_parameter('NumInstances',
demand_sector='D_RES',
energy_service='RES_COOL') -> NumInstances parameter for <'D_RES','RES_COOL'> as DataFrame
:param: param_name
:param: `**kwargs` - variable number of named arguments
:returns: DataFrame or list or dictionary
"""
#logger.debug("Getting Parameter " + param_name + str(kwargs))
specs = filemanager.get_specs(param_name)
if specs.get('loader'):
d = call_loader_(specs, param_name, **kwargs)
else:
d = load_param(param_name)
if d is None:
r = d
elif specs.get("noheader"):
r = reformat_headerless(param_name, specs, d)
else:
r = d
return filter_param(param_name, r)
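# Illustrative examples (hypothetical data) for reformat_headerless below:
#   "map" spec:  [["r1", "a", "b"], ["r2", "c"]] -> {"r1": ["a", "b"], "r2": ["c"]}
#   "list" spec: [["x", "y", "z"]] -> ["x", "y", "z"]
#   single cell: [["x"]] -> "x"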
def reformat_headerless(param_name, specs, d):
"""Formate headerless data to list/dictionary/string as required
"""
if specs.get("map"):
firstcolumn = column(d, 0)
if not unique(firstcolumn):
repeating = set(
[c for c in firstcolumn if firstcolumn.count(c) > 1])
logger.warning(
f"First column in {param_name} should not repeat, but repeating rows discovered for {repeating}")
logger.warning(
f"For {param_name} last item from repeating rows of {repeating} will be considered")
r = {key: d[r][1:] for r, key in enumerate(column(d, 0))}
elif specs.get("list"):
r = d[0]
if len(d) > 1:
logger.warning(
f"Parameter {param_name} expects only one row but found multiple rows. Only first row will be considered")
elif len(d) == 1 and len(d[0]) == 1:
r = d[0][0]
else:
r = d
return r
def filter_param(param_name, param_data):
"""This functions filters parameter based on scheme given
in yaml specifications.
caution: this function creates a circular dependency by
calling get_parameter again. SO IF SELF REFERENCING DEPENDENCIES ARE GIVEN
IT MIGHT RESULT IN RECURSION ERROR.
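    Illustrative (hypothetical) specs entry driving this filter:
        filterqueries: ["Year <= 2030"]
        dependencies: [ModelPeriod]
    Each query is wrapped in parentheses, joined with " & ", and evaluated via
    param_data.query(...) with the dependency parameters in scope.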
"""
specs = filemanager.get_specs(param_name)
if specs.get("filterqueries") and isinstance(param_data, pd.DataFrame):
logger.debug(f"Filtering parameter {param_name}")
dependencies = specs.get("dependencies")
dependencies_data = {p: get_parameter(p) for p in dependencies}
queries = specs.get("filterqueries")
dependencies_data['param_data'] = param_data
queries_ = [f"( {q} )" for q in queries]
statement = "param_data.query(f\"{0}\")".format(" & ".join(queries_))
param_data = eval(statement, dependencies_data) # .copy()
if len(param_data) == 0:
logger.warning(
f"Filtering of {param_name} has resulted in empty data")
return param_data
def find_cols(filepath, columnsdata):
"""
find columns common between column names provided in specifications
and those given in file.
"""
with open(filepath) as f:
csvf = csv.reader(f)
columnsf = next(csvf)
# columns as per order in file
return [c for c in columnsf if c in columnsdata]
def read_headerless_csv(param_name, filepath):
try:
with open(filepath) as f:
csvf = csv.reader(f)
return [row for row in csvf]
except ValueError as v:
logger.error(f"Unable to parse data for {param_name}")
logger.exception(v)
raise v
except FileNotFoundError as fne:
logger.error(f"Unable to find file for {param_name}")
logger.exception(fne)
raise fne
except Exception as e:
        logger.error(f"Failed to read parameter {param_name}")
logger.exception(e)
raise e
def read_csv(param_name, filepath):
"""read dataframe using pandas.read_csv, but with appropriate types
"""
specs = filemanager.get_specs(param_name)
columndata = specs['columns']
converters = {c: eval(data['type']) for c, data in columndata.items()}
try:
cols = find_cols(filepath, columndata)
return pd.read_csv(filepath,
usecols=cols,
converters=converters,
na_values="")
except ValueError as v:
logger.error(f"Unable to parse data for {param_name}")
logger.exception(v)
raise v
except FileNotFoundError as fne:
if specs.get('optional'):
logger.warning(
f"Unable to find file for optional parameter {param_name}")
else:
logger.error(f"Unable to find file for {param_name}")
logger.exception(fne)
raise fne
except Exception as e:
        logger.error(f"Failed to read parameter {param_name}")
logger.exception(e)
raise e
def sanity_check_cmd_args(param_type: str,
model_instance_path: str,
scenario: str,
logger_level: str,
numthreads: int,
cmd='rumi_validate'):
def check_null(param_value, param_name):
if not param_value:
print(f"Command line parameter, {param_name} is compulsory")
return True
else:
return False
valid = False
if check_null(param_type, "-p/--param_type") or\
check_null(model_instance_path, "-m/--model_instance_path") or\
check_null(scenario, "-s/--scenario"):
pass
elif param_type not in ["Common", "Demand", "Supply"]:
print(f"Invalid param_type '{param_type}'")
print("param_type can be one of Common, Demand or Supply")
elif not os.path.exists(model_instance_path) or not os.path.isdir(model_instance_path):
print(f"Invalid model_instance_path '{model_instance_path}'")
print("give appropriate folder path")
elif logger_level not in ["INFO", "WARN", "DEBUG", "ERROR"]:
print(f"Invalid logger_level '{logger_level}'")
print("logger_level can be one of INFO,WARN,DEBUG,ERROR.")
elif numthreads <= 0:
print(f"Invalid numthreads '{numthreads}'")
print("numthreads can be positive integer")
else:
valid = True
if not valid:
print(f"run {cmd} --help for more help")
return valid
def rumi_validate(param_type: str,
model_instance_path: str,
scenario: str,
logger_level: str,
numthreads: int):
"""Function to validate Common or Demand or Supply
"""
global logger
if not sanity_check_cmd_args(param_type,
model_instance_path,
scenario,
logger_level,
numthreads):
return
config.initialize_config(model_instance_path, scenario)
init_logger(param_type, logger_level)
config.set_config("numthreads", str(numthreads))
logger = logging.getLogger("rumi.io.loaders")
try:
if (validate_params(param_type)):
logger.info(f"{param_type} Validation succeeded")
print(f"{param_type} Validation succeeded")
else:
logger.error(f"{param_type} Validation failed")
print(f"{param_type} Validation failed")
finally:
get_event().set()
@click.command()
@click.option("-p", "--param_type",
help="Parameter type to validate. can be one of Common, Demand or Supply")
@click.option("-m", "--model_instance_path",
help="Path where model instance is stored")
@click.option("-s", "--scenario",
help="Name of Scenario")
@click.option("-l", "--logger_level",
help="Level for logging,one of INFO,WARN,DEBUG,ERROR. (default: INFO)",
default="INFO")
@click.option("-t", "--numthreads",
help="Number of threads/processes (default: 2)",
default=2)
def main(param_type: str,
model_instance_path: str,
scenario: str,
logger_level: str,
numthreads: int):
"""Command line interface for data validation.
"""
rumi_validate(param_type,
model_instance_path,
scenario,
logger_level,
numthreads)
if __name__ == "__main__":
main()
|
python
|
import gym
import numpy as np
from gym.spaces import Discrete
from gym_holdem.holdem import Table, Player, BetRound
from pokereval_cactus import Card
class HoldemEnv(gym.Env):
def __init__(self, player_amount=4, small_blind=25, big_blind=50, stakes=1000):
super().__init__()
self.player_amount = player_amount
self.small_blind = small_blind
self.big_blind = big_blind
self.stakes = stakes
self.table = None
self.done = True
# 0 -> FOLD
# 1 -> CALL || CHECK
# 2 -> ALL_IN
# 3..(stakes * player_amount + 2) -> bet_amount + 2
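        # Worked example (illustrative): with player_amount=4 and stakes=1000,
        # stakes_in_game == 4000, so action_space == Discrete(4003); e.g. action 52
        # decodes to raise_bet(50) in _take_action below.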
self.action_space = Discrete(self.stakes_in_game + 3)
self.players = [Player(stakes, name=str(i)) for i in range(player_amount)]
self.players_last_stakes = [stakes for _ in range(player_amount)]
self.debug = {}
self.last_action = (-1, None)
self.reset()
def step(self, action: int):
dbg_end_round = False
dbg_new_round = False
dbg_winners = []
dbg_new_bet_round = False
player = self.table.next_player
if action not in self.valid_actions:
raise ValueError(f"Action {action} is not valid in this context")
self._take_action(action, player)
if self.table.all_players_called():
self.table.start_next_bet_round()
dbg_new_bet_round = True
while self.table.bet_round == BetRound.SHOWDOWN:
dbg_end_round = True
dbg_winners = self.table.end_round()
if len(self.table.players) >= 2:
self.table.new_round()
dbg_new_round = True
if self.table.all_players_called():
self.table.start_next_bet_round()
else:
self.done = True
idx = self.players.index(player)
reward = player.stakes - self.players_last_stakes[idx]
self.players_last_stakes[idx] = player.stakes
self.debug = {
"new_bet_round": dbg_new_bet_round,
"new_round": dbg_new_round,
"end_round": dbg_end_round,
"winners": dbg_winners
}
self.last_action = action, player
return self.observation_space(player), reward, self.done, self.debug
def reset(self):
self.done = False
self.table = Table(small_blind=self.small_blind, big_blind=self.big_blind)
for idx, p in enumerate(self.players):
p.reset(stakes=self.stakes)
p.table = self.table
self.players_last_stakes[idx] = self.stakes
self.table.players = self.players[:]
self.table.new_round()
return self.observation_space(self.table.next_player)
@staticmethod
def _take_action(action, player):
if action == 0:
player.fold()
elif action == 1:
player.call_check()
elif action == 2:
player.action_from_amount(player.stakes)
else:
player.raise_bet(action - 2)
@property
def valid_actions(self):
player = self.table.next_player
to_call = player.to_call_amount()
min_bet_amount = to_call + self.table.last_bet_raise_delta
max_bet_amount = player.stakes
# 0 -> FOLD
# 1 -> CALL || CHECK
actions = [0, 1, 2]
if min_bet_amount <= max_bet_amount:
possible_bet_actions = range(min_bet_amount + 2, max_bet_amount + 3)
actions += possible_bet_actions
# else:
# if player.stakes > to_call:
# actions.append(player.stakes)
return np.array(actions)
def observation_space(self, player):
max_card_value = 268471337
hand = [card / (max_card_value + 1) for card in player.hand]
board = [card / (max_card_value + 1) for card in self.table.board]
for _ in range(len(self.table.board), 5):
board.append(0)
pot = self.table.pot_value() / (self.stakes_in_game + 1)
player_stakes = player.stakes / (self.stakes_in_game + 1)
other_players_stakes = []
for p in self.players:
if p == player:
continue
other_players_stakes.append(p.stakes / (self.stakes_in_game + 1))
active_false = 0
active_true = 0.1
player_active = active_true if player in self.table.active_players else active_false
other_players_active = []
for p in self.players:
if p == player:
continue
active = active_true if p in self.table.active_players else active_false
other_players_active.append(active)
observation = hand + board + [pot, player_stakes] + other_players_stakes + [
player_active] + other_players_active
return np.array(observation)
@property
def table_players(self):
return self.table.players
@property
def next_player(self):
return self.table.next_player
@property
def stakes_in_game(self):
return self.player_amount * self.stakes
def render(self, mode="human", close=False):
# for p in self.table.active_players:
# print(str(p))
# print(f"Board: {Card.print_pretty_cards(self.table.board)}")
# print(f"Bet round: {bet_round_to_str(self.table.bet_round)}")
if self.last_action[0] == 0:
print(f"{self.last_action[1].name}: FOLDED")
elif self.last_action[0] == 1:
print(f"{self.last_action[1].name}: CALLED")
elif self.last_action[0] == 2:
print(f"{self.last_action[1].name}: ALL_IN")
else:
print(f"{self.last_action[1].name}: RAISED({self.last_action[0] - 2})")
if self.debug["new_bet_round"]:
print("### NEW BET ROUND ###")
print(f"Community Cards: {Card.print_pretty_cards(self.table.board)}")
if self.debug["end_round"]:
print("### END ROUND ###")
all_winners = [[w.name for w in winners] for winners in self.debug["winners"]]
print(f"WINNERS: {all_winners}")
if self.debug["new_round"]:
print("### NEW ROUND ###")
for p in self.table.players:
print(f"Player {p.name}: hand={Card.print_pretty_cards(p.hand)}, stakes={p.stakes}, "
f"bet={p.bet}, has_called={p.has_called}, has_folded={p not in self.table.active_players}, "
f"dealer={not self.done and self.table.players[self.table.dealer] == p}")
if self.done:
print("### GAME ENDED - RESETTING ###")
|
python
|
# Generated by Django 4.0.1 on 2022-01-27 07:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='bookinstance',
options={'ordering': ['due_back']},
),
migrations.AddField(
model_name='book',
name='language',
            field=models.CharField(blank=True, choices=[('EN', 'English'), ('FR', 'French'), ('JP', 'Japanese')], default='EN', help_text='Select language of the book', max_length=2),
),
migrations.AddField(
model_name='bookinstance',
name='status',
field=models.CharField(blank=True, choices=[('m', 'Maintenance'), ('o', 'On loan'), ('a', 'Available'), ('r', 'Reserved')], default='m', help_text='Book availability', max_length=1),
),
]
|
python
|
"""
return 0 = Success
return 1 = Login = 'Invalid username or password!', Register = 'User is already'
return 2 = 'Something went wrong'
"""
from connect_db import megatronDBC
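# Usage sketch (illustrative; assumes a `users` table laid out as (id, username, password, level)):
#   registerSYS("alice", "secret")  # -> 0 on success, 1 if the username is taken, 2 on error
#   loginSYS("alice", "secret")     # -> (0, username, level) on success, else 1 or 2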
# login system
def loginSYS(userInput, passInput):
try:
cursor = megatronDBC.cursor()
selectDB = "SELECT * FROM users;"
cursor.execute(selectDB)
result = cursor.fetchall()
for x in result:
if userInput == x[1] and passInput == x[2]:
return 0, x[1], x[3]
return 1
except:
return 2
# end login system
# register system
def registerSYS(userInput, passInput):
try:
cursor = megatronDBC.cursor()
selectDB = "SELECT * FROM users;"
cursor.execute(selectDB)
result = cursor.fetchall()
for x in result:
if userInput == x[1]:
return 1
        # Use a parameterized query to avoid SQL injection
        insertDB = "INSERT INTO users (username, password, level) values (?, ?, 0);"
        cursor.execute(insertDB, (userInput, passInput))
megatronDBC.commit()
return 0
except:
return 2
# end register system
|
python
|
from datetime import timedelta
from app import hackathon_variables
from django.db import models
from django.utils import timezone
from user.models import User
class ItemType(models.Model):
"""Represents a kind of hardware"""
# Human readable name
name = models.CharField(max_length=50, unique=True)
# Image of the hardware
image = models.FileField(upload_to='hw_images/')
# Description of this hardware
# what is it used for? which items are contained in the package?
description = models.TextField()
def get_borrowable_items(self):
""" Get items not borrowed already """
availables = Item.objects.filter(item_type=self, available=True)
borrowings = Borrowing.objects.filter(item__item_type=self, return_time__isnull=True)
return availables.exclude(id__in=[x.item.id for x in borrowings])
def get_available_count(self):
ava_count = Item.objects.filter(item_type=self, available=True).count()
req_count = self.get_requested_count()
borrowed_count = self.get_borrowed_count()
return ava_count - req_count - borrowed_count
def get_requested_count(self):
return Request.objects.get_active_by_item_type(self).count()
def get_borrowed_count(self):
return Borrowing.objects.get_active_by_item_type(self).count()
def get_unavailable_count(self):
return Item.objects.filter(item_type=self, available=False).count()
def make_request(self, user):
req = Request(item_type=self, user=user)
req.save()
def __str__(self):
return self.name
class Item(models.Model):
"""Represents a real world object identified by label"""
# Hardware model/type
item_type = models.ForeignKey(ItemType, on_delete=models.CASCADE)
# Identifies a real world object
label = models.CharField(max_length=20, unique=True)
# Is the item available?
available = models.BooleanField(default=True)
# Any other relevant information about this item
comments = models.TextField(blank=True, null=True)
def can_be_borrowed(self):
return Borrowing.objects.filter(return_time__isnull=True, item=self).count() == 0
def __str__(self):
return '{} ({})'.format(self.label, self.item_type.name)
class BorrowingQuerySet(models.QuerySet):
def get_active(self):
return self.filter(return_time__isnull=True)
def get_returned(self):
return self.filter(return_time__isnull=False)
def get_active_by_item_type(self, item_type):
return self.filter(return_time__isnull=True, item__item_type=item_type)
def get_active_by_user(self, user):
return self.filter(return_time__isnull=True, user=user)
class Borrowing(models.Model):
"""
The 'item' has been borrowed to the 'user'
"""
objects = BorrowingQuerySet.as_manager()
user = models.ForeignKey(User, on_delete=models.DO_NOTHING)
item = models.ForeignKey(Item, on_delete=models.DO_NOTHING)
# Instant of creation
picked_up_time = models.DateTimeField(auto_now_add=True)
# If null: item has not been returned yet
return_time = models.DateTimeField(null=True, blank=True)
# Borrowing handled by
borrowing_by = models.ForeignKey(User, related_name='hardware_admin_borrowing', on_delete=models.DO_NOTHING)
# Return handled by (null until returned)
return_by = models.ForeignKey(User, related_name='hardware_admin_return', null=True, blank=True,
on_delete=models.SET_NULL)
def get_picked_up_time_ago(self):
return str(timezone.now() - self.picked_up_time)
def get_return_time_ago(self):
return str(timezone.now() - self.return_time)
def is_active(self):
return self.return_time is None
def __str__(self):
return '{} ({})'.format(self.item.item_type.name, self.user)
class RequestQuerySet(models.QuerySet):
def get_active(self):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
threshold = timezone.now() - delta
return self.filter(borrowing__isnull=True, request_time__gte=threshold)
def get_borrowed(self):
return self.filter(borrowing__isnull=False)
def get_expired(self):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
threshold = timezone.now() - delta
return self.filter(borrowing__isnull=True, request_time__lt=threshold)
def get_active_by_user(self, user):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
threshold = timezone.now() - delta
return self.filter(borrowing__isnull=True, request_time__gte=threshold, user=user)
def get_active_by_item_type(self, item_type):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
threshold = timezone.now() - delta
return self.filter(borrowing__isnull=True, request_time__gte=threshold, item_type=item_type)
class Request(models.Model):
"""
Represents reservation of an item
of type 'item_type' done by 'user'
"""
objects = RequestQuerySet.as_manager()
# Requested item type
item_type = models.ForeignKey(ItemType, on_delete=models.CASCADE)
# Hacker that made the request
user = models.ForeignKey(User, on_delete=models.CASCADE)
# Borrowing derived from this request
borrowing = models.ForeignKey(Borrowing, null=True, blank=True, on_delete=models.CASCADE)
# Instant of creation
request_time = models.DateTimeField(auto_now_add=True)
def is_active(self):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
remaining = delta - (timezone.now() - self.request_time)
return not self.borrowing and remaining.total_seconds() > 0
def get_remaining_time(self):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
remaining = delta - (timezone.now() - self.request_time)
if self.borrowing:
return "Borrowed"
elif remaining.total_seconds() < 0:
return "Expired"
else:
return str(remaining)
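    # Illustrative: assuming hackathon_variables.HARDWARE_REQUEST_TIME == 30 (minutes),
    # a request made 10 minutes ago and not yet borrowed reports roughly '0:20:00'
    # remaining, and expires once 30 minutes have passed.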
def __str__(self):
return '{} ({})'.format(self.item_type, self.user)
|
python
|
from __future__ import print_function
from builtins import object
import copy
import numpy as np
class Observer(object):
def __init__(self):
pass
def update(self, state):
pass
def reset(self):
pass
class Printer(object):
def __init__(self, elems=1, msg=None, skip=1):
self.elems = elems
self.msg = msg or "{}, "*(self.elems-1) + '{}'
self.count = 0
self.skip = skip
def update(self, sample):
if self.count % self.skip == 0:
if hasattr(sample, '__iter__'):
msg = self.msg.format(*[i for i in sample])
else:
msg = self.msg.format(sample)
print(self.count, ':', msg)
self.count += 1
def reset(self):
self.count = 0
class TimeAutoCorrelation(Observer):
def __init__(self):
self.arr = []
def update(self, sample):
self.arr.append(sample)
def get_correlation(self):
npn = np.array(self.arr)
fn = np.fft.fftn(npn)
return np.real(np.fft.ifftn(fn*fn.conj()))
def reset(self):
self.arr = []
class MeanObserver(Observer):
def __init__(self, block=None):
self.s = block or np.s_[:]
self.dat = None
self.n = 0
def update(self, sample):
if self.dat is None:
self.n = 1
self.dat = sample[self.s]
else:
self.n += 1
self.dat = self.dat + (sample[self.s] - self.dat) / self.n
def get_mean(self):
return self.dat
def reset(self):
self.dat = None
class CovarianceObserver(Observer):
def __init__(self, block=None):
self.s = block or np.s_[:]
self.mean = None
self.cov = None
self.n = 0
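    # update() below maintains a running mean and a biased (1/n-normalised) covariance
    # in a single pass: the mean is updated first, then the previous covariance is
    # rescaled and an outer-product correction using the new mean is folded in
    # (a Welford-style online update).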
def update(self, sample):
if self.mean is None:
self.n = 1
self.mean = sample[self.s]
self.cov = 0*np.outer(sample[self.s], sample[self.s])
else:
self.n += 1
self.mean = self.mean + (sample[self.s] - self.mean) / self.n
self.cov = (self.n-1.0)*self.cov
self.cov += (self.n-1.0)/self.n*np.outer(sample[self.s]-self.mean,sample[self.s]-self.mean)
self.cov *= 1.0/self.n
def get_mean(self):
return self.mean
def get_covariance(self):
return self.cov
def reset(self):
self.mean = None
class HistogramObserver(Observer):
def __init__(self, block=None):
self.s = block if block is not None else np.s_[:]
self.dat = []
def update(self, sample):
self.dat.append(copy.copy(sample.state[self.s]))
def get_histogram(self):
return np.array(self.dat)
def reset(self):
self.dat = []
|
python
|
# proxy module
from __future__ import absolute_import
from chaco.abstract_plot_data import *
|
python
|
__author__ = "Doug Napoleone"
__version__ = "0.0.1"
__email__ = '[email protected]'
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import logging
from flask import Flask
from flask_debugtoolbar import DebugToolbarExtension
app = Flask(__name__)
# set a 'SECRET_KEY' to enable the Flask session cookies
app.config['SECRET_KEY'] = '<replace with a secret key>'
@app.route("/")
def index():
    # NOTE: The response needs a <body> tag, otherwise: "Could not insert debug toolbar. </body> tag not found in response."
return "<body>Hello World!</body>"
if __name__ == '__main__':
app.debug = True
if app.debug:
logging.basicConfig(level=logging.DEBUG)
toolbar = DebugToolbarExtension(app)
# Localhost
# port=0 -- random free port
# app.run(port=0)
app.run(
port=5000
)
# # Public IP
# app.run(host='0.0.0.0')
|
python
|
from loggers import Actions
from stopping_decision_makers.base_decision_maker import BaseDecisionMaker
class SequentialNonrelDecisionMaker(BaseDecisionMaker):
"""
A concrete implementation of a decision maker.
    Returns Actions.QUERY once a run of consecutive non-relevant snippets reaches a predetermined threshold; otherwise returns Actions.SNIPPET.
"""
def __init__(self, search_context, logger, nonrelevant_threshold=3):
super(SequentialNonrelDecisionMaker, self).__init__(search_context, logger)
        self.__nonrelevant_threshold = nonrelevant_threshold  # The threshold; once this many consecutive non-relevant snippets are seen, we stop in the current SERP.
def decide(self):
"""
        Counts consecutive non-relevant (judgment == 0) snippets among those already examined.
        Once the run reaches the threshold, a new query should be issued; a relevant snippet resets the counter, otherwise the next snippet in the SERP is examined.
"""
counter = 0
examined_snippets = self._search_context.get_examined_snippets()
for snippet in examined_snippets:
judgment = snippet.judgment
if judgment == 0:
counter = counter + 1
if counter == self.__nonrelevant_threshold:
return Actions.QUERY
else:
counter = 0 # Break the sequence; found something relevant, reset the counter.
return Actions.SNIPPET
|
python
|
from django.db import models
from django.contrib.auth.models import User
from course.models import Course
# Create your models here.
class Answer(models.Model):
user = models.ForeignKey(User, name="user", on_delete=models.CASCADE)
answer = models.TextField()
def __str__(self) -> str:
return self.answer
class Question(models.Model):
question = models.TextField()
user = models.ForeignKey(User, name="quser", on_delete=models.CASCADE)
answer = models.ManyToManyField(Answer, name="answer")
course = models.ForeignKey(Course, name="course", on_delete=models.CASCADE)
def __str__(self) -> str:
return self.question
|
python
|
# Time: O(k * log(min(n, m, k))), with n x m matrix
# Space: O(min(n, m, k))
from heapq import heappush, heappop
class Solution(object):
def kthSmallest(self, matrix, k):
"""
:type matrix: List[List[int]]
:type k: int
:rtype: int
"""
kth_smallest = 0
min_heap = []
def push(i, j):
if len(matrix) > len(matrix[0]):
if i < len(matrix[0]) and j < len(matrix):
heappush(min_heap, [matrix[j][i], i, j])
else:
if i < len(matrix) and j < len(matrix[0]):
heappush(min_heap, [matrix[i][j], i, j])
push(0, 0)
while min_heap and k > 0:
kth_smallest, i, j = heappop(min_heap)
push(i, j + 1)
if j == 0:
push(i + 1, 0)
k -= 1
return kth_smallest
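# Minimal usage sketch (illustrative): the classic example matrix yields 13 for k = 8.
#   Solution().kthSmallest([[1, 5, 9], [10, 11, 13], [12, 13, 15]], 8)  # -> 13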
|
python
|
# -*- coding: utf-8 -*-
from . import main_menu, signals, slides, widgets # noqa
|
python
|
# -*- coding: utf-8 -*-
'''
@author: [email protected]
Notes: (1) This program is for technical study only; any commercial use is strictly prohibited.
    (2) Do not redistribute the scraped content or its analysis carelessly; you bear the consequences yourself.
    (3) The software may have bugs; please report any you find promptly.
    (4) For transaction data, the account and password need to be changed; search for admin or password to modify them.
'''
import sqlite3
import os
from ErShouFangDbHelper import GetXiaoquNianDai
from ErShouFangDbHelper import GetCountFromSummary
from ChengJiaoDbHelper import GetMaxRiQi
from AdvancedAnalysisErShouFangMain import GetPriceFromDbList
import time
import datetime
def GetXiaoquData(dbname):
xiaoqulist={}
conn = sqlite3.connect("database/"+dbname+".db")
sqlstring=u'select DISTINCT xiaoqu from chengjiao '
cursor=conn.execute(sqlstring)
for row in cursor:
xiaoqulist[row[0]]={}
xiaoqulist[row[0]]['qu']=''
xiaoqulist[row[0]]['zhen']=''
xiaoqulist[row[0]]['min']=None
xiaoqulist[row[0]]['max']=None
xiaoqulist[row[0]]['range']=0.0
conn.close()
return xiaoqulist
def Analysis(dbname):
'''key $ xiaoqu $ fangxing $ mianji $ qu $ zhen $ zongjia $ danjia $ manji $ riqi'''
conn = sqlite3.connect("database/"+dbname+".db")
sqlstring='select qu,zhen,sum(zongjia),sum(mianji) from chengjiao group by zhen order by qu'
cursor=conn.execute(sqlstring)
totalprice=0.0
totalmianji=0.0
for row in cursor:
qu=row[0]
zhen=row[1]
totalprice=row[2]
totalmianji=row[3]
if(totalmianji!=0):
print qu,zhen,totalprice,totalmianji,totalprice/totalmianji
conn.close()
def TrendShanghaiMonth(dbname):
'''key $ xiaoqu $ fangxing $ mianji $ qu $ zhen $ zongjia $ danjia $ manji $ riqi'''
conn = sqlite3.connect("database/"+dbname+".db")
sqlstring='select qu,zhen,sum(zongjia),sum(mianji) from chengjiao group by zhen order by qu'
cursor=conn.execute(sqlstring)
totalprice=0.0
totalmianji=0.0
for row in cursor:
qu=row[0]
zhen=row[1]
totalprice=row[2]
totalmianji=row[3]
if(totalmianji!=0):
print qu,zhen,totalprice,totalmianji,totalprice/totalmianji
conn.close()
def TrendZhenMonth(dbname):
conn = sqlite3.connect("database/"+dbname+".db")
sqlstring=u'select qu,zhen,count(key),sum(zongjia),sum(mianji),strftime(\'%Y%m\',riqi) \
from chengjiao group by zhen,strftime(\'%Y%m\',riqi) '
cursor=conn.execute(sqlstring)
totalprice=0.0
totalmianji=0.0
zhenlist=[]
for row in cursor:
qu=row[0]
zhen=row[1]
count=row[2]
totalprice=row[3]
totalmianji=row[4]
average=totalprice/totalmianji
riqi=row[5]
zhenlist.append([qu,zhen,count,totalprice,totalmianji,average,riqi])
conn.close()
zhendata={}
for zhen in zhenlist:
key=zhen[1]
zhendata[key]={}
zhendata[key]['qu']=''
zhendata[key]['zhen']=''
zhendata[key]['min']=None
zhendata[key]['max']=None
zhendata[key]['range']=0.0
zhendata[key]['count']=0
show=[u'张江',u'唐镇',u'三林',u'川沙',u'北蔡',u'祝桥 ']
if key.strip() in show:
print zhen[0],zhen[1],zhen[2],zhen[5],zhen[6]
for zhen in zhenlist:
key=zhen[1]
zhendata[key]['qu']=zhen[0];
zhendata[key]['zhen']=zhen[1]
junjia=zhen[5]
if zhendata[key]['min']==None or \
zhendata[key]['max']==None:
zhendata[key]['min']=junjia
zhendata[key]['max']=junjia
if junjia<zhendata[key]['min']:
zhendata[key]['min']=junjia
zhendata[key]['range']=zhendata[key]['max']-zhendata[key]['min']
elif junjia>zhendata[key]['max']:
zhendata[key]['max']=junjia
zhendata[key]['range']=zhendata[key]['max']-zhendata[key]['min']
zhendata[key]['count']+=zhen[2]
f=open('report\\chenjiao\\chengjiaoTrendZhen.txt','w')
f.write('qu$zhen$shuliang$min$max$range\n')
for key in zhendata:
txt=('%s $ %s $ %s $ %s $ %s $ %s\n')%(zhendata[key]['qu'],zhendata[key]['zhen'],\
zhendata[key]['count'],zhendata[key]['min'],zhendata[key]['max'],zhendata[key]['range'])
f.write(txt.encode('utf-8'))
f.close()
def TrendXiaoQuMonth(dbname,xiaoqudata):
xiaoqulist=GetXiaoquNianDai('20170107')
conn = sqlite3.connect("database/"+dbname+".db")
sqlstring=u'select qu,zhen,xiaoqu,count(key),sum(zongjia),sum(mianji),strftime(\'%m\',riqi) \
from chengjiao group by xiaoqu,strftime(\'%m\',riqi) '
cursor=conn.execute(sqlstring)
totalprice=0.0
totalmianji=0.0
for row in cursor:
qu=row[0]
zhen=row[1]
xiaoqu=row[2]
count=row[3]
totalprice=row[4]
totalmianji=row[5]
riqi=row[6]
niandai=''
junjia=totalprice/totalmianji
if xiaoqulist.has_key(xiaoqu):
niandai=xiaoqulist[xiaoqu]
print xiaoqu,niandai
xiaoqudata[xiaoqu]['niandai']=niandai
xiaoqudata[xiaoqu]['qu']=qu;
xiaoqudata[xiaoqu]['zhen']=zhen
if xiaoqudata[xiaoqu]['min']==None or \
xiaoqudata[xiaoqu]['max']==None:
xiaoqudata[xiaoqu]['min']=junjia
xiaoqudata[xiaoqu]['max']=junjia
if junjia<xiaoqudata[xiaoqu]['min']:
xiaoqudata[xiaoqu]['min']=junjia
xiaoqudata[xiaoqu]['range']=xiaoqudata[xiaoqu]['max']-xiaoqudata[xiaoqu]['min']
elif junjia>xiaoqudata[xiaoqu]['max']:
xiaoqudata[xiaoqu]['max']=junjia
xiaoqudata[xiaoqu]['range']=xiaoqudata[xiaoqu]['max']-xiaoqudata[xiaoqu]['min']
conn.close()
f=open('report\\chenjiao\\chengjiaoTrendXiaoqu.txt','w')
f.write('qu$zhen$xiaoqu$niandai$min$max$range\n')
for key in xiaoqudata:
txt=('%s$%s$%s$%s$%s$%s$%s\n')%(xiaoqudata[key]['qu'],xiaoqudata[key]['zhen'],\
key,xiaoqudata[key]['niandai'],xiaoqudata[key]['min'],\
xiaoqudata[key]['max'],xiaoqudata[key]['range'])
f.write(txt.encode('utf-8'))
f.close()
def ChenJiaoShangHaiPerMonth(dbname='chengjiao'):
    print 'month  avg_price  count'
conn = sqlite3.connect("database/"+dbname+".db")
sqlstring=u'select count(key),sum(zongjia),sum(mianji),strftime(\'%Y%m\',riqi) \
from chengjiao group by strftime(\'%Y%m\',riqi) '#%d
cursor=conn.execute(sqlstring)
totalprice=0.0
totalmianji=0.0
count=0
t=''
for row in cursor:
count=row[0]
totalprice=row[1]
totalmianji=row[2]
t=row[3]
print ('%s %-11.4f %s')%(t,totalprice/totalmianji,count)
def get_latest_90_count(date,dbname='chengjiao'):
date = time.strptime(date,"%Y-%m-%d")
date=datetime.datetime(date[0],date[1],date[2])
lastdate= date + datetime.timedelta(days = -90)
    print date,'date 90 days earlier:',lastdate
conn = sqlite3.connect("database/"+dbname+".db")
sqlstring=(u'select count(key) from chengjiao where riqi>=\'%s\'')%(lastdate)
cursor=conn.execute(sqlstring)
c=0
for row in cursor:
c=row[0]
conn.close()
return c
def get_chengjiao_count(where,dbname='chengjiao'):
conn = sqlite3.connect("database/"+dbname+".db")
sqlstring=(u'select count(key) from chengjiao where %s')%(where)
cursor=conn.execute(sqlstring)
c=0
for row in cursor:
c=row[0]
conn.close()
return c
def get_90_count(date,dbname='chengjiao'):
date = time.strptime(date,"%Y-%m-%d")
date=datetime.datetime(date[0],date[1],date[2])
lastdate= date + datetime.timedelta(days = -90)
lastdate=lastdate.strftime("%Y-%m-%d")
#print "90 day before",date,lastdate
conn = sqlite3.connect("database/"+dbname+".db")
sqlstring=(u'select count(key) from chengjiao where riqi=\'%s\'')%(lastdate)
cursor=conn.execute(sqlstring)
c=0
for row in cursor:
c=row[0]
conn.close()
return c
def get_latest_15_count():
date=time.strftime('%Y-%m-%d',time.localtime())
datetime=time.strftime('%Y%m%d',time.localtime())
count= GetCountFromSummary(datetime)
count90= get_latest_90_count(date)
    print 'deals in the last 90 days, counted up to:',GetMaxRiQi(),count90
delta=count-count90
countbetween=get_chengjiao_count(' riqi>=\'2017-02-01\' and riqi <= \'2017-01-08\'')
print delta+countbetween
def get_day_count(date=None):
if date==None:
date=time.strftime('%Y-%m-%d',time.localtime())
date = time.strptime(date,"%Y-%m-%d")
date=datetime.datetime(date[0],date[1],date[2])
predate= date + datetime.timedelta(days = -1)
day=date.strftime("%Y%m%d")
preday=predate.strftime("%Y%m%d")
preday2=predate.strftime("%Y-%m-%d")
countday=GetCountFromSummary(day)
countpreday=GetCountFromSummary(preday)
coutpreday90=get_90_count(preday2)
#print countday,countpreday,coutpreday90
print preday2,"chenjiao",countday-(countpreday-coutpreday90)
def GetChengJiaoPreviousData(dbname='chengjiao'):
conn = sqlite3.connect("database/"+dbname+".db")
sqlstring=(u'select * from chengjiao order by riqi desc')
cursor=conn.execute(sqlstring)
chengjiao=[]
count=1
with open('report/chengjiaoPreviousJiaGe.txt','w+') as chenjiaofile:
for row in cursor:
chen=[]
chen.append(row[0]) #key
chen.append(row[6]) #zongjia
chen.append(row[1]) #xiaoqu
chen.append(row[2]) #fang
chen.append(row[3]) #mianji
chen.append(row[4]) #qu
chen.append(row[5]) #zhen
if row[5] == u'北3蔡':
continue
chen.append(row[7]) #danjia
chen.append(row[8])
chen.append(row[9]) #riqi
chen.append(GetPriceFromDbList(row[0]))
chengjiao.append(chen)
print chen[0],chen[1],chen[2],chen[3],chen[4],chen[5],chen[6],chen[7],chen[8],chen[9],chen[10]
count+=1
lastprice=0
change=0
daikan=0
if len(chen[10])>0:
lastprice=int(chen[10][1])
daikan=int(chen[10][0])
change=int(chen[1])-lastprice
txt=('%s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s\n')%(chen[0],chen[2],chen[1],lastprice,change,daikan,chen[3],\
chen[4],chen[5],chen[6],chen[7],chen[8],chen[9],chen[10])#chen[10] pricelist latest to oldest
chenjiaofile.write(txt)
chenjiaofile.flush()
def GetChenJiaoPerDay():
    # the displayed data lags by one day
date=time.strftime('%Y-%m-%d',time.localtime())
get_day_count(date)
date = time.strptime(date,"%Y-%m-%d")
while(True):
date=datetime.datetime(date[0],date[1],date[2])
predate= date + datetime.timedelta(days = -1)
predate=predate.strftime("%Y-%m-%d")
date=predate
date = time.strptime(date,"%Y-%m-%d")
if(predate=='2017-02-23'):
break
get_day_count(predate)
if __name__ == "__main__":
if 1:
#Analysis('chengjiao')
#TrendXiaoQuMonth('chengjiao',GetXiaoquData('chengjiao'))
GetChengJiaoPreviousData()
pass
else:
pass
GetChengJiaoPreviousData()
ChenJiaoShangHaiPerMonth()
get_latest_15_count()
GetChenJiaoPerDay()
TrendZhenMonth('chengjiao')
TrendXiaoQuMonth('chengjiao',GetXiaoquData('chengjiao'))
TrendShanghaiMonth('chengjiao')
|
python
|
from flask import Blueprint, request
from libs.tools import json_response, JsonParser, Argument
from .models import NotifyWay
blueprint = Blueprint(__name__, __name__)
@blueprint.route('/', methods=['GET'])
def get():
form, error = JsonParser(Argument('page', type=int, default=1, required=False),
Argument('pagesize', type=int, default=10, required=False),
Argument('notify_query', type=dict, required=False), ).parse(request.args)
if error is None:
notify_data = NotifyWay.query
if form.page == -1:
return json_response({'data': [x.to_json() for x in notify_data.all()], 'total': -1})
if form.notify_query.get('name_field'):
notify_data = notify_data.filter(NotifyWay.name.like('%{}%'.format(form.notify_query['name_field'])))
result = notify_data.limit(form.pagesize).offset((form.page - 1) * form.pagesize).all()
return json_response({'data': [x.to_json() for x in result], 'total': notify_data.count()})
return json_response(message=error)
@blueprint.route('/', methods=['POST'])
def post():
form, error = JsonParser('name', 'value',
Argument('desc', nullable=True)).parse()
if error is None:
notify_is_exist = NotifyWay.query.filter_by(name=form.name).first()
if notify_is_exist:
            return json_response(message="Notification name already exists")
NotifyWay(**form).save()
return json_response()
return json_response(message=error)
@blueprint.route('/<int:u_id>', methods=['DELETE'])
def delete(u_id):
NotifyWay.query.get_or_404(u_id).delete()
return json_response(), 204
@blueprint.route('/<int:n_id>', methods=['PUT'])
def put(n_id):
form, error = JsonParser('name', 'value',
Argument('desc', nullable=True)).parse()
if error is None:
notify_info = NotifyWay.query.get_or_404(n_id)
if not notify_info.update(**form):
notify_info.save()
return json_response(notify_info)
return json_response(message=error)
|
python
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### We add an option contract using 'QCAlgorithm.AddOptionContract' and place a trade, the underlying
### gets deselected from the universe selection but should still be present since we manually added the option contract.
### Later we call 'QCAlgorithm.RemoveOptionContract' and expect both option and underlying to be removed.
### </summary>
class AddOptionContractFromUniverseRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''
self.SetStartDate(2014, 6, 5)
self.SetEndDate(2014, 6, 9)
self._expiration = datetime(2014, 6, 21)
self._securityChanges = None
self._option = None
self._traded = False
self._twx = Symbol.Create("TWX", SecurityType.Equity, Market.USA)
self._aapl = Symbol.Create("AAPL", SecurityType.Equity, Market.USA)
self.UniverseSettings.Resolution = Resolution.Minute
self.UniverseSettings.DataNormalizationMode = DataNormalizationMode.Raw
self.AddUniverse(self.Selector, self.Selector)
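        # the same selector is registered for both stages of universe selection (presumably the
        # coarse and fine passes); per Selector below, the universe flips from TWX to AAPL after 2014-06-05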
def Selector(self, fundamental):
if self.Time <= datetime(2014, 6, 5):
return [ self._twx ]
return [ self._aapl ]
def OnData(self, data):
'''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
Arguments:
data: Slice object keyed by symbol containing the stock data
'''
        if self._option is not None and self.Securities[self._option].Price != 0 and not self._traded:
self._traded = True
self.Buy(self._option, 1)
if self.Time == datetime(2014, 6, 6, 14, 0, 0):
# liquidate & remove the option
self.RemoveOptionContract(self._option)
def OnSecuritiesChanged(self, changes):
# keep track of all removed and added securities
        if self._securityChanges is None:
self._securityChanges = changes
else:
self._securityChanges.op_Addition(self._securityChanges, changes)
if any(security.Symbol.SecurityType == SecurityType.Option for security in changes.AddedSecurities):
return
for addedSecurity in changes.AddedSecurities:
options = self.OptionChainProvider.GetOptionContractList(addedSecurity.Symbol, self.Time)
options = sorted(options, key=lambda x: x.ID.Symbol)
option = next((option for option in options if option.ID.Date == self._expiration and option.ID.OptionRight == OptionRight.Call and option.ID.OptionStyle == OptionStyle.American), None)
self.AddOptionContract(option)
# just keep the first we got
            if self._option is None:
self._option = option
|
python
|
import pytest
from django.contrib.auth.models import User
from shrubberies.factories import UserFactory
from shrubberies.models import Profile
from .rules import Is, current_user
@pytest.mark.django_db
def test_is_user_function():
u1 = UserFactory()
u2 = UserFactory()
is_own_profile = Is(lambda u: u.profile)
assert is_own_profile.check(u1, u1.profile)
assert is_own_profile.check(u2, u2.profile)
assert not is_own_profile.check(u1, u2.profile)
assert not is_own_profile.check(u2, u1.profile)
qs1 = is_own_profile.filter(u1, Profile.objects.all())
qs2 = is_own_profile.filter(u2, Profile.objects.all())
assert qs1.count() == 1
assert u1.profile in qs1
assert u2.profile not in qs1
assert qs2.count() == 1
assert u2.profile in qs2
assert u1.profile not in qs2
@pytest.mark.django_db
def test_is_never_global():
user = UserFactory()
is_own_profile = Is(lambda u: u.profile)
assert not is_own_profile.check(user)
@pytest.mark.django_db
def test_current_user():
u1 = UserFactory()
u2 = UserFactory()
assert current_user.check(u1, u1)
assert not current_user.check(u1, u2)
assert set(current_user.filter(u1, User.objects.all())) == {u1}
|
python
|
if __name__ == '__main__':
from scummer.validator import Validator
t = {
'a': 'x',
'b': {
'b1': 123
},
'c': [1,2],
'd': {
'x': 1,
'y': 'aaaa'
}
}
v = Validator(schema={
'a': ('enum',{
'items': ['x','y']
}),
'b': {
'b1': ['str','int'],
'b2': ('int',{'required':False})
},
'c': 'int[]',
'd': ('map',{
'definition': 'int'
})
})
v.validate(t)
|
python
|
import pandas as pd
import matplotlib.pyplot as plt
def main():
results_df = pd.read_csv("results_nm.csv", delimiter=",")
fig_0, ax_0 = plt.subplots()
fig_1, ax_1 = plt.subplots()
for m in results_df.m.unique():
m_subset = results_df[results_df.m == m]
m_means = []
m_vars = []
m_evals = []
m_evals_var = []
for steps in m_subset.steps.unique():
steps_subset = m_subset[m_subset.steps == steps]
mean_overlap = steps_subset.squared_overlap.mean()
var_overlap = steps_subset.squared_overlap.var()
m_means.append(mean_overlap)
m_vars.append(var_overlap)
mean_evals = steps_subset.bfgs_evaluations.mean()
var_evals = steps_subset.bfgs_evaluations.var()
m_evals.append(mean_evals)
m_evals_var.append(var_evals)
ax_0.errorbar(m_subset.steps.unique(), m_means, yerr=m_vars, label=str(int(m)), marker="o", capsize=2)
ax_1.errorbar(m_subset.steps.unique(), m_evals, yerr=m_evals_var, label=str(int(m)), marker="o", capsize=2)
ax_0.set_xlabel("Number of circuit layers")
ax_0.set_ylabel("Squared overlap")
ax_0.set_ylim([0, 1.05])
ax_0.legend()
ax_1.set_xlabel("Number of circuit layers")
ax_1.set_ylabel("BFGS function evaluations")
ax_1.legend()
fig_0.savefig("squared_overlap")
fig_1.savefig("bfgs_evaluations")
if __name__ == '__main__':
main()
|
python
|
import json
import sys
import os
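# Frame comments are parsed as call-stack entries that look roughly like
# "module!symbol+0xOFFSET" or "{hashsum}...+0xOFFSET" (assumed format, inferred from the
# split logic in get_offset/get_hashsum below).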
def get_offset(call):
s = call.split("+")
try:
return int(s[1], 16)
    except (IndexError, ValueError):
pass
def get_hashsum(call):
s = call.split("{")
try:
ss = s[1].split("}")
return ss[0]
    except IndexError:
s = call.split("!")
try:
return s[0]
        except IndexError:
return
def generate_tag(call, proto):
offset = get_offset(call)
if offset is None:
return
hashsum = get_hashsum(call)
if hashsum is None:
return
return {'offset': offset, 'tag': proto, 'feeder': 'TagPcap', 'hash': hashsum}
def export_tags(tag_list, jsonf):
out_f = jsonf + "_TagPcap" + ".json"
    with open(out_f, 'w') as f:
json.dump(tag_list, f)
print("[TagPcap] Tags created : " + str(len(tag_list)))
print("[TagPcap] Tags exported : " + out_f)
def usage():
print("Usage : TagPcap.py <JSON PCAP file>")
def main(argv):
    try:
        jsonf = argv[0]
    except IndexError:
usage()
return
tag_list = []
with open(jsonf, "r") as f:
json_data = json.load(f)
for l0 in json_data:
try:
callstack = l0["_source"]["layers"]["pkt_comment"]["frame.comment"]
calls = callstack.splitlines()
proto = l0["_source"]["layers"]["frame"]["frame.protocols"]
except:
continue
for call in calls:
tag = generate_tag(call, proto)
if tag is not None:
tag_list.append(tag)
export_tags(tag_list, jsonf)
if __name__ == "__main__":
main(sys.argv[1:])
|
python
|
import os
import shutil
# optional: if you get a SSL CERTIFICATE_VERIFY_FAILED exception
import ssl
import sys
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from urllib.request import urlopen, urlretrieve
from zipfile import ZipFile, is_zipfile
import pandas as pd
from tqdm import tqdm
import socceraction.spadl as spadl
import socceraction.spadl.statsbomb as statsbomb
import socceraction.spadl.wyscout as wyscout
ssl._create_default_https_context = ssl._create_unverified_context
_data_dir = os.path.dirname(__file__)
def read_json_file(filename):
with open(filename, 'rb') as json_file:
return BytesIO(json_file.read()).getvalue().decode('unicode_escape')
def download_statsbomb_data():
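    """Download the StatsBomb open-data archive and move its data folder to statsbomb/raw."""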
dataset_url = 'https://github.com/statsbomb/open-data/archive/master.zip'
tmp_datafolder = os.path.join(_data_dir, 'statsbomb', 'tmp')
raw_datafolder = os.path.join(_data_dir, 'statsbomb', 'raw')
for datafolder in [tmp_datafolder, raw_datafolder]:
if not os.path.exists(datafolder):
os.makedirs(datafolder, exist_ok=True)
statsbombzip = os.path.join(tmp_datafolder, 'statsbomb-open-data.zip')
with urlopen(dataset_url) as dl_file:
with open(statsbombzip, 'wb') as out_file:
out_file.write(dl_file.read())
with ZipFile(statsbombzip, 'r') as zipObj:
zipObj.extractall(tmp_datafolder)
shutil.rmtree(raw_datafolder)
Path(f'{tmp_datafolder}/open-data-master/data').rename(raw_datafolder)
shutil.rmtree(tmp_datafolder)
def convert_statsbomb_data():
seasons = {
3: '2018',
}
leagues = {
'FIFA World Cup': 'WorldCup',
}
spadl_datafolder = os.path.join(_data_dir, 'statsbomb')
free_open_data_remote = 'https://raw.githubusercontent.com/statsbomb/open-data/master/data/'
SBL = statsbomb.StatsBombLoader(root=free_open_data_remote, getter='remote')
# View all available competitions
df_competitions = SBL.competitions()
df_selected_competitions = df_competitions[
df_competitions.competition_name.isin(leagues.keys())
]
for competition in df_selected_competitions.itertuples():
# Get games from all selected competition
games = SBL.games(competition.competition_id, competition.season_id)
games_verbose = tqdm(list(games.itertuples()), desc='Loading match data')
teams, players = [], []
competition_id = leagues[competition.competition_name]
season_id = seasons[competition.season_id]
spadl_h5 = os.path.join(spadl_datafolder, f'spadl-{competition_id}-{season_id}.h5')
with pd.HDFStore(spadl_h5) as spadlstore:
spadlstore.put('actiontypes', spadl.actiontypes_df(), format='table')
spadlstore.put('results', spadl.results_df(), format='table')
spadlstore.put('bodyparts', spadl.bodyparts_df(), format='table')
for game in games_verbose:
# load data
teams.append(SBL.teams(game.game_id))
players.append(SBL.players(game.game_id))
events = SBL.events(game.game_id)
# convert data
spadlstore.put(
f'actions/game_{game.game_id}',
statsbomb.convert_to_actions(events, game.home_team_id),
format='table',
)
games.season_id = season_id
games.competition_id = competition_id
spadlstore.put('games', games)
spadlstore.put(
'teams',
pd.concat(teams).drop_duplicates('team_id').reset_index(drop=True),
)
spadlstore.put(
'players',
pd.concat(players).drop_duplicates('player_id').reset_index(drop=True),
)
def download_wyscout_data():
# https://figshare.com/collections/Soccer_match_event_dataset/4415000/5
dataset_urls = dict(
competitions='https://ndownloader.figshare.com/files/15073685',
teams='https://ndownloader.figshare.com/files/15073697',
players='https://ndownloader.figshare.com/files/15073721',
games='https://ndownloader.figshare.com/files/14464622',
events='https://ndownloader.figshare.com/files/14464685',
)
raw_datafolder = os.path.join(_data_dir, 'wyscout_public', 'raw')
if not os.path.exists(raw_datafolder):
os.makedirs(raw_datafolder, exist_ok=True)
# download and unzip Wyscout open data
for url in tqdm(dataset_urls.values(), desc='Downloading data'):
url_obj = urlopen(url).geturl()
path = Path(urlparse(url_obj).path)
file_name = os.path.join(raw_datafolder, path.name)
file_local, _ = urlretrieve(url_obj, file_name)
if is_zipfile(file_local):
with ZipFile(file_local) as zip_file:
zip_file.extractall(raw_datafolder)
def convert_wyscout_data():
seasons = {
10078: '2018',
}
leagues = {
28: 'WorldCup',
}
raw_datafolder = os.path.join(_data_dir, 'wyscout_public', 'raw')
spadl_datafolder = os.path.join(_data_dir, 'wyscout_public')
# select competitions
json_competitions = read_json_file(f'{raw_datafolder}/competitions.json')
df_competitions = pd.read_json(json_competitions)
# Rename competitions to the names used in the file names
df_competitions['name'] = df_competitions.apply(
lambda x: x.area['name'] if x.area['name'] != '' else x['name'], axis=1
)
df_selected_competitions = df_competitions[df_competitions.wyId.isin(leagues.keys())]
json_teams = read_json_file(f'{raw_datafolder}/teams.json')
df_teams = wyscout.convert_teams(pd.read_json(json_teams))
json_players = read_json_file(f'{raw_datafolder}/players.json')
df_players = wyscout.convert_players(pd.read_json(json_players))
for competition in df_selected_competitions.itertuples():
json_games = read_json_file(
f"{raw_datafolder}/matches_{competition.name.replace(' ', '_')}.json"
)
df_games = pd.read_json(json_games)
competition_id = leagues[competition.wyId]
season_id = seasons[df_games.seasonId.unique()[0]]
df_games = wyscout.convert_games(df_games)
df_games['competition_id'] = competition_id
df_games['season_id'] = season_id
json_events = read_json_file(
f"{raw_datafolder}/events_{competition.name.replace(' ', '_')}.json"
)
df_events = pd.read_json(json_events).groupby('matchId', as_index=False)
spadl_h5 = os.path.join(spadl_datafolder, f'spadl-{competition_id}-{season_id}.h5')
# Store all spadl data in h5-file
print(f'Converting {competition_id} {season_id}')
with pd.HDFStore(spadl_h5) as spadlstore:
spadlstore['actiontypes'] = spadl.actiontypes_df()
spadlstore['results'] = spadl.results_df()
spadlstore['bodyparts'] = spadl.bodyparts_df()
spadlstore['games'] = df_games
for game in tqdm(list(df_games.itertuples())):
game_id = game.game_id
game_events = wyscout.convert_events(df_events.get_group(game_id))
# convert events to SPADL actions
home_team = game.home_team_id
df_actions = wyscout.convert_to_actions(game_events, home_team)
df_actions['action_id'] = range(len(df_actions))
spadlstore[f'actions/game_{game_id}'] = df_actions
spadlstore['players'] = df_players
spadlstore['teams'] = df_teams[
df_teams.team_id.isin(df_games.home_team_id)
| df_teams.team_id.isin(df_games.away_team_id)
]
if __name__ == '__main__':
if len(sys.argv) == 1 or sys.argv[1] == 'statsbomb':
download_statsbomb_data()
convert_statsbomb_data()
if len(sys.argv) == 1 or sys.argv[1] == 'wyscout':
download_wyscout_data()
convert_wyscout_data()
|
python
|
def test_latest():
print('\n >>> start Latest Features... \n')
import talos
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
x, y = talos.templates.datasets.iris()
p = {'activation': ['relu', 'elu'],
'optimizer': ['Nadam', 'Adam'],
'losses': ['logcosh'],
'shapes': ['brick'],
'first_neuron': [16, 32, 64, 128],
'hidden_layers': [0, 1, 2, 3],
'dropout': [.2, .3, .4],
'batch_size': [20, 30, 40, 50],
'epochs': [10]}
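    # 'p' above is the hyperparameter search space that talos.Scan samples from below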
def iris_model(x_train, y_train, x_val, y_val, params):
model = Sequential()
model.add(Dense(params['first_neuron'],
input_dim=4,
activation=params['activation']))
talos.utils.hidden_layers(model, params, 3)
model.add(Dense(3, activation='softmax'))
model.compile(optimizer=params['optimizer'],
loss=params['losses'], metrics=['acc'])
out = model.fit(x_train,
y_train,
callbacks=[talos.utils.ExperimentLogCallback('test_latest', params)],
batch_size=params['batch_size'],
epochs=params['epochs'],
validation_data=(x_val, y_val),
verbose=0)
return out, model
scan_object = talos.Scan(x, y,
model=iris_model,
params=p,
experiment_name='test_latest',
round_limit=5,
reduction_method='gamify',
save_weights=False)
    print('finished Latest Features \n')
|
python
|
# Time Complexity: O(n^2)
# Space Complexity: O(n)
from typing import List

class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
res = []
nums.sort()
for cur in range(len(nums)):
if nums[cur] > 0: break
            if cur > 0 and nums[cur] == nums[cur-1]: continue  # skip duplicate anchor values (key point)
left = cur + 1
right = len(nums) - 1
while left < right:
if nums[cur] + nums[left] + nums[right] > 0:
# while left < right and nums[right-1] == nums[right]:
# right -= 1
right -= 1
elif nums[cur] + nums[left] + nums[right] < 0:
# while left < right and nums[left+1] == nums[left]:
# left += 1
left += 1
else:
res.append([nums[cur], nums[left], nums[right]])
print([nums[cur], nums[left], nums[right]])
while left < right and nums[left+1] == nums[left]:
left += 1
while left < right and nums[right-1] == nums[right]:
right -= 1
left += 1
right -= 1
return res
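# Example usage (hypothetical driver, not part of the original solution):
#   Solution().threeSum([-1, 0, 1, 2, -1, -4])  # -> [[-1, -1, 2], [-1, 0, 1]]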
|
python
|
import pandas as pd
import os
# df = pd.read_csv('./train_annotation_list.csv')
# for i in range (len(df['Image_Path'])):
# dirname = os.path.dirname(df['Image_Path'][i])
# patient_name = os.path.basename(df['Image_Path'][i])
# patient_no = int(patient_name.split('.')[0].split('_')[1])
# folder = 'center_'+str(int(patient_no//20))
# corrected_path = os.path.join(dirname, folder, patient_name)
# # print (corrected_path)
# df['Image_Path'][i] = corrected_path
df = pd.read_csv('./annotated_train_data.csv')
for i in range (len(df['Image_Path'])):
image_path = df['Image_Path'][i]
label_path = df['Mask_Path'][i]
if label_path == 'empty':
print (label_path)
|
python
|
import pandas as pd
import matplotlib.pyplot as plt
import errno
import os
import numpy as np
from pathlib import Path
import data_helper as dh
# Counts the number of learning agents in one log path
def count_learning_agents_in_logs_path(logs_path):
count = 0
# loop through win rates directories in logs
for content_path in logs_path.rglob("*"):
# check if content is directory
if not content_path.is_dir():
continue
# Create file path to win_rates.csv
win_rates_path = content_path / 'win_rates.csv'
# Check if win rate csv exists
if not win_rates_path.exists():
continue
# Get directory name
directory_name = content_path.name
# Split directory name into agent names
agent_names = directory_name.split('_vs_')
# Iterate over both agents
for agent in agent_names:
# Plot only winrates for not-random agents
if 'Random' in agent:
continue
count = count + 1
return count
if __name__=='__main__':
# Create path to experiments directory
experiments_path = Path(Path().cwd().parents[0] / 'experiments')
# Loop through all training directories
for train_directory_path in experiments_path.rglob("*"):
# check if train_directory_path is directory
if not train_directory_path.is_dir():
continue
# Loop through experiment directories
for experiment_directory_path in train_directory_path.rglob("*"):
# check if experiment_directory_path is directory
if not experiment_directory_path.is_dir():
continue
# Create path to train/logs
logs_path = experiment_directory_path / 'train/logs'
# Check path to train/logs is available
if not logs_path.exists():
continue
# Create new plot for each experiment
plt.figure()
# Current agent number
i = 0
# Number of learning agents in this experiment
n = count_learning_agents_in_logs_path(logs_path)
# loop through win rates directories in logs
for content_path in sorted(logs_path.rglob("*")):
# check if content is directory
if not content_path.is_dir():
continue
# Create file path to win_rates.csv
win_rates_path = content_path / 'win_rates.csv'
# Check if win rate csv exists
if not win_rates_path.exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), win_rates_path)
# Read dataframe from csv file
winrate_df = dh.get_winrate_df_from_csv(win_rates_path)
# Get directory name
directory_name = content_path.name
# Split directory name into agent names
agent_names = directory_name.split('_vs_')
# Iterate over both agents
for agent in agent_names:
# Plot only winrates for not-random agents
if 'Random' in agent:
continue
# Get side of agent
side = dh.WINRATE_A if agent.split('_')[-1] == 'SideA' else dh.WINRATE_B
# Create list with winrates to investigate
winrate_bars = [0.60, 0.70, 0.80, 0.85, 0.90, 0.95, 0.97, 0.99]
# Create empty list for episode marks
episodes_for_winrate = []
# Get episodes, when the winrates are reached
for winrate in winrate_bars:
found_episode = False
for index, row in winrate_df.iterrows():
if row[side] > winrate and not found_episode:
episodes_for_winrate.append(row[dh.EPISODE])
found_episode = True
break
if not found_episode:
episodes_for_winrate.append(0)
width = 0.175
# the label locations
label_locations = np.arange(len(winrate_bars))
# Plot winrate of agent
plt.bar(label_locations - ((n - 1)/2 * width) + i * width, episodes_for_winrate, width, label=agent)
i = i + 1
# Set tick labels
plt.xticks(label_locations, winrate_bars)
# Set the x and y axis label
plt.xlabel('win rate')
plt.ylabel(dh.EPISODE)
# Show legend
plt.legend()
# Create path to plots directory
            plots_path = experiment_directory_path / 'plots'
# Check plot directory is available
if not plots_path.exists():
os.mkdir(plots_path)
# Create path to convergence_speed directory
convergence_speed_path = Path(plots_path / 'convergence_speed')
# Check convergence_speed directory is available
if not convergence_speed_path.exists():
os.mkdir(convergence_speed_path)
            # Name of the output image file
image_name = 'convergence_speed.png'
# Create image path
image_path = convergence_speed_path / image_name
# Save plot as image
plt.savefig(image_path)
|
python
|
# TODO: change name to queue
from db_works import db_connect, db_tables
import datetime
def get_settings(interval_param_):
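    """Fetch the next pending download setting for the given interval type and mark it as in progress (download_setting_status_id = 1)."""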
db_schema_name, db_table_name, db_settings_table_name = db_tables()
cursor, cnxn = db_connect()
# interval parameter: current - API data; daily_hist - data from daily files; monthly_hist - data from monthly files
if interval_param_ == "current":
cursor.execute(
"SELECT download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, "
"current_range_to_overwrite, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp "
"FROM " + db_schema_name + "." + db_settings_table_name + " WHERE current_update_from_api = 1 and "
"download_setting_status_id = 0 and "
"daily_hist_complete = 1 AND "
"monthly_hist_complete = 1 AND "
"coalesce(next_download_ux_timestamp, 0) <= "
+ str(int(datetime.datetime.utcnow().timestamp())) + " order by next_download_ux_timestamp asc limit 1")
elif interval_param_ == "daily_hist":
cursor.execute("SELECT download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, "
"current_range_to_overwrite, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp "
"FROM " + db_schema_name + "." + db_settings_table_name + " WHERE daily_update_from_files = 1 and "
"download_setting_status_id = 0 and "
"daily_hist_complete = 0 AND "
"monthly_hist_complete = 1 AND "
"coalesce(start_hist_download_ux_timestamp, 0) <= "
+ str(int(datetime.datetime.utcnow().timestamp())) + " order by start_hist_download_ux_timestamp asc limit 1")
elif interval_param_ == "monthly_hist":
cursor.execute("SELECT download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, "
"current_range_to_overwrite, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp "
"FROM " + db_schema_name + "." + db_settings_table_name + " WHERE monthly_update_from_files = 1 and "
"download_setting_status_id = 0 and "
"monthly_hist_complete = 0 AND "
"coalesce(start_hist_download_ux_timestamp, 0) <= "
+ str(int(datetime.datetime.utcnow().timestamp())) + " order by start_hist_download_ux_timestamp asc limit 1")
else:
exit()
download_setting = cursor.fetchall()
if len(download_setting) > 0:
download_settings_id = download_setting[0][0]
market = download_setting[0][1]
tick_interval = download_setting[0][2]
data_granulation = download_setting[0][3]
stock_type = download_setting[0][4]
stock_exchange = download_setting[0][5]
range_to_download = download_setting[0][6]
download_api_interval_sec = download_setting[0][7]
daily_update_from_files = download_setting[0][8]
monthly_update_from_files = download_setting[0][9]
start_hist_download_ux_timestamp = download_setting[0][10]
else:
print("no data to download")
exit()
# block current setting changing its status
cursor.execute("UPDATE " + db_schema_name + "." + db_settings_table_name + " SET download_setting_status_id = %s where download_settings_id = %s", (1, download_settings_id))
cnxn.commit()
print("settings blocked")
return download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, range_to_download, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp
print()
|
python
|
from tests.utils import TEST_DATA_DIR
from dexy.doc import Doc
from tests.utils import wrap
import os
markdown_file = os.path.join(TEST_DATA_DIR, "markdown-test.md")
def run_kramdown(ext):
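    """Run the sample markdown file through dexy's kramdown filter with the given output extension and return the cached output."""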
with open(markdown_file, 'r') as f:
example_markdown = f.read()
with wrap() as wrapper:
node = Doc("markdown.md|kramdown",
wrapper,
[],
kramdown = { 'ext' : ext },
contents = example_markdown
)
wrapper.run_docs(node)
assert node.output_data().is_cached()
return node.output_data()
def test_kramdown_html():
html = str(run_kramdown(".html"))
assert """<h2 id="download">""" in html
def test_kramdown_tex():
tex = str(run_kramdown(".tex"))
    assert r"\subsection" in tex
|
python
|
from flask import Blueprint
from flask import redirect
from flask import render_template
from flask import abort, jsonify, request
from flask_login import current_user
from flask_login import login_required
from app.models import User, load_user
from app.extensions import db
import os
import stripe
stripe.api_key = os.environ['STRIPE_SECRET_KEY']
stripe_bp = Blueprint('stripe', __name__)
stripe_prefix = '/stripe'
products = {
'private_model': {
'name': 'your own private fine-tuned model',
'price': 5000,
'per': 'month',
'adjustable_quantity': {
'enabled': True,
'minimum': 1,
'maximum': 3,
},
},
}
@stripe_bp.route('/')
def index():
if current_user.is_authenticated:
user = load_user(current_user.get_id())
allowed = user.models_allowed
return render_template('stripe.html', username=str(user), allowed=allowed, products=products,
stripe_prefix=stripe_prefix)
else:
return render_template('stripe.html', products=products,stripe_prefix=stripe_prefix)
@stripe_bp.route('/order/<product_id>', methods=['POST'])
@login_required
def order(product_id):
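    """Create a Stripe Checkout session for the chosen product and redirect the user to Stripe's hosted payment page; the user id is carried in the session metadata."""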
if product_id not in products:
abort(404)
checkout_session = stripe.checkout.Session.create(
line_items=[
{
'price_data': {
'product_data': {
'name': products[product_id]['name'],
},
'unit_amount': products[product_id]['price'],
'currency': 'usd',
},
'quantity': 1,
'adjustable_quantity': products[product_id].get(
'adjustable_quantity', {'enabled': False}),
},
],
payment_method_types=['card'],
mode='payment',
success_url=request.host_url + stripe_prefix.replace('/', '') + '/order/success',
cancel_url=request.host_url + stripe_prefix.replace('/', '') + '/order/cancel',
metadata={'userId': current_user.get_id()},
)
return redirect(checkout_session.url)
@stripe_bp.route('/order/success')
@login_required
def success():
return render_template('success.html')
@stripe_bp.route('/order/cancel')
@login_required
def cancel():
return render_template('cancel.html')
@stripe_bp.route('/webhook', methods=['POST'])
def webhook():
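    """Handle Stripe webhooks: verify the signature, and on a completed checkout grant the paying user one additional allowed model."""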
event = None
payload = request.data
sig_header = request.headers['STRIPE_SIGNATURE']
try:
event = stripe.Webhook.construct_event(
payload, sig_header, os.environ['STRIPE_WEBHOOK_SECRET']
)
except ValueError as e:
# Invalid payload
raise e
except stripe.error.SignatureVerificationError as e:
# Invalid signature
raise e
# Handle the event
if event['type'] == 'checkout.session.async_payment_failed':
session = event['data']['object']
elif event['type'] == 'checkout.session.async_payment_succeeded':
session = event['data']['object']
elif event['type'] == 'checkout.session.completed':
session = event['data']['object']
print('🔔 Payment succeeded!')
user_id = session['metadata']['userId']
user = load_user(user_id)
user.inc_models_allowed()
db.session.commit()
# session = stripe.checkout.Session.retrieve(
# event['data']['object'].id, expand=['line_items'])
# print(f'Sale to {session.customer_details.email}:')
# for item in session.line_items.data:
# print(f' - {item.quantity} {item.description} '
# f'${item.amount_total/100:.02f} {item.currency.upper()}')
# ... handle other event types
else:
print('Unhandled event type {}'.format(event['type']))
return jsonify(success=True)
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def encrypt_secrets(apps, schema_editor):
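    # Re-saving each Secret triggers the model's save() logic, which (going by the migration
    # name) re-encrypts the stored values under the new scheme.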
Secret = apps.get_model("server", "Secret")
for secret in Secret.objects.all():
secret.save()
class Migration(migrations.Migration):
dependencies = [
('server', '0006_auto_20150714_0821'),
]
operations = [
migrations.RunPython(encrypt_secrets),
]
|
python
|
import pytest
from tartiflette_middleware.examples.standalone import\
StandaloneMiddleware
from tartiflette_middleware.exceptions import\
RequestDataNotStoredException
class TestStandaloneMiddleware:
def test_standalone_example_init(self):
service = StandaloneMiddleware()
@pytest.mark.asyncio
async def test_standalone_example_call_data_not_set(self):
service = StandaloneMiddleware()
service.request = {'fake': 'data'}
with pytest.raises(RequestDataNotStoredException):
await service()
@pytest.mark.asyncio
async def test_standalone_example_call_data_set(self):
service = StandaloneMiddleware()
service.request = {'fake': 'data'}
async with service:
pass
assert await service() == 'foo'
|
python
|
import pyrebase
config={
"apiKey": "AIzaSyDYt-fmafI1kkMZSIphL829C6QgdlE1Tro",
"authDomain": "cp19-12.firebaseapp.com",
"databaseURL": "https://cp19-12.firebaseio.com",
" projectId": "cp19-12",
"storageBucket": "cp19-12.appspot.com",
"messagingSenderId": "681358965828",
"appId": "1:681358965828:web:3e31fb7429aed930"
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
storage=firebase.storage()
import flask
from flask import Flask , render_template
app = Flask(__name__)
@app.route("/")
def hello():
    new_post = db.child("names").get()
    user = new_post.val()
    # .val() returns a dict-like mapping of stored entries; join its keys so Flask
    # receives a plain string response (dict has no .key() method)
    return ", ".join(user.keys()) if user else "no names found"
@app.route("/Taha")
def Taha():
return "Hello taha!"
@app.route("/Daniyal")
def Daniyal():
return "Hello Daniyal"
@app.route("/fahad")
def fahad():
return "Hello fahad"
app.run(debug=True)
|