max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---|
pkgs/ops-pkg/src/genie/libs/ops/dot1x/ios/tests/test_dot1x.py | jbronikowski/genielibs | 94 | 9900 | # Python
import unittest
from copy import deepcopy
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Genie
from genie.libs.ops.dot1x.ios.dot1x import Dot1X
from genie.libs.ops.dot1x.ios.tests.dot1x_output import Dot1xOutput
# Parser
from genie.libs.parser.ios.show_dot1x import ShowDot1xAllDetail, \
ShowDot1xAllStatistics, \
ShowDot1xAllSummary, \
ShowDot1xAllCount
class test_dot1x(unittest.TestCase):
def setUp(self):
self.device = Device(name='aDevice')
self.device.os = 'ios'
self.device.custom['abstraction'] = {'order':['os']}
self.device.mapping={}
self.device.mapping['cli']='cli'
# Give the device as a connection type
# This is done in order to call the parser on the output provided
self.device.connectionmgr.connections['cli'] = self.device
def test_complete_output(self):
self.maxDiff = None
dot1x = Dot1X(device=self.device)
# Get outputs
dot1x.maker.outputs[ShowDot1xAllDetail] = \
{'': Dot1xOutput.ShowDot1xAllDetail}
dot1x.maker.outputs[ShowDot1xAllStatistics] = \
{'': Dot1xOutput.ShowDot1xAllStatistics}
dot1x.maker.outputs[ShowDot1xAllSummary] = \
{'': Dot1xOutput.ShowDot1xAllSummary}
dot1x.maker.outputs[ShowDot1xAllCount] = \
{'': Dot1xOutput.ShowDot1xAllCount}
# Learn the feature
dot1x.learn()
# Verify Ops was created successfully
self.assertEqual(dot1x.info, Dot1xOutput.Dot1x_info)
# Check Selected Attributes
self.assertEqual(dot1x.info['version'], 3)
# info - dot1x default
self.assertEqual(dot1x.info['interfaces']['GigabitEthernet1/0/9']\
['max_start'], 3)
def test_empty_output(self):
self.maxDiff = None
dot1x = Dot1X(device=self.device)
dot1x.maker.outputs[ShowDot1xAllDetail] = \
{'': {}}
dot1x.maker.outputs[ShowDot1xAllStatistics] = \
{'': {}}
dot1x.maker.outputs[ShowDot1xAllSummary] = \
{'': {}}
dot1x.maker.outputs[ShowDot1xAllCount] = \
{'': {}}
# Learn the feature
dot1x.learn()
# Check that accessing a missing attribute raises AttributeError
with self.assertRaises(AttributeError):
dot1x.info['version']
def test_incomplete_output(self):
self.maxDiff = None
dot1x = Dot1X(device=self.device)
# Get outputs
dot1x.maker.outputs[ShowDot1xAllDetail] = \
{'': Dot1xOutput.ShowDot1xAllDetail}
dot1x.maker.outputs[ShowDot1xAllStatistics] = \
{'': Dot1xOutput.ShowDot1xAllStatistics}
dot1x.maker.outputs[ShowDot1xAllSummary] = \
{'': Dot1xOutput.ShowDot1xAllSummary}
dot1x.maker.outputs[ShowDot1xAllCount] = \
{'': {}}
# Learn the feature
dot1x.learn()
# Delete missing specific attribute values
expect_dict = deepcopy(Dot1xOutput.Dot1x_info)
del(expect_dict['sessions'])
# Verify Ops was created successfully
self.assertEqual(dot1x.info, expect_dict)
if __name__ == '__main__':
unittest.main()
| 2.40625 | 2 |
script/analysis/check_transformation_matrices.py | lanl/nubhlight | 16 | 9901 | # ======================================================================
# copyright 2020. Triad National Security, LLC. All rights
# reserved. This program was produced under U.S. Government contract
# 89233218CNA000001 for Los Alamos National Laboratory (LANL), which
# is operated by Triad National Security, LLC for the U.S. Department
# of Energy/National Nuclear Security Administration. All rights in
# the program are reserved by Triad National Security, LLC, and the
# U.S. Department of Energy/National Nuclear Security
# Administration. The Government is granted for itself and others
# acting on its behalf a nonexclusive, paid-up, irrevocable worldwide
# license in this material to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
# ======================================================================
# Authors: <NAME> (<EMAIL>)
# Purpose:
# Provides a check of whether a coordinate transformation of the metric
# from code coordinates to Kerr-Schild coordinates produces the correct
# metric, consistent with the closed form (as in, e.g., Eq. (3) of
# McKinney & Gammie 2004, https://arxiv.org/abs/astro-ph/0404512)
#
# Functions:
# - print_matrix
# - check_transformation_matrices
#
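# Note (added for clarity): the check below performs the standard covariant tensor
# transformation, in index notation
#     gks_{ij} = sum_{k,l} Lambda^{k}_{i} * Lambda^{l}_{j} * gcov_{kl},
# where Lambda is geom['Lambda_h2bl_cov']; the result is then compared against the
# closed-form Kerr-Schild components written out in check_transformation_matrices().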
from math import *
import numpy as np
def print_matrix(matrix,fmt="%19.11e",tostdout=True) -> str:
"""Pretty-prints a matrix to a string (optinally, to stdout)
Parameters
----------
matrix : numpy.array([N,M])
matrix to print
fmt : str
C-style format of each element (default: "%19.11e")
tostdout : bool
output to stdout (default: true)
Returns
-------
str
formatted output string
"""
N = matrix.shape[0]
M = matrix.shape[1]
s = "["
for i in range(N):
s+= "["
for j in range(M):
s+= (fmt % matrix[i,j])
if j < M - 1: s += ", "
s+= "]"
if i < N - 1: s += ",\n "
s+="]"
if tostdout: print(s)
return s
def check_transformation_matrices(geom, a, ir, jth,
verbose=True, tol=1e-12) -> bool:
"""Transforms the metric to spherical KS and compares with analytic formula
Test 1: covariant metric, gcov, at A = {ir, jth}
1.1 sample gcov and Lambda_h2bl_cov at A
1.2 transform gcov to gks using transformation matrices
1.3 compare to expected values at {r,th} at A
Parameters
----------
geom : dictionary
nubhlight geom object
a : Float
dimensionless Kerr spin parameter
ir : Integer
index of sample point in radial direction
jth : Integer
index of sample point in angular theta-direction
verbose : bool
output steps to stdout
tol : Float
tolerance to relative error (wrt det g)
Returns
-------
bool
True if all checks passed
Examples
--------
import hdf5_to_dict as io
hdr = io.load_hdr("dump_00000010.h5")
geom = io.load_geom(hdr,recalc=True)
check_transformation_matrices(geom, a=-1, ir=64, jth=64)
"""
# sample gcov and h2bl at point A
gcov_A = geom['gcov'][ir,jth]
h2bl_A = geom['Lambda_h2bl_cov'][ir,jth]
# sample r and theta, compute BL metric-related quantities
r = geom['r'][ir,jth,0]; r2 = r*r
a2 = a*a
th= geom['th'][ir,jth,0]
sth2= sin(th)**2
Delta= r2 - 2*r + a2
Sigma= r2 + a2*cos(th)**2
A = (r2 + a2)**2 - a2*Delta*sin(th)**2
if verbose:
print ("r = %19.11e" % r)
print ("theta = %19.11e" % th)
print ("a = %19.11e" % a)
print ("Delta = %19.11e" % Delta)
print ("Sigma = %19.11e" % Sigma)
print ("A = %19.11e" % A)
# output metric
print ("gcov_A = ")
print_matrix (gcov_A)
print ("")
# output transformation matrix
print ("h2bl_A = ")
print_matrix (h2bl_A)
print ("")
# compute the KS metric at A
gks_A = np.zeros([4,4])
for i in range(4):
for j in range(4):
for k in range(4):
for l in range(4):
gks_A[i,j] = gks_A[i,j] + h2bl_A[k,i]*h2bl_A[l,j]*gcov_A[k,l]
if verbose:
print ("gks_A = ")
print_matrix (gks_A)
print("")
# expected values at {r, th}
g_tt = -1. + 2.*r/Sigma
g_rr = 1. + 2.*r/Sigma
g_ff = sth2*(Sigma + a2*g_rr*sth2)
g_thth = Sigma
g_tr = 2*r/Sigma
g_tf = -2*a*r*sth2/Sigma
g_rf = -a*g_rr*sth2
det_g = -Sigma**2*sth2
if verbose:
print ("Expected:")
print (" g_tt = %19.11e" % g_tt )
print (" g_rr = %19.11e" % g_rr )
print (" g_thth = %19.11e" % g_thth)
print (" g_ff = %19.11e" % g_ff )
print (" g_tr = %19.11e" % g_tr )
print (" g_rf = %19.11e" % g_rf )
print (" g_tf = %19.11e" % g_tf )
print ("")
# check gks_A
gks_expected = np.array(
[[ g_tt, g_tr, 0.0, g_tf],
[ g_tr, g_rr, 0.0, g_rf],
[ 0.0, 0.0, g_thth, 0.0],
[ g_tf, g_rf, 0.0, g_ff]]
)
passed = True
for i in range(4):
for j in range(4):
if abs(gks_A[i,j] - gks_expected[i,j])/abs(det_g) > tol:
passed = False
if verbose:
print (f"WARNING: Significant mismatch in gks_A[{i},{j}]:")
print (" -- expected: %19.11e" % gks_expected[i,j])
print (" -- actual: %19.11e" % gks_A[i,j])
return passed
| 2.21875 | 2 |
holobot/discord/sdk/models/channel.py | rexor12/holobot | 1 | 9902 | <gh_stars>1-10
from dataclasses import dataclass
@dataclass
class Channel:
id: str
| 1.53125 | 2 |
pynet/models/braingengan.py | claireguichon/pynet | 8 | 9903 | <reponame>claireguichon/pynet
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
3D MRI Brain Generation with a Generative Adversarial Network (BGGAN) combined with a
Variational Auto-Encoder (VAE).
"""
# Imports
import logging
import collections
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as func
from pynet.utils import Networks
# Global parameters
logger = logging.getLogger("pynet")
@Networks.register
class BGDiscriminator(nn.Module):
""" This is the discriminator part of the BGGAN.
"""
def __init__(self, in_shape, in_channels=1, out_channels=1,
start_filts=64, with_logit=True):
""" Init class.
Parameters
----------
in_shape: tuple
the input tensor data shape (X, Y, Z).
in_channels: int, default 1
number of channels in the input tensor.
out_channels: int, default 1
number of channels in the output tensor.
start_filts: int, default 64
number of convolutional filters for the first conv.
with_logit: bool, default True
apply the logit function to the result.
"""
super(BGDiscriminator, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.start_filts = start_filts
self.with_logit = with_logit
self.in_shape = in_shape
self.shapes = _downsample_shape(
self.in_shape, nb_iterations=4, scale_factor=2)
self.conv1 = nn.Conv3d(
self.in_channels, self.start_filts, kernel_size=4, stride=2,
padding=1)
self.conv2 = nn.Conv3d(
self.start_filts, self.start_filts * 2, kernel_size=4, stride=2,
padding=1)
self.bn2 = nn.BatchNorm3d(self.start_filts * 2)
self.conv3 = nn.Conv3d(
self.start_filts * 2, self.start_filts * 4, kernel_size=4,
stride=2, padding=1)
self.bn3 = nn.BatchNorm3d(self.start_filts * 4)
self.conv4 = nn.Conv3d(
self.start_filts * 4, self.start_filts * 8, kernel_size=4,
stride=2, padding=1)
self.bn4 = nn.BatchNorm3d(self.start_filts * 8)
self.conv5 = nn.Conv3d(
self.start_filts * 8, self.out_channels,
kernel_size=self.shapes[-1], stride=1, padding=0)
def forward(self, x):
logger.debug("BGGAN Discriminator...")
self.debug("input", x)
h1 = func.leaky_relu(self.conv1(x), negative_slope=0.2)
self.debug("conv1", h1)
h2 = func.leaky_relu(self.bn2(self.conv2(h1)), negative_slope=0.2)
self.debug("conv2", h2)
h3 = func.leaky_relu(self.bn3(self.conv3(h2)), negative_slope=0.2)
self.debug("conv3", h3)
h4 = func.leaky_relu(self.bn4(self.conv4(h3)), negative_slope=0.2)
self.debug("conv4", h4)
h5 = self.conv5(h4)
self.debug("conv5", h5)
if self.with_logit:
output = torch.sigmoid(h5.view(h5.size(0), -1))
self.debug("output", output)
else:
output = h5
logger.debug("Done.")
return output
def debug(self, name, tensor):
logger.debug(" {3}: {0} - {1} - {2}".format(
tensor.shape, tensor.get_device(), tensor.dtype, name))
@Networks.register
class BGEncoder(nn.Module):
""" This is the encoder part of the BGGAN.
"""
def __init__(self, in_shape, in_channels=1, start_filts=64,
latent_dim=1000):
""" Init class.
Parameters
----------
in_shape: tuple
the input tensor data shape (X, Y, Z).
in_channels: int, default 1
number of channels in the input tensor.
start_filts: int, default 64
number of convolutional filters for the first conv.
latent_dim: int, default 1000
the latent variable sizes.
"""
super(BGEncoder, self).__init__()
self.in_channels = in_channels
self.start_filts = start_filts
self.latent_dim = latent_dim
self.in_shape = in_shape
self.shapes = _downsample_shape(
self.in_shape, nb_iterations=4, scale_factor=2)
self.dense_features = np.prod(self.shapes[-1])
logger.debug("BGGAN Encoder shapes: {0}".format(self.shapes))
self.conv1 = nn.Conv3d(
self.in_channels, self.start_filts, kernel_size=4, stride=2,
padding=1)
self.conv2 = nn.Conv3d(
self.start_filts, self.start_filts * 2, kernel_size=4, stride=2,
padding=1)
self.bn2 = nn.BatchNorm3d(self.start_filts * 2)
self.conv3 = nn.Conv3d(
self.start_filts * 2, self.start_filts * 4, kernel_size=4,
stride=2, padding=1)
self.bn3 = nn.BatchNorm3d(self.start_filts * 4)
self.conv4 = nn.Conv3d(
self.start_filts * 4, self.start_filts * 8, kernel_size=4,
stride=2, padding=1)
self.bn4 = nn.BatchNorm3d(self.start_filts * 8)
self.mean = nn.Sequential(
nn.Linear(self.start_filts * 8 * self.dense_features, 2048),
nn.BatchNorm1d(2048),
nn.ReLU(),
nn.Linear(2048, self.latent_dim))
self.logvar = nn.Sequential(
nn.Linear(self.start_filts * 8 * self.dense_features, 2048),
nn.BatchNorm1d(2048),
nn.ReLU(),
nn.Linear(2048, self.latent_dim))
def forward(self, x):
logger.debug("BGGAN Encoder...")
batch_size = x.size(0)
logger.debug(" batch_size: {0}".format(batch_size))
self.debug("input", x)
h1 = func.leaky_relu(self.conv1(x), negative_slope=0.2)
self.debug("conv1", h1)
h2 = func.leaky_relu(self.bn2(self.conv2(h1)), negative_slope=0.2)
self.debug("conv2", h2)
h3 = func.leaky_relu(self.bn3(self.conv3(h2)), negative_slope=0.2)
self.debug("conv3", h3)
h4 = func.leaky_relu(self.bn4(self.conv4(h3)), negative_slope=0.2)
self.debug("conv4", h4)
mean = self.mean(h4.view(batch_size, -1))
self.debug("mean", mean)
logvar = self.logvar(h4.view(batch_size, -1))
self.debug("logvar", logvar)
std = logvar.mul(0.5).exp_()
reparametrized_noise = Variable(
torch.randn((batch_size, self.latent_dim))).to(x.device)
reparametrized_noise = mean + std * reparametrized_noise
self.debug("reparametrization", reparametrized_noise)
logger.debug("Done.")
return mean, logvar, reparametrized_noise
def debug(self, name, tensor):
logger.debug(" {3}: {0} - {1} - {2}".format(
tensor.shape, tensor.get_device(), tensor.dtype, name))
@Networks.register
class BGCodeDiscriminator(nn.Module):
""" This is the code discriminator part of the BGGAN.
"""
def __init__(self, out_channels=1, code_size=1000, n_units=4096):
""" Init class.
Parameters
----------
out_channels: int, default 1
number of channels in the output tensor.
code_size: int, default 1000
the code size.
n_units: int, default 4096
the number of hidden units.
"""
super(BGCodeDiscriminator, self).__init__()
self.out_channels = out_channels
self.code_size = code_size
self.n_units = n_units
self.layer1 = nn.Sequential(
nn.Linear(self.code_size, self.n_units),
nn.BatchNorm1d(self.n_units),
nn.LeakyReLU(0.2, inplace=True))
self.layer2 = nn.Sequential(
nn.Linear(self.n_units, self.n_units),
nn.BatchNorm1d(self.n_units),
nn.LeakyReLU(0.2, inplace=True))
self.layer3 = nn.Linear(self.n_units, self.out_channels)
def forward(self, x):
logger.debug("BGGAN Code Discriminator...")
self.debug("input", x)
h1 = self.layer1(x)
self.debug("layer1", h1)
h2 = self.layer2(h1)
self.debug("layer2", h2)
output = self.layer3(h2)
self.debug("layer3", output)
logger.debug("Done.")
return output
def debug(self, name, tensor):
logger.debug(" {3}: {0} - {1} - {2}".format(
tensor.shape, tensor.get_device(), tensor.dtype, name))
@Networks.register
class BGGenerator(nn.Module):
""" This is the generator part of the BGGAN.
"""
def __init__(self, in_shape, out_channels=1, start_filts=64,
latent_dim=1000, mode="trilinear", with_code=False):
""" Init class.
Parameters
----------
in_shape: tuple
the input tensor data shape (X, Y, Z).
out_channels: int, default 1
number of channels in the output tensor.
start_filts: int, default 64
number of convolutional filters for the first conv.
latent_dim: int, default 1000
the latent variable sizes.
mode: str, default 'trilinear'
the interpolation mode.
with_code: bool, default False
change the architecture if code discriminator is used.
"""
super(BGGenerator, self).__init__()
self.out_channels = out_channels
self.start_filts = start_filts
self.latent_dim = latent_dim
self.in_shape = in_shape
self.mode = mode
self.with_code = with_code
self.shapes = _downsample_shape(
self.in_shape, nb_iterations=4, scale_factor=2)
self.dense_features = np.prod(self.shapes[-1])
logger.debug("BGGAN Generator shapes: {0}".format(self.shapes))
if self.with_code:
self.tp_conv1 = nn.ConvTranspose3d(
self.latent_dim, self.start_filts * 8, kernel_size=4,
stride=1, padding=0, bias=False)
else:
self.fc = nn.Linear(
self.latent_dim, self.start_filts * 8 * self.dense_features)
self.bn1 = nn.BatchNorm3d(self.start_filts * 8)
self.tp_conv2 = nn.Conv3d(
self.start_filts * 8, self.start_filts * 4, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm3d(self.start_filts * 4)
self.tp_conv3 = nn.Conv3d(
self.start_filts * 4, self.start_filts * 2, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm3d(self.start_filts * 2)
self.tp_conv4 = nn.Conv3d(
self.start_filts * 2, self.start_filts, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn4 = nn.BatchNorm3d(self.start_filts)
self.tp_conv5 = nn.Conv3d(
self.start_filts, self.out_channels, kernel_size=3, stride=1,
padding=1, bias=False)
def forward(self, noise):
logger.debug("BGGAN Generator...")
self.debug("input", noise)
if self.with_code:
noise = noise.view(-1, self.latent_dim, 1, 1, 1)
self.debug("view", noise)
h = self.tp_conv1(noise)
self.debug("tp_conv1", h)
else:
noise = noise.view(-1, self.latent_dim)
self.debug("view", noise)
h = self.fc(noise)
self.debug("dense", h)
h = h.view(-1, self.start_filts * 8, *self.shapes[-1])
self.debug("view", h)
h = func.relu(self.bn1(h))
h = nn.functional.interpolate(
h, size=self.shapes[-2], mode=self.mode, align_corners=False)
h = self.tp_conv2(h)
h = func.relu(self.bn2(h))
self.debug("tp_conv2", h)
h = nn.functional.interpolate(
h, size=self.shapes[-3], mode=self.mode, align_corners=False)
h = self.tp_conv3(h)
h = func.relu(self.bn3(h))
self.debug("tp_conv3", h)
h = nn.functional.interpolate(
h, size=self.shapes[-4], mode=self.mode, align_corners=False)
h = self.tp_conv4(h)
h = func.relu(self.bn4(h))
self.debug("tp_conv4", h)
h = nn.functional.interpolate(
h, size=self.shapes[-5], mode=self.mode, align_corners=False)
h = self.tp_conv5(h)
self.debug("tp_conv5", h)
h = torch.tanh(h)
self.debug("output", h)
logger.debug("Done.")
return h
def debug(self, name, tensor):
logger.debug(" {3}: {0} - {1} - {2}".format(
tensor.shape, tensor.get_device(), tensor.dtype, name))
def _downsample_shape(shape, nb_iterations=1, scale_factor=2):
shape = np.asarray(shape)
all_shapes = [shape.astype(int).tolist()]
for idx in range(nb_iterations):
shape = np.floor(shape / scale_factor)
all_shapes.append(shape.astype(int).tolist())
return all_shapes
| 2.234375 | 2 |
research/object_detection/core/freezable_batch_norm_test.py | baranshad/models | 3 | 9904 | <filename>research/object_detection/core/freezable_batch_norm_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.freezable_batch_norm."""
import numpy as np
import tensorflow as tf
from object_detection.core import freezable_batch_norm
class FreezableBatchNormTest(tf.test.TestCase):
"""Tests for FreezableBatchNorm operations."""
def _build_model(self, training=None):
model = tf.keras.models.Sequential()
norm = freezable_batch_norm.FreezableBatchNorm(training=training,
input_shape=(10,),
momentum=0.8)
model.add(norm)
return model, norm
def _train_freezable_batch_norm(self, training_mean, training_var):
model, _ = self._build_model()
model.compile(loss='mse', optimizer='sgd')
# centered on training_mean, variance training_var
train_data = np.random.normal(
loc=training_mean,
scale=training_var,
size=(1000, 10))
model.fit(train_data, train_data, epochs=4, verbose=0)
return model.weights
def _test_batchnorm_layer(
self, norm, should_be_training, test_data,
testing_mean, testing_var, training_arg, training_mean, training_var):
out_tensor = norm(tf.convert_to_tensor(test_data, dtype=tf.float32),
training=training_arg)
out = tf.keras.backend.eval(out_tensor)
out -= tf.keras.backend.eval(norm.beta)
out /= tf.keras.backend.eval(norm.gamma)
if not should_be_training:
out *= training_var
out += (training_mean - testing_mean)
out /= testing_var
np.testing.assert_allclose(out.mean(), 0.0, atol=1.5e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1.5e-1)
def test_batchnorm_freezing_training_none(self):
with self.test_session():
training_mean = 5.0
training_var = 10.0
testing_mean = -10.0
testing_var = 5.0
# Initially train the batch norm, and save the weights
trained_weights = self._train_freezable_batch_norm(training_mean,
training_var)
# Load the batch norm weights, freezing training to True.
# Apply the batch norm layer to testing data and ensure it is normalized
# according to the batch statistics.
model, norm = self._build_model(training=True)
for trained_weight, blank_weight in zip(trained_weights, model.weights):
weight_copy = blank_weight.assign(tf.keras.backend.eval(trained_weight))
tf.keras.backend.eval(weight_copy)
# centered on testing_mean, variance testing_var
test_data = np.random.normal(
loc=testing_mean,
scale=testing_var,
size=(1000, 10))
# Test with training=True passed to the call method:
training_arg = True
should_be_training = True
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Test with training=False passed to the call method:
training_arg = False
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Test the layer in various Keras learning phase scopes:
training_arg = None
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
tf.keras.backend.set_learning_phase(True)
should_be_training = True
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
tf.keras.backend.set_learning_phase(False)
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
def test_batchnorm_freezing_training_false(self):
with self.test_session():
training_mean = 5.0
training_var = 10.0
testing_mean = -10.0
testing_var = 5.0
# Initially train the batch norm, and save the weights
trained_weights = self._train_freezable_batch_norm(training_mean,
training_var)
# Load the batch norm back up, freezing training to False.
# Apply the batch norm layer to testing data and ensure it is normalized
# according to the training data's statistics.
model, norm = self._build_model(training=False)
for trained_weight, blank_weight in zip(trained_weights, model.weights):
weight_copy = blank_weight.assign(tf.keras.backend.eval(trained_weight))
tf.keras.backend.eval(weight_copy)
# centered on testing_mean, variance testing_var
test_data = np.random.normal(
loc=testing_mean,
scale=testing_var,
size=(1000, 10))
# Make sure that the layer is never training
# Test with training=True passed to the call method:
training_arg = True
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Test with training=False passed to the call method:
training_arg = False
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Test the layer in various Keras learning phase scopes:
training_arg = None
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
tf.keras.backend.set_learning_phase(True)
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
tf.keras.backend.set_learning_phase(False)
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
if __name__ == '__main__':
tf.test.main()
| 2.359375 | 2 |
offspect/gui/VWidgets/message.py | translationalneurosurgery/tool-offspect | 1 | 9905 | <reponame>translationalneurosurgery/tool-offspect
# from PyQt5.QtWidgets import QMessageBox
# def raise_error(message: str = "DEFAULT:Error Description:More Information"):
# box = QMessageBox()
# kind, msg, info = message.split(":")
# box.setIcon(QMessageBox.Critical)
# box.setWindowTitle(kind + " Error")
# box.setText(msg)
# box.setInformativeText(info)
# box.exec_()
| 1.960938 | 2 |
Widen/LC759_Employee_Free_Time.py | crazywiden/Leetcode_daily_submit | 0 | 9906 | <filename>Widen/LC759_Employee_Free_Time.py
"""
759. Employee Free Time
We are given a list schedule of employees, which represents the working time for each employee.
Each employee has a list of non-overlapping Intervals, and these intervals are in sorted order.
Return the list of finite intervals representing common, positive-length free time for all employees, also in sorted order.
(Even though we are representing Intervals in the form [x, y], the objects inside are Intervals, not lists or arrays. For example, schedule[0][0].start = 1, schedule[0][0].end = 2, and schedule[0][0][0] is not defined). Also, we wouldn't include intervals like [5, 5] in our answer, as they have zero length.
"""
# Line sweep method
# if we meet a start, cnt += 1
# if we meet an end, cnt -= 1
# whenever cnt drops back to 0, all busy intervals are closed and a free gap may follow
# time complexity -- O(N*logN), since all intervals need to be sorted
# Runtime: 96 ms, faster than 87.95% of Python3 online submissions for Employee Free Time.
# Memory Usage: 14.7 MB, less than 25.00% of Python3 online submissions for Employee Free Time.
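# Worked example with a small sample schedule (illustrative):
#   schedule = [[[1,3],[6,7]], [[2,4]], [[2,5],[9,12]]]
#   sorted events: (1,S)(2,S)(2,S)(3,E)(4,E)(5,E)(6,S)(7,E)(9,S)(12,E)
#   cnt drops to 0 at 5 and the next start is 6 -> free [5,6]; drops to 0 at 7, next start 9 -> free [7,9]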
"""
# Definition for an Interval.
class Interval:
def __init__(self, start: int = None, end: int = None):
self.start = start
self.end = end
"""
class Solution:
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
START, END = 0, 1
all_interval = []
for person in schedule:
for interval in person:
all_interval.append((interval.start, START))
all_interval.append((interval.end, END))
all_interval = sorted(all_interval, key=lambda x: x[0])
prev = None
cnt = 0
res = []
for i in range(len(all_interval)):
if cnt == 0 and prev is not None:
if prev != all_interval[i][0]:
res.append(Interval(prev, all_interval[i][0]))
if all_interval[i][1] == START:
cnt += 1
else:
cnt -= 1
prev = all_interval[i][0]
return res
# priority queue
# if the current largest end is less than the smallest remaining start,
# then there is a gap of free time
# use a priority queue to maintain the smallest start
# also only store one job of each person in the queue at a time to save memory
# time complexity -- O(N*logC), where C is the number of employees
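# With the same sample schedule, the heap always exposes the earliest unprocessed job
# start, while largest_end tracks the furthest busy end seen so far; whenever the popped
# start exceeds largest_end, the gap (largest_end, start) is emitted as free time.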
"""
# Definition for an Interval.
class Interval:
def __init__(self, start: int = None, end: int = None):
self.start = start
self.end = end
"""
import heapq
class Solution:
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
res = []
job_start_q = [(emp[0].start, emp_id, 0) for emp_id, emp in enumerate(schedule)]
heapq.heapify(job_start_q)
largest_end = min(interval.start for emp in schedule for interval in emp)
while job_start_q:
start, emp_id, job_id = heapq.heappop(job_start_q)
if largest_end < start:
res.append(Interval(largest_end, start))
largest_end = max(largest_end, schedule[emp_id][job_id].end)
if job_id + 1 < len(schedule[emp_id]):
heapq.heappush(job_start_q, (schedule[emp_id][job_id+1].start, emp_id, job_id+1))
return res
| 3.609375 | 4 |
storitch/config.py | thomaserlang/storitch | 0 | 9907 | <reponame>thomaserlang/storitch<filename>storitch/config.py
import os, yaml
config = {
'debug': False,
'port': 5000,
'store_path': '/var/storitch',
'pool_size': 5,
'logging': {
'level': 'warning',
'path': None,
'max_size': 100 * 1000 * 1000,# ~ 95 mb
'num_backups': 10,
},
'image_exts': [
'.jpg', '.jpeg', '.png', '.tiff', '.tif', '.gif',
'.bmp', '.bmp2', '.bmp3', '.dcm', '.dicom', '.webp',
],
}
def load(path=None):
default_paths = [
'~/storitch.yaml',
'./storitch.yaml',
'../storitch.yaml',
'/etc/storitch/storitch.yaml',
'/etc/storitch.yaml',
]
if not path:
path = os.environ.get('STORITCH_CONFIG', None)
if not path:
for p in default_paths:
p = os.path.expanduser(p)
if os.path.isfile(p):
path = p
break
if not path:
raise Exception('No config file specified.')
if not os.path.isfile(path):
raise Exception('Config: "{}" could not be found.'.format(path))
with open(path) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
for key in data:
if key in config:
if isinstance(config[key], dict):
config[key].update(data[key])
else:
config[key] = data[key] | 2.1875 | 2 |
keras/lstm-securitai/model/pipeline_invoke_python.py | PipelineAI/models | 44 | 9908 | import io
import os
import numpy as np
import pandas
import json
import logging #<== Optional. Log to console, file, kafka
from pipeline_monitor import prometheus_monitor as monitor #<== Optional. Monitor runtime metrics
from pipeline_logger import log
import tensorflow as tf
from tensorflow.contrib import predictor
from keras.models import Sequential, load_model
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from collections import OrderedDict
_logger = logging.getLogger('pipeline-logger')
_logger.setLevel(logging.INFO)
_logger_stream_handler = logging.StreamHandler()
_logger_stream_handler.setLevel(logging.INFO)
_logger.addHandler(_logger_stream_handler)
__all__ = ['invoke'] #<== Optional. Being a good Python citizen.
_labels = { #<== Optional. Used for metrics/labels
'name': 'injection',
'tag': 'v1',
'type': 'tensorflow',
'runtime': 'python',
'chip': 'cpu',
}
def _initialize_upon_import(): #<== Optional. Called once upon server startup
''' Initialize / Restore Model Object.
'''
model = load_model('securitai-lstm-model.h5')
model.load_weights('securitai-lstm-weights.h5')
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
return model
# This is called unconditionally at *module import time*...
_model = _initialize_upon_import()
#@log(labels=_labels, logger=_logger) #<== Optional. Sample and compare predictions
def invoke(request): #<== Required. Called on every prediction
'''Where the magic happens...'''
with monitor(labels=_labels, name="transform_request"): #<== Optional. Expose fine-grained metrics
transformed_request = _transform_request(request) #<== Optional. Transform input (json) into TensorFlow (tensor)
with monitor(labels=_labels, name="invoke"): #<== Optional. Calls _model.predict()
response = _model.predict(transformed_request)
with monitor(labels=_labels, name="transform_response"): #<== Optional. Transform TensorFlow (tensor) into output (json)
transformed_response = _transform_response(response)
return transformed_response #<== Required. Returns the predicted value(s)
def _transform_request(request):
request_str = request.decode('utf-8')
# tokenize the csv request and create json
X = pandas.read_csv(io.StringIO(request_str), engine='python', quotechar='|', header=None).values[:,0]
for index, item in enumerate(X):
reqJson = json.loads(item, object_pairs_hook=OrderedDict)
del reqJson['http']['timestamp']
del reqJson['http']['headers']
del reqJson['http']['source']
del reqJson['http']['route']
del reqJson['http']['responsePayload']
X[index] = json.dumps(reqJson, separators=(',', ':'))
tokenizer = Tokenizer(filters='\t\n', char_level=True)
tokenizer.fit_on_texts(X)
# this used to be [log_entry]
seq = tokenizer.texts_to_sequences([request_str])
max_log_length = 1024
log_entry_processed = sequence.pad_sequences(seq, maxlen=max_log_length)
return log_entry_processed
def _transform_response(response):
return response[0]
if __name__ == '__main__':
with open('./pipeline_test_request.csv', 'rb') as fb:
request_bytes = fb.read()
response_bytes = invoke(request_bytes)
print(response_bytes)
| 2.265625 | 2 |
src/act/common/aCTReport.py | ATLASControlTower/aCT | 0 | 9909 | <reponame>ATLASControlTower/aCT
import argparse
import importlib
import os
import re
import signal
import subprocess
import sys
import time
import logging
from act.common import aCTLogger
from act.common.aCTConfig import aCTConfigAPP
from act.arc import aCTDBArc
class aCTReport:
'''Print summary info on jobs in DB. Use --web to print html that is
automatically refreshed. Add filenames to query more than one aCT DB'''
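# Example invocations (assumed; the module provides a main() entry point):
#   python aCTReport.py                                   # plain-text report for the default DB
#   python aCTReport.py --web report.html one.config two.config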
def __init__(self, args):
self.output = ""
self.outfile = args.web
self.actconfs = args.conffiles or [''] # empty string for default behaviour
self.logger=aCTLogger.aCTLogger("aCTReport")
self.actlog=self.logger()
self.actlog.logger.setLevel(logging.INFO)
self.criticallogger = aCTLogger.aCTLogger('aCTCritical', arclog=False)
self.criticallog = self.criticallogger()
if self.outfile:
self.log('<META HTTP-EQUIV="refresh" CONTENT="60"><pre>')
self.log(time.asctime() + '\n')
self.db=aCTDBArc.aCTDBArc(self.actlog)
def log(self, message=''):
self.output += message + '\n'
def AppReport(self):
appconf = aCTConfigAPP()
apps = appconf.getList(["modules", "app"])
for app in apps:
try:
ap = importlib.import_module(f'{app}.aCTReport').report
self.log(ap(self.actconfs))
except ModuleNotFoundError as e:
self.actlog.info(f'No report in module {app}')
except AttributeError:
self.actlog.info(f'aCTReport.report() not found in {app}')
except Exception as e:
self.actlog.error(f'Exception running {app}.aCTReport.report: {e}')
def ProcessReport(self):
if self.actconfs != ['']:
return # don't print processes for combined report
actprocscmd = 'ps ax -ww -o pid,etime,args'
try:
out = subprocess.run(actprocscmd.split(), check=True, encoding='utf-8', stdout=subprocess.PIPE).stdout
except subprocess.CalledProcessError as e:
self.log('Error: could not run ps command: %s' % e.stderr)
return
# Group processes by cluster
cluster_procs = {}
longprocesses = []
for line in out.split('\n'):
reg = re.match(r'\s*(\d*)\s*(.*) .*python.* .*(aCT\w*)\.py\s?(\S*)', line)
if reg:
pid, runningtime, process, cluster = reg.groups()
# ignore Main and this process
if process in ['aCTReport', 'aCTMain', 'aCTHeartbeatWatchdog']:
continue
if cluster == '':
cluster = '(no cluster defined)'
elif not re.match(r'\d\d:\d\d$', runningtime):
# Check for overrunning processes
longprocesses.append((process, pid, cluster, runningtime))
if cluster in cluster_procs:
cluster_procs[cluster].append(process)
else:
cluster_procs[cluster] = [process]
for proc in longprocesses:
self.log('WARNING: %s (pid %s) for %s running for more than one hour (%s), this process will be killed' % proc)
# Kill process and log a critical message to send email
# Too many emails, disable
#self.criticallog.critical('Killing process %s (pid %s) for %s running for more than one hour (%s)' % proc)
try:
os.kill(int(proc[1]), signal.SIGKILL)
except OSError:
pass
self.log()
self.log('Active processes per cluster:')
for cluster in sorted(cluster_procs):
procs = cluster_procs[cluster]
procs.sort()
self.log(f'{cluster:>38.38}: {" ".join(procs)}')
self.log()
def ArcJobReport(self):
rep={}
rtot={}
states = ["Undefined", "Accepted", "Preparing", "Submitting",
"Queuing", "Running", "Finishing", "Finished", "Hold", "Killed",
"Failed", "Deleted", "Other"]
for conf in self.actconfs:
if conf:
os.environ['ACTCONFIGARC'] = conf
db=aCTDBArc.aCTDBArc(self.actlog)
c=db.db.conn.cursor()
c.execute("select jobid,state from arcjobs")
rows=c.fetchall()
for r in rows:
reg=re.search('.+//([^:]+)',str(r[0]))
cl=""
try:
cl=reg.group(1)
except:
cl='WaitingSubmission'
jid=str(r[1])
if jid == 'None':
jid="Other"
try:
rep[cl][jid]+=1
except:
try:
rep[cl][jid]=1
except:
rep[cl]={}
rep[cl][jid]=1
try:
rtot[jid]+=1
except:
rtot[jid]=1
if sum(rtot.values()) == 0:
return
self.log(f"All ARC jobs: {sum(rtot.values())}")
self.log(f"{'':39} {' '.join([f'{s:>9}' for s in states])}")
for k in sorted(rep, key=lambda x: x.split('.')[-1]):
log=f"{k:>38.38}:"
for s in states:
try:
log += f'{rep[k][s]:>10}'
except KeyError:
log += f'{"-":>10}'
self.log(log)
log = f"{'Totals':>38}:"
for s in states:
try:
log += f'{rtot[s]:>10}'
except:
log += f'{"-":>10}'
self.log(log+'\n\n')
def CondorJobReport(self):
rep = {}
rtot = {}
condorjobstatemap = ['Undefined', # used before real state is known
'Idle',
'Running',
'Removed',
'Completed',
'Held',
'Transferring',
'Suspended']
for conf in self.actconfs:
if conf:
os.environ['ACTCONFIGARC'] = conf
db=aCTDBArc.aCTDBArc(self.actlog)
c = db.db.conn.cursor()
c.execute("select cluster, JobStatus from condorjobs")
rows = c.fetchall()
for r in rows:
cl = str(r[0])
if not cl:
cl = 'WaitingSubmission'
jid = r[1]
try:
rep[cl][jid]+=1
except:
try:
rep[cl][jid]=1
except:
rep[cl]={}
rep[cl][jid]=1
try:
rtot[jid]+=1
except:
rtot[jid]=1
if sum(rtot.values()) == 0:
return
self.log(f"All Condor jobs: {sum(rtot.values())}")
self.log(f"{'':39} {' '.join([f'{s:>9}' for s in condorjobstatemap])}")
for k in sorted(rep, key=lambda x: x.split('.')[-1]):
log=f"{k:>38.38}:"
for s in range(8):
try:
log += f'{rep[k][s]:>10}'
except KeyError:
log += f'{"-":>10}'
self.log(log)
log = f"{'Totals':>38}:"
for s in range(8):
try:
log += f'{rtot[s]:>10}'
except:
log += f'{"-":>10}'
self.log(log+'\n\n')
def StuckReport(self):
# Query for lost jobs older than lostlimit
lostlimit = 86400
select = "(arcstate='submitted' or arcstate='running') and " \
+ self.db.timeStampLessThan("tarcstate", lostlimit) + \
" order by tarcstate"
columns = ['cluster']
jobs = self.db.getArcJobsInfo(select, columns)
if jobs:
self.log('Found %d jobs not updated in over %d seconds:\n' % (len(jobs), lostlimit))
clustercount = {}
for job in jobs:
try:
host = re.search('.+//([^:]+)', job['cluster']).group(1)
except:
host = None
if host in clustercount:
clustercount[host] += 1
else:
clustercount[host] = 1
for cluster, count in clustercount.items():
self.log(f'{count} {cluster}')
self.log()
def end(self):
if self.outfile:
self.log('</pre>')
def main():
parser = argparse.ArgumentParser(description='Report table of aCT jobs.')
parser.add_argument('conffiles', nargs='*', help='list of configuration files')
parser.add_argument('--web', help='Output suitable for web page')
parser.add_argument('--harvester', action='store_true', help='Dummy arg for backwards compatibility')
args = parser.parse_args(sys.argv[1:])
acts = aCTReport(args)
acts.AppReport()
acts.ArcJobReport()
acts.CondorJobReport()
acts.StuckReport()
acts.ProcessReport()
acts.end()
if acts.outfile is None:
sys.stdout.write(acts.output)
else:
f=open(acts.outfile,"w")
f.write(acts.output)
f.close()
if __name__ == '__main__':
main()
| 2.203125 | 2 |
unittests/test_apiv2_user.py | mtcolman/django-DefectDojo | 249 | 9910 | <reponame>mtcolman/django-DefectDojo
from rest_framework.test import APITestCase, APIClient
from django.urls import reverse
from rest_framework.authtoken.models import Token
class UserTest(APITestCase):
"""
Test the User APIv2 endpoint.
"""
fixtures = ['dojo_testdata.json']
def setUp(self):
token = Token.objects.get(user__username='admin')
self.client = APIClient()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
def test_user_list(self):
r = self.client.get(reverse('user-list'))
self.assertEqual(r.status_code, 200, r.content[:1000])
user_list = r.json()['results']
self.assertTrue(len(user_list) >= 1, r.content[:1000])
for user in user_list:
for item in ['username', 'first_name', 'last_name', 'email']:
self.assertIn(item, user, r.content[:1000])
for item in ['password']:
self.assertNotIn(item, user, r.content[:1000])
def test_user_add(self):
# simple user without password
r = self.client.post(reverse('user-list'), {
"username": "api-user-1"
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
# user with good password
password = '<PASSWORD>!@#$'
r = self.client.post(reverse('user-list'), {
"username": "api-user-2",
"password": password
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
# test password by fetching API key
r = self.client.post(reverse('api-token-auth'), {
"username": "api-user-2",
"password": password
}, format='json')
self.assertEqual(r.status_code, 200, r.content[:1000])
# user with weak password
r = self.client.post(reverse('user-list'), {
"username": "api-user-3",
"password": "<PASSWORD>"
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn('The password must contain at least 1 digit, 0-9.', r.content.decode("utf-8"))
def test_user_change_password(self):
# some user
r = self.client.post(reverse('user-list'), {
"username": "api-user-4"
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
user_id = r.json()['id']
r = self.client.put("{}{}/".format(reverse('user-list'), user_id), {
"username": "api-user-4",
"first_name": "first"
}, format='json',)
self.assertEqual(r.status_code, 200, r.content[:1000])
r = self.client.patch("{}{}/".format(reverse('user-list'), user_id), {
"last_name": "last"
}, format='json')
self.assertEqual(r.status_code, 200, r.content[:1000])
r = self.client.put("{}{}/".format(reverse('user-list'), user_id), {
"username": "api-user-4",
"password": "<PASSWORD>!@#$"
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8"))
r = self.client.patch("{}{}/".format(reverse('user-list'), user_id), {
"password": "<PASSWORD>!@#$"
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8"))
| 2.71875 | 3 |
src/init.py | ankit-kushwaha-51/RESTful_API | 0 | 9911 | <reponame>ankit-kushwaha-51/RESTful_API
from flask import Flask
from src.models import db
from . import config
def create_app():
flask_app = Flask(__name__)
flask_app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI
flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
flask_app.app_context().push()
db.init_app(flask_app)
db.create_all()
return flask_app
| 2.28125 | 2 |
tests/helpers/examples/order/tasks.py | nicoddemus/dependencies | 0 | 9912 | <filename>tests/helpers/examples/order/tasks.py<gh_stars>0
from dependencies import Injector
from dependencies import this
from dependencies.contrib.celery import shared_task
from examples.order.commands import ProcessOrder
@shared_task
class ProcessOrderTask(Injector):
name = "process_order"
run = ProcessOrder
bind = True
retry = this.task.retry
| 1.648438 | 2 |
001 - 050/ex032.py | SocrammBR/Desafios-Python-CursoEmVideo | 0 | 9913 | ano = int(input('Digite o ano: '))
if ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0):
print ('Ele é bissexto')
else:
print ('Ele não é bissexto') | 3.734375 | 4 |
rn/__init__.py | vikneswaran20/rn | 0 | 9914 | <reponame>vikneswaran20/rn
__version__ = '0.0.1'
__license__ = 'BSD'
| 0.8125 | 1 |
flexmeasures/cli/data_edit.py | FlexMeasures/flexmeasures | 12 | 9915 | from datetime import timedelta
from typing import Union, List, Optional
import click
import pandas as pd
from flask import current_app as app
from flask.cli import with_appcontext
from flexmeasures import Sensor
from flexmeasures.data import db
from flexmeasures.data.schemas.generic_assets import GenericAssetIdField
from flexmeasures.data.schemas.sensors import SensorIdField
from flexmeasures.data.models.generic_assets import GenericAsset
from flexmeasures.data.models.time_series import TimedBelief
from flexmeasures.data.utils import save_to_db
@click.group("edit")
def fm_edit_data():
"""FlexMeasures: Edit data."""
@fm_edit_data.command("attribute")
@with_appcontext
@click.option(
"--asset-id",
"assets",
required=False,
multiple=True,
type=GenericAssetIdField(),
help="Add/edit attribute to this asset. Follow up with the asset's ID.",
)
@click.option(
"--sensor-id",
"sensors",
required=False,
multiple=True,
type=SensorIdField(),
help="Add/edit attribute to this sensor. Follow up with the sensor's ID.",
)
@click.option(
"--attribute",
"attribute_key",
required=True,
help="Add/edit this attribute. Follow up with the name of the attribute.",
)
@click.option(
"--float",
"attribute_float_value",
required=False,
type=float,
help="Set the attribute to this float value.",
)
@click.option(
"--bool",
"attribute_bool_value",
required=False,
type=bool,
help="Set the attribute to this bool value.",
)
@click.option(
"--str",
"attribute_str_value",
required=False,
type=str,
help="Set the attribute to this string value.",
)
@click.option(
"--int",
"attribute_int_value",
required=False,
type=int,
help="Set the attribute to this integer value.",
)
@click.option(
"--null",
"attribute_null_value",
required=False,
is_flag=True,
default=False,
help="Set the attribute to a null value.",
)
def edit_attribute(
attribute_key: str,
assets: List[GenericAsset],
sensors: List[Sensor],
attribute_null_value: bool,
attribute_float_value: Optional[float] = None,
attribute_bool_value: Optional[bool] = None,
attribute_str_value: Optional[str] = None,
attribute_int_value: Optional[int] = None,
):
"""Edit (or add) an asset attribute or sensor attribute."""
if not assets and not sensors:
raise ValueError("Missing flag: pass at least one --asset-id or --sensor-id.")
# Parse attribute value
attribute_value = parse_attribute_value(
attribute_float_value=attribute_float_value,
attribute_bool_value=attribute_bool_value,
attribute_str_value=attribute_str_value,
attribute_int_value=attribute_int_value,
attribute_null_value=attribute_null_value,
)
# Set attribute
for asset in assets:
asset.attributes[attribute_key] = attribute_value
db.session.add(asset)
for sensor in sensors:
sensor.attributes[attribute_key] = attribute_value
db.session.add(sensor)
db.session.commit()
print("Successfully edited/added attribute.")
@fm_edit_data.command("resample-data")
@with_appcontext
@click.option(
"--sensor-id",
"sensor_ids",
multiple=True,
required=True,
help="Resample data for this sensor. Follow up with the sensor's ID. This argument can be given multiple times.",
)
@click.option(
"--event-resolution",
"event_resolution_in_minutes",
type=int,
required=True,
help="New event resolution as an integer number of minutes.",
)
@click.option(
"--from",
"start_str",
required=False,
help="Resample only data from this datetime onwards. Follow up with a timezone-aware datetime in ISO 6801 format.",
)
@click.option(
"--until",
"end_str",
required=False,
help="Resample only data until this datetime. Follow up with a timezone-aware datetime in ISO 6801 format.",
)
@click.option(
"--skip-integrity-check",
is_flag=True,
help="Whether to skip checking the resampled time series data for each sensor."
" By default, an excerpt and the mean value of the original"
" and resampled data will be shown for manual approval.",
)
def resample_sensor_data(
sensor_ids: List[int],
event_resolution_in_minutes: int,
start_str: Optional[str] = None,
end_str: Optional[str] = None,
skip_integrity_check: bool = False,
):
"""Assign a new event resolution to an existing sensor and resample its data accordingly."""
event_resolution = timedelta(minutes=event_resolution_in_minutes)
event_starts_after = pd.Timestamp(start_str) # note that "" or None becomes NaT
event_ends_before = pd.Timestamp(end_str)
for sensor_id in sensor_ids:
sensor = Sensor.query.get(sensor_id)
if sensor.event_resolution == event_resolution:
print(f"{sensor} already has the desired event resolution.")
continue
df_original = sensor.search_beliefs(
most_recent_beliefs_only=False,
event_starts_after=event_starts_after,
event_ends_before=event_ends_before,
).sort_values("event_start")
df_resampled = df_original.resample_events(event_resolution).sort_values(
"event_start"
)
if not skip_integrity_check:
message = ""
if sensor.event_resolution < event_resolution:
message += f"Downsampling {sensor} to {event_resolution} will result in a loss of data. "
click.confirm(
message
+ f"Data before:\n{df_original}\nData after:\n{df_resampled}\nMean before: {df_original['event_value'].mean()}\nMean after: {df_resampled['event_value'].mean()}\nContinue?",
abort=True,
)
# Update sensor
sensor.event_resolution = event_resolution
db.session.add(sensor)
# Update sensor data
query = TimedBelief.query.filter(TimedBelief.sensor == sensor)
if not pd.isnull(event_starts_after):
query = query.filter(TimedBelief.event_start >= event_starts_after)
if not pd.isnull(event_ends_before):
query = query.filter(
TimedBelief.event_start + sensor.event_resolution <= event_ends_before
)
query.delete()
save_to_db(df_resampled, bulk_save_objects=True)
db.session.commit()
print("Successfully resampled sensor data.")
app.cli.add_command(fm_edit_data)
def parse_attribute_value(
attribute_null_value: bool,
attribute_float_value: Optional[float] = None,
attribute_bool_value: Optional[bool] = None,
attribute_str_value: Optional[str] = None,
attribute_int_value: Optional[int] = None,
) -> Union[float, int, bool, str, None]:
"""Parse attribute value."""
if not single_true(
[attribute_null_value]
+ [
v is not None
for v in [
attribute_float_value,
attribute_bool_value,
attribute_str_value,
attribute_int_value,
]
]
):
raise ValueError("Cannot set multiple values simultaneously.")
if attribute_null_value:
return None
elif attribute_float_value is not None:
return float(attribute_float_value)
elif attribute_bool_value is not None:
return bool(attribute_bool_value)
elif attribute_int_value is not None:
return int(attribute_int_value)
return attribute_str_value
def single_true(iterable) -> bool:
i = iter(iterable)
return any(i) and not any(i)
| 2.28125 | 2 |
semana2/mail_settings.py | ArseniumGX/bluemer-modulo2 | 0 | 9916 | mail_settings = {
"MAIL_SERVER": 'smtp.gmail.com',
"MAIL_PORT": 465,
"MAIL_USE_TLS": False,
"MAIL_USE_SSL": True,
"MAIL_USERNAME": '<EMAIL>',
"MAIL_PASSWORD": '<PASSWORD>'
} | 1.210938 | 1 |
frame/base/parser.py | dingjingmaster/blog_spider | 0 | 9917 | #!/usr/bin/env python3.6
# -*- encoding=utf8 -*-
import pyquery
"""
Required fields:
    title, publication date, category, tags, content, images
Required field information:
    1. Site root URL
    2. Parser name
    3. Parser type
        1. PARSER_PASSAGE_URL       article URL
        2. PARSER_PASSAGE_TITLE     article title
        3. PARSER_PASSAGE_DATE      publication date
        4. PARSER_PASSAGE_CATEGORY  article category
        5. PARSER_PASSAGE_TAG       article tags
        6. PARSER_PASSAGE_CONTENT   article content
        7. PARSER_PASSAGE_IMGURL    image URLs within the article
"""
class Parser(object):
def __init__ (self):
self._webURL = ''
self._parserName = 'base_parser'
def _parser_passage_url (self, doc: str) -> (bool, str):
return
def _parser_passage_title (self, doc: str) -> (bool, str):
return
def _parser_passage_date (self, doc: str) -> (bool, str):
return
def _parser_passage_category (self, doc: str) -> (bool, str):
return
def _parser_passage_tag (self, doc: str) -> (bool, str):
return
def _parser_passage_content (self, doc: str) -> (bool, str):
return
def _parser_passage_img_url (self, doc: str) -> (bool, str, bytes):
return
def get_parser_name (self):
return self._parserName
@staticmethod
def _parser (doc: str, rule: str):
return pyquery.PyQuery(doc).find(rule)
def parse (self, doc: str, rule='', parse_type=-1):
if self.PARSER_PASSAGE_URL == parse_type:
if doc == '' or doc == None:
return (False, '')
return self._parser_passage_url(doc)
elif self.PARSER_PASSAGE_TITLE == parse_type:
if doc == '' or doc == None:
return (False, '')
return self._parser_passage_title(doc)
elif self.PARSER_PASSAGE_DATE == parse_type:
if doc == '' or doc == None:
return (False, '')
return self._parser_passage_date(doc)
elif self.PARSER_PASSAGE_CATEGORY == parse_type:
if doc == '' or doc == None:
return (False, '')
return self._parser_passage_category(doc)
elif self.PARSER_PASSAGE_TAG == parse_type:
if doc == '' or doc == None:
return (False, '')
return self._parser_passage_tag(doc)
elif self.PARSER_PASSAGE_CONTENT == parse_type:
if doc == '' or doc == None:
return (False, '')
return self._parser_passage_content(doc)
elif self.PARSER_PASSAGE_IMGURL == parse_type:
if doc == '' or doc == None:
return (False, '')
return self._parser_passage_img_url(doc)
else:
if doc == '' or doc == None:
return (False, '')
return Parser._parser(doc, rule)
PARSER_PASSAGE_URL = 1
PARSER_PASSAGE_TITLE = 2
PARSER_PASSAGE_DATE = 3
PARSER_PASSAGE_CATEGORY = 4
PARSER_PASSAGE_TAG = 5
PARSER_PASSAGE_CONTENT = 6
PARSER_PASSAGE_IMGURL = 7
| 2.796875 | 3 |
tools/mkblocks.py | Commodore-Bench/u5remastered | 14 | 9918 | <filename>tools/mkblocks.py
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
# Copyright 2019 Drunella
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
import sys
import glob
import subprocess
import argparse
import hashlib
import traceback
import pprint
def readblockmap_info(filename):
directory = dict()
with open(filename) as f:
result = [line.split() for line in f]
for l in result:
directory[l[0]] = l[1:]
return directory
def readdisks_info(filename):
disks = []
with open(filename) as f:
result = [line.split() for line in f]
#pprint.pprint(result)
return result
def readdisks_getdiskinfo(disks, diskname):
for d in disks:
if d[0] == diskname:
return d
return []
def map_initialize():
global bank_data, map_data
map_data = bytearray([0xff] * 0x800)
def crtmap_appendentry(filename, block, name, address):
with open(filename, "at") as f:
content = "{0} f {1} addr 0x{2:04x}\n".format(block, name, address)
return f.write(content)
def load_file(filename):
with open(filename, "rb") as f:
return f.read()
def write_prg(dirname, lowhigh, data):
if lowhigh == 0:
# low
a = bytearray(2)
a[0] = 0
a[1] = 0x80
elif lowhigh == 1:
# high
a = bytearray(2)
a[0] = 0
a[1] = 0xA0
else:
raise Exception("lowhigh can only be 0 or 1")
with open(dirname, "wb") as f:
#f.write(a)
f.write(data)
def blockmap_appendentry(diskid, line, bank, highaddress):
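# Layout note: each disk gets a 256-byte slot in the block map, two bytes per line
# (cartridge bank, high byte of the load address); offset 255 of the slot holds the start track.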
global map_data
base = diskid * 256 + line * 2
map_data[base] = bank
map_data[base+1] = highaddress
#print("blockmap_appendentry: " + str(base) + ": " + str(bank) + " " + str(highaddress))
def calculate_address(lowhigh):
if lowhigh == 0:
# low
a = 0x80
elif lowhigh == 1:
# high
a = 0xA0
else:
raise Exception("lowhigh can only be 0 or 1")
return a
def main(argv):
global bank_data, map_data
p = argparse.ArgumentParser()
p.add_argument("-v", dest="verbose", action="store_true", help="Verbose output.")
p.add_argument("-o", dest="disks", action="store", required=True, help="disk configuration file.")
p.add_argument("-f", dest="files", action="store", required=True, help="files directory.")
p.add_argument("-m", dest="crtfile", action="store", required=True, help="crt.map file")
p.add_argument("-d", dest="destination", action="store", required=True, help="destination directory.")
p.add_argument("-b", dest="blockmap", action="store", required=True, help="blockmap file.")
#p.add_argument("-f", dest="fileoutput", action="store", required=True, help="output data content file.")
args = p.parse_args()
#temp_path = os.path.join(args.build, "temp")
#os.makedirs(temp_path, exist_ok=True)
files_path = args.files #os.path.join(args.build, "files")
os.makedirs(files_path, exist_ok=True)
destination_path = args.destination #os.path.join(args.build, "obj")
os.makedirs(destination_path, exist_ok=True)
disks = readdisks_info(args.disks)
blockmap = readblockmap_info(args.blockmap)
map_initialize()
if os.path.exists(args.crtfile):
os.remove(args.crtfile)
# add blocks file
for d in ("britannia", "towne", "dwelling", "castle", "keep", "dungeon", "underworld"):
diskinfo = readdisks_getdiskinfo(disks, d)
starttrack = int(diskinfo[2], 0)
height = int(diskinfo[4], 0) - int(diskinfo[2], 0) + 1
diskid = int(diskinfo[1], 0) - 0x41
startbank = int(blockmap[d][0], 0)
lowhigh = int(blockmap[d][1], 0)
block_data = load_file(os.path.join(files_path, d + ".data"))
# build map and blocks
map_data[diskid*256+255] = starttrack
for b in range(0, height, 2):
# double line or single line
#factor = 2
#if b+1 >= height:
# factor = 1
# make data
bank_data = bytearray([0xff] * 0x2000)
baseaddress = calculate_address(lowhigh)
if b+1 >= height:
# one line
s = b * 256*16
l = 0x1000
bank_data[0:l] = block_data[s:s+l]
blockmap_appendentry(diskid, b, startbank, baseaddress)
else:
# two lines
s = b * 256*16
l = 0x2000
bank_data[0:l] = block_data[s:s+l]
blockmap_appendentry(diskid, b, startbank, baseaddress)
blockmap_appendentry(diskid, b+1, startbank, baseaddress+0x10)
# write data and map
filename = "{0}_{1:02d}.aprg".format(d, b)
write_prg(os.path.join(destination_path, filename), lowhigh, bank_data)
crtmap_appendentry(args.crtfile, startbank, filename, baseaddress * 0x100)
# increase values
startbank += 1
# write block map
blockmap_bank = int(blockmap["blockmap"][0], 0)
blockmap_lowhigh = int(blockmap["blockmap"][1], 0)
blockmap_address = calculate_address(blockmap_lowhigh) * 256
#blockmap_appendentry(0, b, startbank, baseaddress)
blockmapname = os.path.join(destination_path, "blockmap.aprg")
write_prg(blockmapname, blockmap_lowhigh, map_data)
crtmap_appendentry(args.crtfile, blockmap_bank, "blockmap.aprg", blockmap_address)
return 0
if __name__ == '__main__':
try:
retval = main(sys.argv)
sys.exit(retval)
except Exception as e:
print(e)
traceback.print_exc()
sys.exit(1)
| 2.421875 | 2 |
src/apetest/decode.py | boxingbeetle/apetest | 6 | 9919 | <gh_stars>1-10
# SPDX-License-Identifier: BSD-3-Clause
"""
Text decode functions.
These functions can be used to get Unicode strings from a series of bytes.
"""
from codecs import (
BOM_UTF8,
BOM_UTF16_BE,
BOM_UTF16_LE,
BOM_UTF32_BE,
BOM_UTF32_LE,
CodecInfo,
lookup as lookup_codec,
)
from collections import OrderedDict
from typing import Dict, Iterable, Optional, Tuple
from apetest.typing import LoggerT
def encoding_from_bom(data: bytes) -> Optional[str]:
"""
Look for a byte-order-marker at the start of the given C{bytes}.
If found, return the encoding matching that BOM, otherwise return C{None}.
"""
if data.startswith(BOM_UTF8):
return "utf-8"
elif data.startswith(BOM_UTF16_LE) or data.startswith(BOM_UTF16_BE):
return "utf-16"
elif data.startswith(BOM_UTF32_LE) or data.startswith(BOM_UTF32_BE):
return "utf-32"
else:
return None
def standard_codec_name(name: str) -> str:
"""
Map a codec name to the preferred standardized version.
The preferred names were taken from this list published by IANA:
U{http://www.iana.org/assignments/character-sets/character-sets.xhtml}
@param name:
Text encoding name, in lower case.
"""
if name.startswith("iso8859"):
return "iso-8859" + name[7:]
return {
"ascii": "us-ascii",
"euc_jp": "euc-jp",
"euc_kr": "euc-kr",
"iso2022_jp": "iso-2022-jp",
"iso2022_jp_2": "iso-2022-jp-2",
"iso2022_kr": "iso-2022-kr",
}.get(name, name)
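# A few illustrative mappings (doctest-style, derived from the table above):
#
#   >>> standard_codec_name("ascii")
#   'us-ascii'
#   >>> standard_codec_name("iso8859-15")
#   'iso-8859-15'
#   >>> standard_codec_name("utf-8")  # unknown names pass through unchanged
#   'utf-8'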
def try_decode(data: bytes, encodings: Iterable[str]) -> Tuple[str, str]:
"""
Attempt to decode text using the given encodings in order.
@param data:
Encoded version of the text.
@param encodings:
Names of the encodings to try. Must all be lower case.
@return: C{(text, encoding)}
The decoded string and the encoding used to decode it.
        The returned encoding name is the preferred name, which could differ
        from the name used in the C{encodings} argument.
@raise ValueError:
If the text could not be decoded.
"""
# Build sequence of codecs to try.
codecs: Dict[str, CodecInfo] = OrderedDict()
for encoding in encodings:
try:
codec = lookup_codec(encoding)
except LookupError:
pass
else:
codecs[standard_codec_name(codec.name)] = codec
# Apply decoders to the document.
for name, codec in codecs.items():
try:
text, consumed = codec.decode(data, "strict")
except UnicodeDecodeError:
continue
if consumed == len(data):
return text, name
raise ValueError("Unable to determine document encoding")
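# Usage sketch (illustrative only, not part of the public apetest API):
#
#   data = b"caf\xc3\xa9"
#   bom = encoding_from_bom(data)                 # None: no BOM present
#   candidates = ([bom] if bom else []) + ["utf-8", "latin-1"]
#   text, encoding = try_decode(data, candidates)
#   # text == "café", encoding == "utf-8"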
def decode_and_report(
data: bytes,
encoding_options: Iterable[Tuple[Optional[str], str]],
logger: LoggerT,
) -> Tuple[str, str]:
"""
Attempt to decode text using several encoding options in order.
@param data:
Encoded version of the text.
@param encoding_options: C{(encoding | None, source)*}
Each option is a pair of encoding name and a description of
where this encoding suggestion originated.
If the encoding name is C{None}, the option is skipped.
@param logger:
Non-fatal problems are logged here.
Such problems include an unknown or differing encodings
among the options.
@return: C{(text, encoding)}
The decoded string and the encoding used to decode it.
@raise ValueError:
If the text could not be decoded.
"""
# Filter and remember encoding options.
options = [
(encoding, source)
for encoding, source in encoding_options
if encoding is not None
]
encodings = [encoding for encoding, source in options]
# Always try to decode as UTF-8, since that is the most common encoding
# these days, plus it's a superset of ASCII so it also works for old or
# simple documents.
encodings.append("utf-8")
text, used_encoding = try_decode(data, encodings)
# Report differences between suggested encodings and the one we
# settled on.
for encoding, source in options:
try:
codec = lookup_codec(encoding)
except LookupError:
logger.warning(
'%s specifies encoding "%s", which is unknown to Python',
source,
encoding,
)
continue
std_name = standard_codec_name(codec.name)
if std_name != used_encoding:
logger.warning(
'%s specifies encoding "%s", ' 'while actual encoding seems to be "%s"',
source,
encoding,
used_encoding,
)
elif std_name != encoding:
logger.info(
'%s specifies encoding "%s", ' 'which is not the standard name "%s"',
source,
encoding,
used_encoding,
)
return text, used_encoding
| 3.171875 | 3 |
utils.py | jiangycTarheel/Compositional-Auxseq | 8 | 9920 | import os
import json
import gzip
from copy import deepcopy, copy
import numpy as np
import csv
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler
from transformers.tokenization_utils import trim_batch
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smooth, tgt_vocab_size, ignore_index=-100):
assert 0. < label_smooth <= 1.
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__()
smoothing_value = label_smooth / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0).unsqueeze(0))
self.confidence = 1.0 - label_smooth
self.lossfct = torch.nn.KLDivLoss(reduction='none')
def forward(self, pred, target):
"""
Args:
pred: [bsz, seq_len, vocab_size]
target: [bsz, seq_len]
        Returns:
            A scalar tensor: the label-smoothed KL-divergence loss averaged over non-ignored positions.
"""
model_prob = self.one_hot.repeat(target.size(0), target.size(1), 1) # [bsz, seq_len, vocab_size]
model_prob.scatter_(2, target.unsqueeze(2), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(2), 0)
pred_prob = F.log_softmax(pred, dim=2)
#return F.kl_div(pred_prob, model_prob, reduction='mean')
loss = self.lossfct(pred_prob, model_prob)
loss = torch.sum(loss, dim=2).masked_fill_((target == self.ignore_index), 0)
avg_loss = torch.sum(loss) / torch.sum((target != self.ignore_index).to(torch.float))
return avg_loss
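# Minimal usage sketch for LabelSmoothingLoss (vocabulary size and smoothing
# value below are made up for illustration):
#
#   criterion = LabelSmoothingLoss(label_smooth=0.1, tgt_vocab_size=8, ignore_index=-100)
#   logits = torch.randn(2, 5, 8)          # [bsz, seq_len, vocab_size]
#   target = torch.randint(0, 8, (2, 5))   # [bsz, seq_len]
#   loss = criterion(logits, target)       # scalar tensor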
# Special symbols
SOS_token = "<SOS>" # start of sentence
EOS_token = "<EOS>" # end of sentence
PAD_token = SOS_token # padding symbol
INPUT_TOKENS_SCAN = ['jump', 'opposite', 'right', 'twice', 'and', 'turn', 'thrice', 'run', 'after', 'around', 'left', 'walk', 'look']
OUTPUT_TOKENS_SCAN = ['I_TURN_RIGHT', 'I_JUMP', 'I_TURN_LEFT', 'I_RUN', 'I_WALK', 'I_LOOK']
# ACTION_TO_TEXT = {'I_TURN_RIGHT': 'right', 'I_JUMP': 'jump', 'I_TURN_LEFT': 'left', 'I_RUN': 'run', 'I_WALK': 'walk', 'I_LOOK': 'look'}
class Lang:
# Class for converting strings/words to numerical indices, and vice versa.
# Should use separate class for input language (English) and output language (actions)
#
def __init__(self, symbols, io_type):
# symbols : list of all possible symbols
n = len(symbols)
self.symbols = [_s.strip('\n') for _s in symbols]
self.io_type = io_type
if SOS_token not in self.symbols:
assert EOS_token not in self.symbols
self.index2symbol = {n: SOS_token, n+1: EOS_token}
self.symbol2index = {SOS_token: n, EOS_token: n + 1}
self.sos_id, self.eos_id = n, n + 1
else:
self.index2symbol = {}
self.symbol2index = {}
self.sos_id, self.eos_id = 0, 1
self.pad_token_id = self.sos_id
for idx,s in enumerate(self.symbols):
self.index2symbol[idx] = s
self.symbol2index[s] = idx
self.n_symbols = len(self.index2symbol)
def variableFromSymbols(self, mylist, add_eos=True):
# Convert a list of symbols to a tensor of indices (adding a EOS token at end)
#
# Input
# mylist : list of m symbols
# add_eos : true/false, if true add the EOS symbol at end
#
# Output
# output : [m or m+1 LongTensor] indices of each symbol (plus EOS if appropriate)
mylist = copy(mylist)
if add_eos:
mylist.append(EOS_token)
indices = [self.symbol2index[s] for s in mylist]
output = torch.LongTensor(indices)
#if USE_CUDA:
output = output.cuda()
return output
def symbolsFromVector(self, v):
# Convert indices to symbols, breaking where we get a EOS token
#
# Input
# v : list of m indices
#
# Output
# mylist : list of m or m-1 symbols (excluding EOS)
mylist = []
for x in v:
s = self.index2symbol[x]
if s == EOS_token:
break
mylist.append(s)
return mylist
def encode_scan_file(self, data, max_length):
encoded_data = []
for dp in data:
input, output = dp[0], dp[1]
if self.io_type == 'input':
raw = input
else:
assert self.io_type == 'output'
raw = output
encoded = self.variableFromSymbols(raw.split(' '))
encoded_data.append(encoded)
return encoded_data
def encode_scan_file_2_seg(self, data, max_length, cutoffs):
encoded_data_1, encoded_data_2 = [], []
for _id, dp in enumerate(data):
input, output, cutoff = dp[0], dp[1], cutoffs[_id]
assert self.io_type == 'output'
raw = output
encoded_1 = self.variableFromSymbols(raw.split(' ')[:cutoff])
encoded_2 = self.variableFromSymbols(raw.split(' ')[cutoff:])
encoded_data_1.append(encoded_1)
encoded_data_2.append(encoded_2)
return encoded_data_1, encoded_data_2
def encode_cfq_file(self, data, max_length):
encoded_data = []
for dp in data:
input, output = dp['query_ids'], dp['sparql_ids']
if self.io_type == 'input':
raw = input
else:
assert self.io_type == 'output'
raw = output + [self.eos_id]
encoded = torch.LongTensor(raw).cuda()
encoded_data.append(encoded)
return encoded_data
def encode_cogs_file(self, data, max_length):
encoded_data = []
for dp in data:
input, output = dp['src'], dp['trg']
if self.io_type == 'input':
raw = input
else:
assert self.io_type == 'output'
raw = output
encoded = self.variableFromSymbols(raw.split(' '))
encoded_data.append(encoded)
return encoded_data
def decode(self, ids):
out = self.symbolsFromVector(ids.cpu().numpy())
if out == []:
return out
if out[0] in ['<SOS>', '<SOS_2>']:
out = out[1:]
return out
def calculate_accuracy(preds, gts):
assert len(preds) == len(gts)
match = 0
for pred, gt in zip(preds, gts):
if pred == gt:
match += 1
return match / len(preds)
def encode_file(tokenizer, data_path, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None):
examples = []
if data_path[-3:] == '.gz':
print('Data file is gzipped')
f = gzip.open(data_path, "rt")
else:
print('Data file is plain text')
print(data_path)
f = open(data_path, "r", encoding='utf-8')
for i, text in enumerate(f.readlines()):
tokenized = tokenizer.batch_encode_plus( [text + ' </s>'], max_length=max_length,
pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
if max_examples and i >= max_examples:
break
examples.append(tokenized)
f.close()
return examples
# def encode_file_iterator(tokenizer, data_path, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None):
# '''
# This provides a low-memory usage way of iterating thru all of the source/target lines for processing by JIT loader.
# '''
# if data_path[-3:] == '.gz':
# print('Data file is gzipped')
# f = gzip.open(data_path, "rt")
# else:
# print('Data file is plain text')
# f = open(data_path, "r", encoding='utf-8')
#
# for i, text in enumerate(f):
#
# tokenized = tokenizer.batch_encode_plus( [text + ' </s>'], max_length=max_length,
# pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
#
# yield tokenized
#
# if max_examples and i >= max_examples:
# break
#
# f.close()
# def convert_scan_actions_to_text(actions):
# return ' '.join([ACTION_TO_TEXT[_action] for _action in actions.split(' ')])
# def encode_scan_file(tokenizer, data, io_type, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None):
# examples = []
# # a = tokenizer.batch_encode_plus( ['right jump left run walk look' + ' <s> </s>'], max_length=max_length,
# # pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
# # print(a)
# # exit()
# for dp in data:
# input, output = dp[0], dp[1]
# if io_type == 'input':
# raw = input
# else:
# assert io_type == 'output'
# raw = convert_scan_actions_to_text(output)
#
# tokenized = tokenizer.batch_encode_plus( [raw + ' </s>'], max_length=max_length,
# pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
#
# if max_examples and i >= max_examples:
# break
# examples.append(tokenized)
#
# return examples
def load_scan_file(mytype, split):
# Load SCAN dataset from file
#
# Input
# mytype : type of SCAN experiment
# split : 'train' or 'test'
#
# Output
# commands : list of input/output strings (as tuples)
assert mytype in ['simple', 'addprim_jump', 'length', 'addprim_turn_left', 'all', 'template_around_right', 'viz',
'examine', 'template_jump_around_right', 'template_right', 'template_around_right',
'mcd1', 'mcd2', 'mcd3', 'mcd1.1', 'mcd1.2', 'debug', 'attn_vis']
assert split in ['train', 'test', 'val']
if split == 'val' and mytype not in ['mcd1', 'mcd2', 'mcd3', 'mcd1.1', 'mcd1.2']:
split = 'test'
fn = 'data/scan/tasks_' + split + '_' + mytype + '.txt'
fid = open(fn, 'r')
lines = fid.readlines()
fid.close()
lines = [l.strip() for l in lines]
    lines = [l[len('IN: '):] if l.startswith('IN: ') else l for l in lines]  # strip the literal 'IN: ' prefix (str.lstrip strips characters, not a prefix)
commands = [l.split(' OUT: ') for l in lines]
return commands
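# The SCAN task files are expected to contain lines of the form (example made
# up to illustrate the parsing above):
#
#   IN: jump twice after walk OUT: I_WALK I_JUMP I_JUMP
#
# so each entry of `commands` becomes an (input, output) pair such as
# ('jump twice after walk', 'I_WALK I_JUMP I_JUMP').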
class CompositionDataset(Dataset):
def __init__(
self,
src_lang,
trg_lang,
data_dir,
type_path,
sub_task,
max_source_length=20,
max_target_length=20,
tokenized=False,
):
super().__init__()
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.tokenized = tokenized
self.src_lang = src_lang
self.trg_lang = trg_lang
def __len__(self):
if self.tokenized:
return len(self.dataset)
else:
return len(self.source)
def __getitem__(self, index):
if self.tokenized:
dp = self.dataset[index]
source_ids, src_mask, target_ids = dp[0], dp[1], dp[2]
source_ids = source_ids[:self.max_source_length]
#src_mask = src_mask[:self.max_source_length]
target_ids = target_ids[:self.max_target_length]
else:
source_ids = self.source[index]
target_ids = self.target[index]
return {"source_ids": source_ids, "target_ids": target_ids}
@staticmethod
def trim_seq2seq_batch(batch, src_pad_token_id, trg_pad_token_id, trim_y=True):
if trim_y:
y = trim_batch(batch["target_ids"], trg_pad_token_id)
else:
y = batch["target_ids"]
source_ids, source_mask = trim_batch(batch["source_ids"], src_pad_token_id, attention_mask=batch["source_mask"])
return source_ids, source_mask, y
def pad_to_max_len(self, ids, max_len, pad_token_id):
ids_length = ids.size(0)
if ids_length == max_len:
return ids
pad_tokens = torch.tensor([pad_token_id] * (max_len - ids_length))
# if ids.type() == 'torch.cuda.FloatTensor':
# print(ids)
# exit()
padded_ids = torch.cat([ids, pad_tokens.cuda()])
return padded_ids
def create_mask(self, ids, max_len):
ids_length = ids.size(0)
mask = torch.tensor([1] * ids_length + [0] * (max_len - ids_length)).cuda()
return mask
def collate_fn(self, batch):
max_src_len = max(map(len, [x["source_ids"] for x in batch]))
max_trg_len = max(map(len, [x["target_ids"] for x in batch]))
src_mask = torch.stack([self.create_mask(x["source_ids"], max_src_len) for x in batch])
src_ids = torch.stack([self.pad_to_max_len(x["source_ids"], max_src_len, self.src_lang.pad_token_id) for x in batch])
#masks = torch.stack([x["source_mask"] for x in batch])
trg_ids = torch.stack([self.pad_to_max_len(x["target_ids"], max_trg_len, self.trg_lang.pad_token_id) for x in batch])
y = trim_batch(trg_ids, self.trg_lang.pad_token_id)
src_ids, src_mask = trim_batch(src_ids, self.src_lang.pad_token_id, attention_mask=src_mask)
return {"source_ids": src_ids, "source_mask": src_mask, "target_ids": y}
class ScanDataset(CompositionDataset):
def __init__(
self,
src_lang,
trg_lang,
data_dir="./data/scan/",
type_path="train",
sub_task="addprim_jump",
max_source_length=20,
max_target_length=20,
tokenized=False,
):
super().__init__(src_lang, trg_lang, data_dir, type_path, sub_task, max_source_length,
max_target_length, tokenized)
scan_data = load_scan_file(sub_task, type_path)
print(len(scan_data))
all_scan_dict = self.convert_to_dict(load_scan_file('all', 'train'))
self.action_count_labels, self.action_group_labels, self.action_type_labels = self.construct_count_label(scan_data, all_scan_dict)
if not tokenized:
self.source = self.src_lang.encode_scan_file(scan_data, max_source_length)
self.target = self.trg_lang.encode_scan_file(scan_data, max_target_length)
else:
self.dataset = torch.load(os.path.join(data_dir, type_path))
def construct_count_label(self, raw_data, all_data_dict):
all_count_labels = []
count_label_scheme = "v1"
group_label_scheme = "v2"
type_label_scheme = "v2"
all_action_group_labels, all_action_type_labels = [], []
# Group 1: single prim (jump), Group 2: prim + direction (jump left), Group 3: prim opposite, Group 4: prim around
#no_skip_id = np.random.randint(0, len(raw_data), int(len(raw_data)*0.05))
#no_skip_id = np.random.choice(range(len(raw_data)), int(len(raw_data)*0.07), replace=False)
# no_skip_id = np.random.choice(range(len(raw_data)), 10, replace=False)
skip_cnt, sup_cnt = 0, 0
for _id, dp in enumerate(raw_data):
input_text, output_text = dp[0], dp[1]
input_tok, output_tok = input_text.split(' '), output_text.split(' ')
count_labels, group_labels, type_labels = [], [], []
first_part_output_text, second_part_output_text = '', ''
if 'and' in input_tok:
first_part_input_tok = input_tok[:input_tok.index('and')]
second_part_input_tok = input_tok[input_tok.index('and')+1:]
first_part_output_text = all_data_dict[' '.join(first_part_input_tok)]
second_part_output_text = all_data_dict[' '.join(second_part_input_tok)]
elif 'after' in input_tok:
second_part_input_tok = input_tok[:input_tok.index('after')]
first_part_input_tok = input_tok[input_tok.index('after') + 1:]
first_part_output_text = all_data_dict[' '.join(first_part_input_tok)]
second_part_output_text = all_data_dict[' '.join(second_part_input_tok)]
else:
first_part_input_tok, second_part_input_tok = input_tok, []
first_part_output_text = output_text
first_part_output_tok, second_part_output_tok = first_part_output_text.split(' '), second_part_output_text.split(' ')
if second_part_output_text == '':
second_part_output_tok = []
assert len(first_part_output_tok) + len(second_part_output_tok) == len(output_tok), \
(len(first_part_output_tok), len(second_part_output_tok), len(output_tok), first_part_output_text, second_part_output_text, output_text)
### 1. Build the action count labels ###
if count_label_scheme == 'v1':
### For the first part output
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([4] * int(len(first_part_output_tok) / 2) + [3] * int(len(first_part_output_tok) / 2))
else:
count_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(len(first_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([5] * int(len(first_part_output_tok) / 3) + [4] * int(len(first_part_output_tok) / 3) + \
[3] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([3] * len(first_part_output_tok))
else:
count_labels += ([0] * len(first_part_output_tok))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok)) - 1))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(len(second_part_output_tok) / 2))
else:
count_labels += ([4] * int(len(second_part_output_tok) / 2) + [3] * int(len(second_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(len(second_part_output_tok) / 3) + \
[0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([5] * int(len(second_part_output_tok) / 3) + [4] * int(len(second_part_output_tok) / 3) + \
[3] * int(len(second_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([0] * len(second_part_output_tok))
else:
count_labels += ([3] * len(second_part_output_tok))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok)) - 1))
elif count_label_scheme == 'v2':
### For the first part output
if 'twice' in first_part_input_tok:
count_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(
len(first_part_output_tok) / 2))
elif 'thrice' in first_part_input_tok:
count_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(
len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([0] * len(first_part_output_tok))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(
len(second_part_output_tok) / 2))
elif 'thrice' in second_part_input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(
len(second_part_output_tok) / 3) + [0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([0] * len(second_part_output_tok))
elif count_label_scheme == 'v3':
### For the first part output
if 'thrice' in first_part_input_tok and 'thrice' in second_part_input_tok:
start_count = 5
elif ('thrice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('twice' in first_part_input_tok and 'thrice' in second_part_input_tok):
start_count = 4
elif ('twice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('thrice' in first_part_input_tok) or ('thrice' in second_part_input_tok):
start_count = 3
elif 'twice' in first_part_input_tok or 'twice' in second_part_input_tok:
start_count = 2
else:
start_count = 1
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 2) + [start_count-1] * int(len(first_part_output_tok) / 2))
else:
count_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(len(first_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 3) + [start_count-1] * int(len(first_part_output_tok) / 3) + \
[start_count-2] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([start_count] * len(first_part_output_tok))
else:
count_labels += ([0] * len(first_part_output_tok))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok)) - 1))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(len(second_part_output_tok) / 2))
else:
count_labels += ([start_count] * int(len(second_part_output_tok) / 2) + [start_count-1] * int(len(second_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(len(second_part_output_tok) / 3) + \
[0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([start_count] * int(len(second_part_output_tok) / 3) + [start_count-1] * int(len(second_part_output_tok) / 3) + \
[start_count-2] * int(len(second_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([0] * len(second_part_output_tok))
else:
count_labels += ([start_count] * len(second_part_output_tok))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok)) - 1))
elif count_label_scheme == 'v3.1':
### For the first part output
if 'thrice' in first_part_input_tok and 'thrice' in second_part_input_tok:
start_count = 5
elif ('thrice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('twice' in first_part_input_tok and 'thrice' in second_part_input_tok):
start_count = 4
elif ('twice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('thrice' in first_part_input_tok) or ('thrice' in second_part_input_tok):
start_count = 3
elif 'twice' in first_part_input_tok or 'twice' in second_part_input_tok:
start_count = 2
else:
start_count = 1
if 'twice' in first_part_input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 2) + [start_count - 1] * int(
len(first_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in first_part_input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 3) + [start_count - 1] * int(
len(first_part_output_tok) / 3) + \
[start_count - 2] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([start_count] * len(first_part_output_tok))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(
len(second_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in second_part_input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(
len(second_part_output_tok) / 3) + \
[0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([0] * len(second_part_output_tok))
else:
### For the first part output
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
else:
new_count_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
else:
new_count_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
else:
if 'after' in input_tok:
new_count_labels = list(range(len(first_part_output_tok)))[::-1]
else:
new_count_labels = list(range(len(first_part_output_tok)))[::-1]
count_labels += new_count_labels
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
new_count_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
new_count_labels = [_c + 8 for _c in new_count_labels]
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
new_count_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
if 'after' in input_tok:
new_count_labels = list(range(len(second_part_output_tok)))[::-1]
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
new_count_labels = list(range(len(second_part_output_tok)))[::-1]
new_count_labels = [_c + 8 for _c in new_count_labels]
count_labels += new_count_labels
# count_labels = []
# count_labels += list(range(len(first_part_output_tok)))[::-1]
# count_labels += list(range(len(second_part_output_tok)))[::-1]
assert len(count_labels) == len(output_tok), (len(count_labels), len(output_tok), input_text, first_part_input_tok, count_labels, output_tok,
first_part_output_text, first_part_output_tok, second_part_output_text, second_part_output_tok)
count_labels.append(-1) # For the EOS token
# count_labels.append(7) # For the EOS token
### 2. Build the action group labels ###
if group_label_scheme == 'v1': ## As used in exp 9.0-9.4
if 'around' in first_part_input_tok:
if 'after' in input_tok:
group_labels += ([4] * len(first_part_output_tok))
else:
group_labels += ([0] * len(first_part_output_tok))
elif 'opposite' in first_part_input_tok:
if 'after' in input_tok:
group_labels += ([5] * len(first_part_output_tok))
else:
group_labels += ([1] * len(first_part_output_tok))
elif 'left' in first_part_input_tok or 'right' in first_part_input_tok:
if 'after' in input_tok:
group_labels += ([6] * len(first_part_output_tok))
else:
group_labels += ([2] * len(first_part_output_tok))
else:
if 'after' in input_tok:
group_labels += ([7] * len(first_part_output_tok))
else:
group_labels += ([3] * len(first_part_output_tok))
if 'around' in second_part_input_tok:
if 'after' in input_tok:
group_labels += ([0] * len(second_part_output_tok))
else:
group_labels += ([4] * len(second_part_output_tok))
elif 'opposite' in second_part_input_tok:
if 'after' in input_tok:
group_labels += ([1] * len(second_part_output_tok))
else:
group_labels += ([5] * len(second_part_output_tok))
elif 'left' in second_part_input_tok or 'right' in second_part_input_tok:
if 'after' in input_tok:
group_labels += ([2] * len(second_part_output_tok))
else:
group_labels += ([6] * len(second_part_output_tok))
else:
if 'after' in input_tok:
group_labels += ([3] * len(second_part_output_tok))
else:
group_labels += ([7] * len(second_part_output_tok))
else:
### For the first part output
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
new_group_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
new_group_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
else:
if 'after' in input_tok:
new_group_labels = list(range(len(first_part_output_tok)))[::-1]
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
new_group_labels = list(range(len(first_part_output_tok)))[::-1]
group_labels += new_group_labels
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
else:
new_group_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
new_group_labels = [_c + 8 for _c in new_group_labels]
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
else:
new_group_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
if 'after' in input_tok:
new_group_labels = list(range(len(second_part_output_tok)))[::-1]
else:
new_group_labels = list(range(len(second_part_output_tok)))[::-1]
new_group_labels = [_c + 8 for _c in new_group_labels]
group_labels += new_group_labels
assert len(group_labels) == len(output_tok)
group_labels.append(-1) # For the EOS token
# group_labels.append(17) # For the EOS token
### 3. Build the action type labels ###
### For the first part output
if type_label_scheme == 'v1':
if 'around' in first_part_input_tok:
new_type_labels = [3] * len(first_part_output_tok)
elif 'opposite' in first_part_input_tok:
new_type_labels = [2] * len(first_part_output_tok)
elif 'left' in first_part_input_tok or 'right' in first_part_input_tok:
new_type_labels = [1] * len(first_part_output_tok)
else:
new_type_labels = [0] * len(first_part_output_tok)
# if 'after' in input_tok:
# new_type_labels = [_c + 4 for _c in new_type_labels]
type_labels += new_type_labels
### For the second part output
if len(second_part_output_tok) > 0:
if 'around' in second_part_input_tok:
new_type_labels = [3] * len(second_part_output_tok)
elif 'opposite' in second_part_input_tok:
new_type_labels = [2] * len(second_part_output_tok)
elif 'left' in second_part_input_tok or 'right' in second_part_input_tok:
new_type_labels = [1] * len(second_part_output_tok)
else:
new_type_labels = [0] * len(second_part_output_tok)
# if 'after' not in input_tok:
# new_type_labels = [_c + 4 for _c in new_type_labels]
type_labels += new_type_labels
elif type_label_scheme == 'v2':
if 'twice' in first_part_input_tok:
type_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(
len(first_part_output_tok) / 2))
elif 'thrice' in first_part_input_tok:
type_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(
len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
else:
type_labels += ([0] * len(first_part_output_tok))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
type_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(
len(second_part_output_tok) / 2))
elif 'thrice' in second_part_input_tok:
type_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(
len(second_part_output_tok) / 3) + [0] * int(len(second_part_output_tok) / 3))
else:
type_labels += ([0] * len(second_part_output_tok))
assert len(type_labels) == len(output_tok)
type_labels.append(-1) # For the EOS token
# group_labels.append(17) # For the EOS token
# if _id not in no_skip_id:
# count_labels = [-1] * len(count_labels)
# group_labels = [-1] * len(group_labels)
# skip_cnt += 1
# else:
# sup_cnt += 1
all_action_type_labels.append(torch.tensor(type_labels).cuda())
all_count_labels.append(torch.tensor(count_labels).cuda())
all_action_group_labels.append(torch.tensor(group_labels).cuda())
print(skip_cnt, sup_cnt)
return all_count_labels, all_action_group_labels, all_action_type_labels
def convert_to_dict(self, raw_data):
dict_data = {}
for dp in raw_data:
input, output = dp[0], dp[1]
assert input not in dict_data
dict_data[input] = output
return dict_data
def __getitem__(self, index):
if self.tokenized:
dp = self.dataset[index]
source_ids, src_mask, target_ids = dp[0], dp[1], dp[2]
source_ids = source_ids[:self.max_source_length]
#src_mask = src_mask[:self.max_source_length]
target_ids = target_ids[:self.max_target_length]
else:
source_ids = self.source[index]
target_ids = self.target[index]
count_labels = self.action_count_labels[index]
group_labels = self.action_group_labels[index]
type_labels = self.action_type_labels[index]
return {"source_ids": source_ids, "target_ids": target_ids, "action_count_labels": count_labels,
"action_group_labels": group_labels, "action_type_labels": type_labels}
@staticmethod
def trim_seq2seq_batch(batch, src_pad_token_id, trg_pad_token_id, trim_y=True):
if trim_y:
y = trim_batch(batch["target_ids"], trg_pad_token_id)
else:
y = batch["target_ids"]
source_ids, source_mask = trim_batch(batch["source_ids"], src_pad_token_id, attention_mask=batch["source_mask"])
return source_ids, source_mask, y
def collate_fn(self, batch):
max_src_len = max(map(len, [x["source_ids"] for x in batch]))
max_trg_len = max(map(len, [x["target_ids"] for x in batch]))
src_mask = torch.stack([self.create_mask(x["source_ids"], max_src_len) for x in batch])
trg_mask = torch.stack([self.create_mask(x["target_ids"], max_trg_len) for x in batch])
src_ids = torch.stack([self.pad_to_max_len(x["source_ids"], max_src_len, self.src_lang.pad_token_id) for x in batch])
#masks = torch.stack([x["source_mask"] for x in batch])
trg_ids = torch.stack([self.pad_to_max_len(x["target_ids"], max_trg_len, self.trg_lang.pad_token_id) for x in batch])
action_count_labels = torch.stack([self.pad_to_max_len(x["action_count_labels"], max_trg_len, -1) for x in batch])
action_group_labels = torch.stack([self.pad_to_max_len(x["action_group_labels"], max_trg_len, -1) for x in batch])
action_type_labels = torch.stack(
[self.pad_to_max_len(x["action_type_labels"], max_trg_len, -1) for x in batch])
y = trim_batch(trg_ids, self.trg_lang.pad_token_id)
#action_count_labels = trim_batch(action_count_labels, -1)
# _src_ids, src_mask = trim_batch(src_ids, self.src_lang.pad_token_id, attention_mask=src_mask)
# print(_src_ids.size(), src_ids.size())
return {"source_ids": src_ids, "source_mask": src_mask, "target_ids": y, "target_mask": trg_mask,
"action_count_labels": action_count_labels, "action_group_labels": action_group_labels,
"action_type_labels": action_type_labels}
| 2.328125 | 2 |
bridger/serializers/fields/related.py | intellineers/django-bridger | 2 | 9921 | from typing import Dict
from rest_framework import serializers
from rest_framework.fields import empty
from rest_framework.relations import ManyRelatedField
from rest_framework.request import Request
from .mixins import BridgerSerializerFieldMixin
from .types import BridgerType, ReturnContentType
class BridgerManyRelatedField(ManyRelatedField):
def __init__(self, *args, **kwargs):
required = kwargs.get("required", True)
if not required:
kwargs["allow_null"] = True
super().__init__(*args, **kwargs)
def run_validation(self, data=empty):
# If the data is send through form data, we need to convert the data into a proper list of ids
if data not in [None, empty] and len(data) == 1 and isinstance(data[0], str) and "," in data[0]:
data = data[0].split(",")
# If the data is a list of an empty string we need to convert it (FORM DATA)
if data not in [None, empty] and len(data) == 1 and isinstance(data[0], str) and data[0] == "":
data = []
# If the data is a list and contains the string null, then we need to convert it (FORM DATA)
if data == ["null"]:
data = []
# If the data is None and null is an allowed value, data needs to be set to an empty list
if data is None and self.allow_null:
data = []
return super().run_validation(data)
def get_representation(self, request: Request, field_name: str) -> Dict:
representation = self.child_relation.get_representation(request, field_name)
representation["multiple"] = True
return representation
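# Form-data normalisation performed by BridgerManyRelatedField.run_validation
# (values are illustrative):
#
#   ["1,2,3"]  -> ["1", "2", "3"]   # comma-joined ids submitted as form data
#   [""]       -> []                # empty form field
#   ["null"]   -> []                # explicit null marker
#   None       -> []                # only when allow_null is set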
class PrimaryKeyRelatedField(BridgerSerializerFieldMixin, serializers.PrimaryKeyRelatedField):
MANY_RELATION_KWARGS = (
"read_only",
"write_only",
"required",
"default",
"initial",
"source",
"label",
"help_text",
"style",
"error_messages",
"allow_empty",
"html_cutoff",
"html_cutoff_text",
"allow_null",
)
def __init__(self, *args, **kwargs):
self.field_type = kwargs.pop("field_type", BridgerType.SELECT.value)
super().__init__(*args, **kwargs)
def __new__(cls, *args, **kwargs):
kwargs["style"] = {"base_template": "input.html"}
return super().__new__(cls, *args, **kwargs)
@classmethod
def many_init(cls, *args, **kwargs):
list_kwargs = {"child_relation": cls(*args, **kwargs)}
for key in kwargs:
if key in cls.MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return BridgerManyRelatedField(**list_kwargs)
def run_validation(self, data=empty):
if isinstance(data, str) and data == "null":
data = None
if data is empty:
parent_model_id = self.parent.context["view"].kwargs.get(f"{self.field_name}_id")
if parent_model_id:
data = parent_model_id
return super().run_validation(data)
class ListSerializer(serializers.ListSerializer):
"""
A Wrapper around the normal DRF ListSerializer which also return the child representation
"""
def get_representation(self, request: Request, field_name: str) -> Dict:
representation = self.child.get_representation(request, field_name)
representation["multiple"] = True
representation["related_key"] = self.source
return representation
| 2.3125 | 2 |
python/graphscope/experimental/nx/tests/algorithms/forward/operators/test_product.py | wenyuanyu/GraphScope | 2 | 9922 | <filename>python/graphscope/experimental/nx/tests/algorithms/forward/operators/test_product.py
import networkx.algorithms.operators.tests.test_product
import pytest
from graphscope.experimental.nx.utils.compat import import_as_graphscope_nx
# `import_as_graphscope_nx` re-exports the upstream networkx test module with
# graphscope's nx substituted; the `nx` name used in the tests below is
# expected to be provided by that compat machinery.
import_as_graphscope_nx(networkx.algorithms.operators.tests.test_product,
                        decorators=pytest.mark.usefixtures("graphscope_session"))
def test_tensor_product_combinations():
# basic smoke test, more realistic tests would be useful
P5 = nx.path_graph(5)
K3 = nx.complete_graph(3)
G = nx.tensor_product(P5, K3)
assert nx.number_of_nodes(G) == 5 * 3
G = nx.tensor_product(nx.DiGraph(P5), nx.DiGraph(K3))
assert nx.number_of_nodes(G) == 5 * 3
@pytest.mark.skip(reason="not support multigraph")
def test_cartesian_product_multigraph():
pass
def test_lexicographic_product_combinations():
P5 = nx.path_graph(5)
K3 = nx.complete_graph(3)
G = nx.lexicographic_product(P5, K3)
assert nx.number_of_nodes(G) == 5 * 3
def test_strong_product_combinations():
P5 = nx.path_graph(5)
K3 = nx.complete_graph(3)
G = nx.strong_product(P5, K3)
assert nx.number_of_nodes(G) == 5 * 3
@pytest.mark.skip(reason="not support multigraph")
def test_graph_power_raises():
pass
| 2.375 | 2 |
Footy/UnsupportedBantzStrings.py | schleising/banter-bot | 0 | 9923 | <reponame>schleising/banter-bot
# {team} -> Name of team
# {name} -> Name of person who supports team
teamMatchStarted: list[str] = [
"{team} are shit",
"{team} cunts",
"Dirty {team}",
"Dirty {team}, dirty {name}",
]
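# These templates are presumably filled in with str.format, e.g. (hypothetical
# values): teamMatchStarted[3].format(team="Spurs", name="Dave")
# -> "Dirty Spurs, dirty Dave"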
drawing: list[str] = [
"{team} level, this is a shit match",
"Boring old {team}",
"Happy with how it's going, {name}?",
"Yawn...",
"{team} wankers",
"How can you support this rubbish, {name}?",
"You get the feeling that {team} don't really want this",
"No passion from {team}, {name}",
"If a game of football is like making love to a beautiful woman, this {team} game is a £10 hand job from a swivel-eyed misfit",
"This {team} match is like a game of chess. But with more players and only one piece",
]
teamLeadByOne: list[str] = [
"{team} cheats, the ref's a cunt",
"That was never a goal for {team}",
"{team} don't deserve that",
"Bollocks",
"That should go to VAR",
"Bit fortunuate for {team}",
"Can't imagine {team} will keep this lead",
"Lucky goal for {team}",
"{team} got lucky there",
"{team} aren't good enough to stay ahead",
"Offside!",
]
teamExtendingLead: list[str] = [
"There's no way {team} deserve this lead",
"Have {team} paid the ref?",
"This is bullshit",
"The ref's a cunt, {name}'s a cunt",
"The ref's a cunt, {team} are cunts, {name}'s a cunt",
"Something has gone seriously wrong with this country",
"When I voted for Brexit, I didn't vote for this",
"At least Boris remains in charge, we've still got that",
"<NAME> would be turning in his grave",
"Liberal elite bullshit",
"That was so offside",
"VAR!",
"Is the linesman OK?",
"If only {name}'s wife was as dirty as this game",
]
teamLosingLead: list[str] = [
"Lazy old {team}, lazy old {name}",
"{team} are throwing it away",
"{team} are rubbish",
"{team} fucking it up again",
"We really are being treated to some world class flouncing from {team} today",
"Brace yourself, {name}. This is going to hurt",
"This is brown trouser time for {team}",
"I hope {name} brought a spare pair of underpants",
"I see {team} are playing their B Team. B for Bullshit",
]
teamDeficitOfOne: list[str] = [
"This is more like it from {team}",
"Oh dear...",
"{team} wankers",
"How are you feeling, {name}?",
"Bit disappointing, {name}?",
"Not looking good for {team}, {name}",
"You must be furious, {name}",
"{team} have just got no heart",
"This is what happens when you try to buy success",
"All that money spent, {name}, and for what?",
]
teamExtendingDeficit: list[str] = [
"Starting to feel a bit sorry for {team}",
"Never mind, {name}, there's always the next game",
"Poor {team}",
"Whoops...",
"Oh dear, everything OK, {name}?",
"Hey {name}, where'd you get your keeper?\nPOUNDSHOP !! POUNDSHOP !!",
"{team} can't raise themselves for this game, typical",
"A team like {team} have such a proud history, but what we see today is just embarrassing",
"{team} clearly not up for it today",
"{team} are letting you down, {name}",
"Watching {team} is like watching a bunch of cavemen: Neanderthal",
]
teamLosingDeficit: list[str] = [
"Too little too late for {team}",
"{team} won't come back from here",
"The ref's a cunt",
"This is awful",
"What a mess",
"Well this is an unmitigated shit show",
]
teamWon: list[str] = [
"That was a shit game",
"There's no way {team} deserved that",
"Fuck you, {name} !!",
"This will go down in history...\nAs the most tedious game I have ever had the misfortune to witness",
]
teamLost: list[str] = [
"Justice done, {team} lost",
"Job done for {team}?",
"Job done, {name}?",
"{name} !!?",
"Probably the best {team} could hope for",
"Everything OK, {name}?",
"{team} continue to disappoint",
"Well if the football thing doesn't work out for {team}, they can always consider a career on the stage",
"{team} set the bar low",
"{team} fail to meet their already low expectations",
]
teamDrew: list [str] = [
"Another uninspiring result for {team}",
"Thanks for nothing, {team}",
"Well that's 90 minutes we won't get back, thanks {team}",
"Another draw for {team}",
"Boring old {team}",
"You should be happy with that result, {name}",
"If I could pick one highlight from this {team} game it would be when it finally ended.",
"I think {name} will be happy with {team}'s performance today.",
]
| 2.875 | 3 |
bench/fibrec.py | codr7/alisp | 8 | 9924 | from bench import bench
print(bench(100, '''
def fib(n):
return n if n < 2 else fib(n-1) + fib(n-2)
''', '''
fib(20)
'''))
| 2.890625 | 3 |
nlpir/native/classifier.py | NLPIR-team/nlpir-python | 18 | 9925 | # coding=utf-8
from nlpir.native.nlpir_base import NLPIRBase
from ctypes import c_bool, c_char_p, c_int, POINTER, Structure, c_float
class StDoc(Structure):
    _fields_ = [  # ctypes requires the single-underscore name ``_fields_``
("sTitle", c_char_p),
("sContent", c_char_p),
("sAuthor", c_char_p),
("sBoard", c_char_p),
("sDatatype", c_char_p)
]
class Classifier(NLPIRBase):
@property
def dll_name(self):
return "LJClassifier"
@NLPIRBase.byte_str_transform
def init_lib(self, data_path: str, encode: int, license_code: str) -> int:
"""
Call **classifier_init**
:param data_path:
:param encode:
:param license_code:
:return: 1 success 0 fail
"""
return self.get_func("classifier_init", [c_char_p, c_char_p, c_int, c_char_p], c_bool)(
"rulelist.xml", data_path, encode, license_code)
@NLPIRBase.byte_str_transform
def exit_lib(self) -> bool:
"""
Call **classifier_exit**
:return: exit success or not
"""
return self.get_func("classifier_exit", None, None)()
@NLPIRBase.byte_str_transform
def get_last_error_msg(self) -> str:
return self.get_func("classifier_GetLastErrorMsg", None, c_char_p)()
@NLPIRBase.byte_str_transform
def exec_1(self, data: StDoc, out_type: int = 0):
"""
Call **classifier_exec1**
        Classify the given document structure.
        :param data: the document structure to classify
        :param out_type: whether the output includes confidence scores, 0 = no confidence, 1 = with confidence
        :return: topic category string; categories are separated by '\t' and sorted by confidence in descending order,
            e.g. "要闻 敏感 诉讼" or "要闻 1.00 敏感 0.95 诉讼 0.82"
"""
return self.get_func("classifier_exec1", [POINTER(StDoc), c_int], c_char_p)(data, out_type)
@NLPIRBase.byte_str_transform
def exec(self, title: str, content: str, out_type: int):
"""
Call **classifier_exec**
        Classify the given document text.
        :param title: document title
        :param content: document content
        :param out_type: whether the output includes confidence scores, same as :func:`exec_1`
        :return: same as :func:`exec_1`
"""
return self.get_func("classifier_exec", [c_char_p, c_char_p, c_int], c_char_p)(title, content, out_type)
@NLPIRBase.byte_str_transform
def exec_file(self, filename: str, out_type: int) -> str:
"""
Call **classifier_execFile**
        :param filename: path of the file to classify
        :param out_type: whether the output includes confidence scores, 0 = no confidence, 1 = with confidence
        :return: topic category string; categories are separated by '\t' and sorted by confidence in descending order,
            e.g. "要闻 敏感 诉讼" or "要闻 1.00 敏感 0.95 诉讼 0.82"
"""
return self.get_func("classifier_execFile", [c_char_p, c_int], c_char_p)(filename, out_type)
@NLPIRBase.byte_str_transform
def detail(self, class_name: str):
"""
Call **classifier_detail**
        For the current document, look up the classification details for the given class name.
        :param class_name: name of the result class
        :return: result details, for example:
::
RULE3:
SUBRULE1: 内幕 1
SUBRULE2: 股市 1 基金 3 股票 8
SUBRULE3: 书摘 2
"""
return self.get_func("classifier_detail", [c_char_p], c_char_p)(class_name)
@NLPIRBase.byte_str_transform
def set_sim_thresh(self, sim: float):
"""
Call **classifier_setsimthresh**
        Set the similarity threshold.
        :param sim: threshold value
:return:
"""
return self.get_func("classifier_setsimthresh", [c_float])(sim)
| 2.3125 | 2 |
tests/param/get_param_type_spec_test.py | nickgaya/bravado-core | 122 | 9926 | # -*- coding: utf-8 -*-
import pytest
from mock import Mock
from bravado_core.exception import SwaggerMappingError
from bravado_core.operation import Operation
from bravado_core.param import get_param_type_spec
from bravado_core.param import Param
from bravado_core.spec import Spec
@pytest.fixture
def body_param_spec():
return {
'name': 'body',
'in': 'body',
'description': 'pet id',
'required': True,
'schema': {
'type': 'string',
},
}
def test_location_is_body(empty_swagger_spec, body_param_spec):
param = Param(empty_swagger_spec, Mock(spec=Operation), body_param_spec)
assert body_param_spec['schema'] == get_param_type_spec(param)
def test_location_is_not_body(empty_swagger_spec):
for location in ('path', 'query', 'header', 'formData',):
param_spec = {
'name': 'petId',
'in': location,
'description': 'ID of pet that needs to be updated',
'required': True,
'type': 'string',
}
param = Param(empty_swagger_spec, Mock(spec=Operation), param_spec)
assert param_spec == get_param_type_spec(param)
def test_location_invalid(empty_swagger_spec, body_param_spec):
body_param_spec['in'] = 'foo'
param = Param(empty_swagger_spec, Mock(spec=Operation), body_param_spec)
with pytest.raises(SwaggerMappingError) as excinfo:
get_param_type_spec(param)
assert 'location foo' in str(excinfo.value)
def test_ref(minimal_swagger_dict, body_param_spec):
minimal_swagger_dict['parameters'] = {
'PetIdParam': body_param_spec,
}
param_ref_spec = {'$ref': '#/parameters/PetIdParam'}
swagger_spec = Spec(minimal_swagger_dict)
param = Param(swagger_spec, Mock(spec=Operation), param_ref_spec)
assert {'type': 'string'} == get_param_type_spec(param)
| 2.078125 | 2 |
functions/markdown-to-html/markdown2html.py | truls/faas-profiler | 0 | 9927 | # Copyright (c) 2019 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from markdown import markdown
import base64
import json
def main(params):
try:
md = json.loads(base64.decodebytes(params["__ow_body"].encode("utf-8")))["markdown"].encode("utf-8")
md_text = base64.decodebytes(md).decode("utf-8")
except KeyError:
return {'Error' : 'Possibly lacking markdown parameter in request.'}
test_id = params["__ow_query"].split("&")[0]
html = markdown(md_text)
return {"result": "ok", "html_response": html, "testid": test_id}
| 2.84375 | 3 |
Python Files/Dataset_Formating/Audio_splicing.py | brennanMosher/Music-Genre-Recognition-using-a-Machine-Learning-Appraoch | 1 | 9928 | from pydub import AudioSegment
import os
import math
from pathlib import Path
'''
Splice wav files into multiple segments.
'''
LENGTH = 3 # Set splice length in seconds
def splice(audioPath, outputPath):
# try:
# os.mkdir('Spliced Spectrogram training') # Need to figure out where to put this
# except OSError:
# print("Creation of the directory failed")
audio = AudioSegment.from_wav(audioPath)
count = math.ceil(audio.duration_seconds/LENGTH) # Do we want the last part of audio?
t1 = 0
t2 = LENGTH*1000
for i in range(count):
newAudio = audio[t1:t2]
newPath = outputPath+Path(audioPath).stem+'_splice'+str(i)+'.wav'
newAudio.export(newPath, format="wav")
t1 = t2
t2 = t2 + LENGTH*1000
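# Example invocation (paths are illustrative; outputPath must end with a
# separator because the two strings are concatenated directly):
#
#   splice("audio/track01.wav", "output/spliced/")
#
# writes output/spliced/track01_splice0.wav, track01_splice1.wav, ... each
# LENGTH seconds long (the final segment may be shorter).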
| 3.453125 | 3 |
manpages.py | mba811/dash-manpages-zh | 1 | 9929 | <reponame>mba811/dash-manpages-zh
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@contact:
@date: 2014/06/23
"""
import os
import sqlite3
import urllib2
import shutil
import tarfile
import hashlib
import codecs
from mako.template import Template
from pyquery import PyQuery
currentPath = os.path.join(os.path.dirname(os.path.realpath(__file__)))
name = "manpages"
baseName = "manpages-zh"
output = baseName + ".docset"
appName = "dash-" + baseName
tarFileName = baseName + ".tgz"
feedName = baseName + ".xml"
version = "1.5.0"
docsetPath = os.path.join(currentPath, output, "Contents", "Resources", "Documents")
# Step 2: Copy the HTML Documentation
fin = codecs.open(os.path.join(docsetPath, "index.html"), "r", "utf-8")
content = fin.read()
fin.close()
jQuery = PyQuery(content)
jQuery.find("body").empty()
fileNames = []
itemTemplate = Template("<a href='html/${fileName}'>${name}</a><br />\n")
for fileName in os.listdir(os.path.join(docsetPath, "html")):
fileNames.append({
"name": fileName.split(".")[0],
"fileName": fileName
})
jQuery.find("body").append(itemTemplate.render(name = fileName.split(".")[0], fileName = fileName))
fin = codecs.open(os.path.join(docsetPath, "index.html"), "w", "utf-8")
newContent = jQuery.html()
fin.write(newContent)
fin.close()
# Step 3: create the Info.plist file
infoTemplate = Template('''<?xml version="1.0" encoding="UTF-8"?>
<plist version="1.0">
<dict>
<key>CFBundleIdentifier</key>
<string>${name}</string>
<key>CFBundleName</key>
<string>${name}</string>
<key>DocSetPlatformFamily</key>
<string>${name}</string>
<key>dashIndexFilePath</key>
<string>index.html</string>
<key>dashIndexFilePath</key>
<string>index.html</string>
<key>isDashDocset</key><true/>
<key>isJavaScriptEnabled</key><true/>
</dict>
</plist>''')
infoPlistFile = os.path.join(currentPath, output, "Contents", "Info.plist")
fin = open(infoPlistFile, "w")
fin.write(infoTemplate.render(name = name))
fin.close()
# Step 4: Create the SQLite Index
dbFile = os.path.join(currentPath, output, "Contents", "Resources", "docSet.dsidx")
if os.path.exists(dbFile):
os.remove(dbFile)
db = sqlite3.connect(dbFile)
cursor = db.cursor()
try:
cursor.execute("DROP TABLE searchIndex;")
except Exception:
pass
cursor.execute('CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);')
cursor.execute('CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);')
insertTemplate = Template("INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES ('${name}', '${type}', '${path}');")
# Step 5: Populate the SQLite Index
for result in fileNames:
sql = insertTemplate.render(name = result["name"], type = "Builtin", path = "html/" + result["fileName"])
print sql
cursor.execute(sql)
db.commit()
db.close()
# Step 6: copy icon
shutil.copyfile(os.path.join(currentPath, "icon.png"),
os.path.join(currentPath, output, "icon.png"))
shutil.copyfile(os.path.join(currentPath, "[email protected]"),
os.path.join(currentPath, output, "[email protected]"))
# Step 7: package the docset into a tarball
if not os.path.exists(os.path.join(currentPath, "dist")):
os.makedirs(os.path.join(currentPath, "dist"))
tarFile = tarfile.open(os.path.join(currentPath, "dist", tarFileName), "w:gz")
for root, dirNames, fileNames in os.walk(output):
for fileName in fileNames:
fullPath = os.path.join(root, fileName)
tarFile.add(fullPath)
tarFile.close()
# Step 8: update the feed url
feedTemplate = Template('''<entry>
<version>${version}</version>
<sha1>${sha1Value}</sha1>
<url>https://raw.githubusercontent.com/magicsky/${appName}/master/dist/${tarFileName}</url>
</entry>''')
fout = open(os.path.join(currentPath, "dist", tarFileName), "rb")
sha1Value = hashlib.sha1(fout.read()).hexdigest()
fout.close()
fin = open(os.path.join(currentPath, feedName), "w")
fin.write(feedTemplate.render(sha1Value = sha1Value, appName = appName, tarFileName = tarFileName, version = version))
fin.close()
| 2.15625 | 2 |
docs/conf.py | alexweav/nisystemlink-clients-python | 0 | 9930 | <reponame>alexweav/nisystemlink-clients-python
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# --------------------------------------------------------------------------------------
project = "nisystemlink"
copyright = "2020, National Instruments"
author = "National Instruments"
# The short X.Y version
version = "0.1"
# The full version, including alpha/beta/rc tags
release = "0.1.3"
# --------------------------------------------------------------------------------------
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx_autodoc_typehints",
"docs.cleanup",
]
master_doc = "index"
html_theme = "sphinx_rtd_theme"
html_extra_path = [
"../LICENSE",
]
nitpicky = True
nitpick_ignore = [
("py:class", "datetime.datetime"),
("py:class", "datetime.timedelta"),
("py:class", "pathlib.Path"),
("py:data", "typing.Any"),
("py:data", "typing.Awaitable"),
("py:data", "typing.Dict"),
("py:data", "typing.Iterable"),
("py:data", "typing.List"),
("py:data", "typing.Optional"),
("py:data", "typing.Sequence"),
("py:data", "typing.Tuple"),
("py:data", "typing.Union"),
]
autodoc_default_options = {
"inherited-members": True,
"special-members": "__init__",
"no-private-members": True,
}
# Don't let napoleon force methods to be included in the docs; use autodoc flags and our
# own docs.cleanup module for that.
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
| 1.359375 | 1 |
multinet/api/views/common.py | multinet-app/multinet-api | 0 | 9931 | from typing import Dict, List
from arango.cursor import Cursor
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.request import Request
from rest_framework_extensions.mixins import NestedViewSetMixin
from multinet.api.models import Workspace, WorkspaceRole
from multinet.api.utils.arango import ArangoQuery
class MultinetPagination(LimitOffsetPagination):
default_limit = 100
class ArangoPagination(LimitOffsetPagination):
"""Override the LimitOffsetPagination class to allow for use with arango cursors."""
def _set_pre_query_params(self, request):
self.limit = self.get_limit(request)
if self.limit is None:
return None
self.offset = self.get_offset(request)
self.request = request
def _set_post_query_params(self):
if self.count > self.limit and self.template is not None:
self.display_page_controls = True
def paginate_queryset(self, query: ArangoQuery, request: Request) -> List[Dict]:
self._set_pre_query_params(request)
paginated_query = query.paginate(self.limit, self.offset)
cur: Cursor = paginated_query.execute(full_count=True)
self.count = cur.statistics()['fullCount']
self._set_post_query_params()
return list(cur)
class WorkspaceChildMixin(NestedViewSetMixin):
def get_queryset(self):
"""
        Get the queryset for workspace child endpoints.
        Check that the requesting user has appropriate permissions for the associated workspace.
"""
child_objects = super().get_queryset()
# prevent warning for schema generation incompatibility
if getattr(self, 'swagger_fake_view', False):
return child_objects.none()
parent_query_dict = self.get_parents_query_dict()
workspace = get_object_or_404(
Workspace.objects.select_related('owner'), name=parent_query_dict['workspace__name']
)
# No user or user permission required for public workspaces
if workspace.public:
return child_objects
# Private workspace
request_user = self.request.user
if not request_user.is_authenticated: # anonymous user
raise Http404
workspace_role = WorkspaceRole.objects.filter(
workspace=workspace, user=request_user
).first()
# If the user is at least a reader or the owner, grant access
if workspace_role is not None or workspace.owner == request_user:
return child_objects
# Read access denied
raise Http404
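

# Illustrative usage sketch: one way a workspace-scoped viewset could combine
# WorkspaceChildMixin with ArangoPagination. The `build_rows_query` helper, the
# URL wiring, and the omitted queryset/serializer configuration are hypothetical;
# only paginate_queryset()/get_paginated_response() come from the classes above.
from rest_framework.decorators import action
from rest_framework.viewsets import GenericViewSet


class ExampleTableRowViewSet(WorkspaceChildMixin, GenericViewSet):
    # queryset / serializer_class configuration omitted for brevity
    pagination_class = MultinetPagination

    @action(detail=True, url_path='rows')
    def rows(self, request, **kwargs):
        # Build the AQL query however the application normally does (not shown),
        # then let ArangoPagination slice it and report Arango's fullCount.
        query: ArangoQuery = self.build_rows_query(**kwargs)  # hypothetical helper
        paginator = ArangoPagination()
        page = paginator.paginate_queryset(query, request)
        return paginator.get_paginated_response(page)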
| 1.976563 | 2 |
scripts/tests/generate_host_files.py | NDevTK/cel | 0 | 9932 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import logging
import os
import sys
def ParseArgs():
parser = argparse.ArgumentParser(
description='Host file generator for CELab E2E tests')
all_tokens = ['project_id', 'storage_bucket', 'storage_prefix']
template_help = 'The full path to the *.host.textpb template file to use. '
template_help += 'Must contain the following tokens: %s' % all_tokens
parser.add_argument(
'--template', metavar='<host_file>', required=True, help=template_help)
parser.add_argument(
'--projects',
metavar='<projectA;projectB;...>',
dest="projects",
required=True,
help='The values to replace "<project_id>" with.')
parser.add_argument(
'--storage_bucket',
metavar='<token>',
dest="storage_bucket",
required=True,
help='The value to replace "<storage_bucket>" with.')
parser.add_argument(
'--storage_prefix',
metavar='<token>',
dest="storage_prefix",
required=True,
help='The value to replace "<storage_prefix>" with.')
parser.add_argument(
'--destination_dir',
metavar='<path>',
dest='destination',
required=True,
action='store',
      help='Where to write the generated host files')
return parser.parse_args()
def ConfigureLogging(args):
logfmt = '%(asctime)s %(filename)s:%(lineno)s: [%(levelname)s] %(message)s'
datefmt = '%Y/%m/%d %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=logfmt, datefmt=datefmt)
if __name__ == '__main__':
args = ParseArgs()
ConfigureLogging(args)
logging.info("Arguments: %s" % args)
if not os.path.exists(args.template):
raise ValueError('Template host file not found: %s' % args.template)
if not os.path.exists(args.destination):
raise ValueError('Destination directory not found: %s' % args.destination)
# Generate all the host files based off the arguments passed.
with open(args.template, 'r') as f:
template = f.read()
for project_id in args.projects.split(';'):
filename = "%s.host.textpb" % project_id
destination = os.path.join(args.destination, filename)
with open(destination, 'w') as f:
logging.info("Generating %s" % destination)
content = template.replace("<project_id>", project_id)
content = content.replace("<storage_bucket>", args.storage_bucket)
content = content.replace("<storage_prefix>", args.storage_prefix)
f.write(content)
sys.exit(0)
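
# Illustrative invocation (paths, project IDs, and bucket names below are made up):
#
#   python generate_host_files.py \
#     --template templates/default.host.textpb \
#     --projects "cel-test-01;cel-test-02" \
#     --storage_bucket my-celab-bucket \
#     --storage_prefix e2e-logs \
#     --destination_dir out/hosts
#
# With these arguments the script writes out/hosts/cel-test-01.host.textpb and
# out/hosts/cel-test-02.host.textpb, replacing the <project_id>, <storage_bucket>,
# and <storage_prefix> tokens from the template.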
| 2.4375 | 2 |
users/models.py | moshthepitt/probsc | 0 | 9933 | from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.urls import reverse
from django_extensions.db.models import TimeStampedModel
from mptt.models import MPTTModel, TreeForeignKey
from .managers import UserProfileManager, DepartmentManager, PositionManager
User = settings.AUTH_USER_MODEL
class Department(MPTTModel, TimeStampedModel):
"""
Departments in an organisation
"""
name = models.CharField(_("Name"), max_length=255)
description = models.TextField(_("Description"), blank=True, default="")
parent = TreeForeignKey('self', verbose_name=_("Parent"), null=True,
blank=True, related_name='children', db_index=True,
on_delete=models.PROTECT,
help_text=_("The parent department"))
customer = models.ForeignKey(
'customers.Customer', verbose_name=_("Customer"),
on_delete=models.PROTECT)
manager = models.ForeignKey(
User, verbose_name=_("Manager"), on_delete=models.PROTECT,
blank=True, null=True)
active = models.BooleanField(_("Active"), default=True)
objects = DepartmentManager()
class Meta:
verbose_name = _("Department")
verbose_name_plural = _("Departments")
ordering = ['name']
def get_absolute_url(self):
return "#"
def get_edit_url(self):
return reverse('users:departments_edit', args=[self.pk])
def get_delete_url(self):
return reverse('users:departments_delete', args=[self.pk])
def get_list_url(self):
return reverse('users:departments_list')
def __str__(self):
return self.name
class Position(MPTTModel, TimeStampedModel):
"""
Job positions in an organisation
"""
name = models.CharField(_("Name"), max_length=255)
description = models.TextField(_("Description"), blank=True, default="")
department = models.ForeignKey(
Department, verbose_name=_("Department"), on_delete=models.PROTECT)
parent = TreeForeignKey('self', verbose_name=_("Reports To"), null=True,
blank=True, related_name='children', db_index=True,
on_delete=models.PROTECT,
help_text=_("The parent Job Position"))
supervisor = models.ForeignKey(
User, verbose_name=_("Supervisor"), on_delete=models.PROTECT,
blank=True, null=True)
customer = models.ForeignKey(
'customers.Customer', verbose_name=_("Customer"),
on_delete=models.PROTECT)
active = models.BooleanField(_("Active"), default=True)
objects = PositionManager()
class Meta:
        verbose_name = _("Job Position")
verbose_name_plural = _("Job Positions")
ordering = ['name']
def get_absolute_url(self):
return "#"
def get_edit_url(self):
return reverse('users:positions_edit', args=[self.pk])
def get_delete_url(self):
return reverse('users:positions_delete', args=[self.pk])
def get_list_url(self):
return reverse('users:positions_list')
def __str__(self):
return "{} - {}".format(self.department.name, self.name)
@python_2_unicode_compatible
class UserProfile(models.Model):
"""
Model used to store more information on users
"""
ADMIN = '1'
MEMBER = '2'
EDITOR = '3'
MEMBER_ROLE_CHOICES = (
(ADMIN, _('Admin')),
(EDITOR, _('Editor')),
(MEMBER, _('Member')),
)
created_on = models.DateTimeField(_("Created on"), auto_now_add=True)
updated_on = models.DateTimeField(_("Updated on"), auto_now=True)
user = models.OneToOneField(User, verbose_name=_("User"))
position = models.ForeignKey(Position, verbose_name=_(
"job Position"), on_delete=models.SET_NULL, blank=True, null=True,
default=None)
customer = models.ForeignKey('customers.Customer', verbose_name=_(
"Customer"), on_delete=models.SET_NULL, blank=True, null=True,
default=None)
role = models.CharField(
_("Role"), max_length=1, choices=MEMBER_ROLE_CHOICES, blank=False,
default=MEMBER)
active = models.BooleanField(
_("Active"), default=True, help_text="Is the staff member actively "
"employed?")
objects = UserProfileManager()
class Meta:
verbose_name = _("Staff Member")
verbose_name_plural = _("Staff Members")
ordering = ['user__first_name', 'user__last_name', 'user__email']
def get_name(self):
if self.user.get_full_name():
return self.user.get_full_name()
if self.user.email:
return self.user.email
return self.user.username
def get_initials(self):
if self.user.first_name and self.user.last_name:
return "{}{}".format(self.user.first_name[0],
self.user.last_name[0])
if self.user.first_name:
return self.user.first_name[0]
if self.user.last_name:
return self.user.last_name[0]
return self.user.email[0]
def is_admin(self):
return self.role == self.ADMIN
def is_editor(self):
return self.role == self.EDITOR
def can_edit(self):
return self.role == self.EDITOR or self.role == self.ADMIN
def get_subordinates(self):
"""
Returns a queryset of UserProfile objects which report to this
userprofile
"""
if self.position:
queryset = UserProfile.objects.active().exclude(
id=self.id).filter(
models.Q(
position__supervisor=self.user) | models.Q(
position__department__manager=self.user) | models.Q(
position__parent=self.position))
else:
queryset = UserProfile.objects.active().exclude(
id=self.id).filter(
models.Q(
position__supervisor=self.user) | models.Q(
position__department__manager=self.user))
# get job positions of subs
subordinate_positions = Position.objects.filter(
userprofile__in=queryset)
# get any position that may report to these positions
# list of position ids of Positions that report to
# subordinate_positions
reporting_jp_ids = []
for sub_p in subordinate_positions:
reporting_jps = sub_p.get_descendants(include_self=False)
if reporting_jps is not None:
reporting_jp_ids = reporting_jp_ids + list(
reporting_jps.values_list('id', flat=True))
reporting_jp_ids = list(set(reporting_jp_ids))
        # get user profiles with positions that report to subordinate_positions
reporting_profiles = UserProfile.objects.active().filter(
position__id__in=reporting_jp_ids)
queryset = queryset.union(reporting_profiles)
# unions result in weird filtering so we create a new queryset
queryset_ids = list(set([x.id for x in queryset]))
if queryset_ids:
queryset = UserProfile.objects.filter(id__in=queryset_ids)
else:
queryset = UserProfile.objects.none()
return queryset
def has_subordinates(self):
return self.get_subordinates().exists()
def get_department(self):
if self.position is not None:
return self.position.department.name
return None
def get_absolute_url(self):
return "#"
def get_edit_url(self):
return reverse('users:userprofiles_edit', args=[self.pk])
def get_delete_url(self):
return "#"
def get_list_url(self):
return reverse('users:userprofiles_list')
def __str__(self):
return _("{user}").format(user=self.get_name())
| 2.15625 | 2 |
azure-devops/azure/devops/released/build/build_client.py | imafidon2020/azure-devops-python-api | 248 | 9934 | <filename>azure-devops/azure/devops/released/build/build_client.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from ...v5_1.build import models
class BuildClient(Client):
"""Build
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(BuildClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '965220d5-5bb9-42cf-8d67-9b146df2a5a4'
def create_artifact(self, artifact, project, build_id):
"""CreateArtifact.
Associates an artifact with a build.
:param :class:`<BuildArtifact> <azure.devops.v5_1.build.models.BuildArtifact>` artifact: The artifact.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: :class:`<BuildArtifact> <azure.devops.v5_1.build.models.BuildArtifact>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
content = self._serialize.body(artifact, 'BuildArtifact')
response = self._send(http_method='POST',
location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('BuildArtifact', response)
def get_artifact(self, project, build_id, artifact_name):
"""GetArtifact.
Gets a specific artifact for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param str artifact_name: The name of the artifact.
:rtype: :class:`<BuildArtifact> <azure.devops.v5_1.build.models.BuildArtifact>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if artifact_name is not None:
query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str')
response = self._send(http_method='GET',
location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('BuildArtifact', response)
def get_artifact_content_zip(self, project, build_id, artifact_name, **kwargs):
"""GetArtifactContentZip.
Gets a specific artifact for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param str artifact_name: The name of the artifact.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if artifact_name is not None:
query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str')
response = self._send(http_method='GET',
location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_artifacts(self, project, build_id):
"""GetArtifacts.
Gets all artifacts for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: [BuildArtifact]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
response = self._send(http_method='GET',
location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984',
version='5.1',
route_values=route_values)
return self._deserialize('[BuildArtifact]', self._unwrap_collection(response))
def get_file(self, project, build_id, artifact_name, file_id, file_name, **kwargs):
"""GetFile.
Gets a file from the build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param str artifact_name: The name of the artifact.
:param str file_id: The primary key for the file.
:param str file_name: The name that the file will be set to.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if artifact_name is not None:
query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str')
if file_id is not None:
query_parameters['fileId'] = self._serialize.query('file_id', file_id, 'str')
if file_name is not None:
query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
response = self._send(http_method='GET',
location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def delete_build(self, project, build_id):
"""DeleteBuild.
Deletes a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
self._send(http_method='DELETE',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='5.1',
route_values=route_values)
def get_build(self, project, build_id, property_filters=None):
"""GetBuild.
Gets a build
:param str project: Project ID or project name
:param int build_id:
:param str property_filters:
:rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if property_filters is not None:
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
response = self._send(http_method='GET',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Build', response)
def get_builds(self, project, definitions=None, queues=None, build_number=None, min_time=None, max_time=None, requested_for=None, reason_filter=None, status_filter=None, result_filter=None, tag_filters=None, properties=None, top=None, continuation_token=None, max_builds_per_definition=None, deleted_filter=None, query_order=None, branch_name=None, build_ids=None, repository_id=None, repository_type=None):
"""GetBuilds.
Gets a list of builds.
:param str project: Project ID or project name
:param [int] definitions: A comma-delimited list of definition IDs. If specified, filters to builds for these definitions.
:param [int] queues: A comma-delimited list of queue IDs. If specified, filters to builds that ran against these queues.
:param str build_number: If specified, filters to builds that match this build number. Append * to do a prefix search.
:param datetime min_time: If specified, filters to builds that finished/started/queued after this date based on the queryOrder specified.
:param datetime max_time: If specified, filters to builds that finished/started/queued before this date based on the queryOrder specified.
:param str requested_for: If specified, filters to builds requested for the specified user.
:param str reason_filter: If specified, filters to builds that match this reason.
:param str status_filter: If specified, filters to builds that match this status.
:param str result_filter: If specified, filters to builds that match this result.
:param [str] tag_filters: A comma-delimited list of tags. If specified, filters to builds that have the specified tags.
:param [str] properties: A comma-delimited list of properties to retrieve.
:param int top: The maximum number of builds to return.
:param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of builds.
:param int max_builds_per_definition: The maximum number of builds to return per definition.
:param str deleted_filter: Indicates whether to exclude, include, or only return deleted builds.
:param str query_order: The order in which builds should be returned.
        :param str branch_name: If specified, filters to builds that built this branch.
:param [int] build_ids: A comma-delimited list that specifies the IDs of builds to retrieve.
:param str repository_id: If specified, filters to builds that built from this repository.
:param str repository_type: If specified, filters to builds that built from repositories of this type.
:rtype: :class:`<GetBuildsResponseValue>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if definitions is not None:
definitions = ",".join(map(str, definitions))
query_parameters['definitions'] = self._serialize.query('definitions', definitions, 'str')
if queues is not None:
queues = ",".join(map(str, queues))
query_parameters['queues'] = self._serialize.query('queues', queues, 'str')
if build_number is not None:
query_parameters['buildNumber'] = self._serialize.query('build_number', build_number, 'str')
if min_time is not None:
query_parameters['minTime'] = self._serialize.query('min_time', min_time, 'iso-8601')
if max_time is not None:
query_parameters['maxTime'] = self._serialize.query('max_time', max_time, 'iso-8601')
if requested_for is not None:
query_parameters['requestedFor'] = self._serialize.query('requested_for', requested_for, 'str')
if reason_filter is not None:
query_parameters['reasonFilter'] = self._serialize.query('reason_filter', reason_filter, 'str')
if status_filter is not None:
query_parameters['statusFilter'] = self._serialize.query('status_filter', status_filter, 'str')
if result_filter is not None:
query_parameters['resultFilter'] = self._serialize.query('result_filter', result_filter, 'str')
if tag_filters is not None:
tag_filters = ",".join(tag_filters)
query_parameters['tagFilters'] = self._serialize.query('tag_filters', tag_filters, 'str')
if properties is not None:
properties = ",".join(properties)
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if max_builds_per_definition is not None:
query_parameters['maxBuildsPerDefinition'] = self._serialize.query('max_builds_per_definition', max_builds_per_definition, 'int')
if deleted_filter is not None:
query_parameters['deletedFilter'] = self._serialize.query('deleted_filter', deleted_filter, 'str')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
if branch_name is not None:
query_parameters['branchName'] = self._serialize.query('branch_name', branch_name, 'str')
if build_ids is not None:
build_ids = ",".join(map(str, build_ids))
query_parameters['buildIds'] = self._serialize.query('build_ids', build_ids, 'str')
if repository_id is not None:
query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str')
if repository_type is not None:
query_parameters['repositoryType'] = self._serialize.query('repository_type', repository_type, 'str')
response = self._send(http_method='GET',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
response_value = self._deserialize('[Build]', self._unwrap_collection(response))
continuation_token = self._get_continuation_token(response)
return self.GetBuildsResponseValue(response_value, continuation_token)
class GetBuildsResponseValue(object):
def __init__(self, value, continuation_token):
"""
Response for the get_builds method
:param value:
:type value: :class:`<[Build]> <azure.devops.v5_1.build.models.[Build]>`
:param continuation_token: The continuation token to be used to get the next page of results.
:type continuation_token: str
"""
self.value = value
self.continuation_token = continuation_token
def queue_build(self, build, project, ignore_warnings=None, check_in_ticket=None, source_build_id=None):
"""QueueBuild.
Queues a build
:param :class:`<Build> <azure.devops.v5_1.build.models.Build>` build:
:param str project: Project ID or project name
:param bool ignore_warnings:
:param str check_in_ticket:
:param int source_build_id:
:rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if ignore_warnings is not None:
query_parameters['ignoreWarnings'] = self._serialize.query('ignore_warnings', ignore_warnings, 'bool')
if check_in_ticket is not None:
query_parameters['checkInTicket'] = self._serialize.query('check_in_ticket', check_in_ticket, 'str')
if source_build_id is not None:
query_parameters['sourceBuildId'] = self._serialize.query('source_build_id', source_build_id, 'int')
content = self._serialize.body(build, 'Build')
response = self._send(http_method='POST',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('Build', response)
def update_build(self, build, project, build_id, retry=None):
"""UpdateBuild.
Updates a build.
:param :class:`<Build> <azure.devops.v5_1.build.models.Build>` build: The build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param bool retry:
:rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if retry is not None:
query_parameters['retry'] = self._serialize.query('retry', retry, 'bool')
content = self._serialize.body(build, 'Build')
response = self._send(http_method='PATCH',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('Build', response)
def update_builds(self, builds, project):
"""UpdateBuilds.
Updates multiple builds.
:param [Build] builds: The builds to update.
:param str project: Project ID or project name
:rtype: [Build]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(builds, '[Build]')
response = self._send(http_method='PATCH',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('[Build]', self._unwrap_collection(response))
def get_build_changes(self, project, build_id, continuation_token=None, top=None, include_source_change=None):
"""GetBuildChanges.
Gets the changes associated with a build
:param str project: Project ID or project name
:param int build_id:
:param str continuation_token:
:param int top: The maximum number of changes to return
:param bool include_source_change:
:rtype: :class:`<GetBuildChangesResponseValue>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if include_source_change is not None:
query_parameters['includeSourceChange'] = self._serialize.query('include_source_change', include_source_change, 'bool')
response = self._send(http_method='GET',
location_id='54572c7b-bbd3-45d4-80dc-28be08941620',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
response_value = self._deserialize('[Change]', self._unwrap_collection(response))
continuation_token = self._get_continuation_token(response)
return self.GetBuildChangesResponseValue(response_value, continuation_token)
class GetBuildChangesResponseValue(object):
def __init__(self, value, continuation_token):
"""
Response for the get_build_changes method
:param value:
:type value: :class:`<[Change]> <azure.devops.v5_1.build.models.[Change]>`
:param continuation_token: The continuation token to be used to get the next page of results.
:type continuation_token: str
"""
self.value = value
self.continuation_token = continuation_token
def get_build_controller(self, controller_id):
"""GetBuildController.
Gets a controller
:param int controller_id:
:rtype: :class:`<BuildController> <azure.devops.v5_1.build.models.BuildController>`
"""
route_values = {}
if controller_id is not None:
route_values['controllerId'] = self._serialize.url('controller_id', controller_id, 'int')
response = self._send(http_method='GET',
location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
version='5.1',
route_values=route_values)
return self._deserialize('BuildController', response)
def get_build_controllers(self, name=None):
"""GetBuildControllers.
        Gets controllers, optionally filtered by name
:param str name:
:rtype: [BuildController]
"""
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
response = self._send(http_method='GET',
location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
version='5.1',
query_parameters=query_parameters)
return self._deserialize('[BuildController]', self._unwrap_collection(response))
def create_definition(self, definition, project, definition_to_clone_id=None, definition_to_clone_revision=None):
"""CreateDefinition.
Creates a new definition.
:param :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` definition: The definition.
:param str project: Project ID or project name
:param int definition_to_clone_id:
:param int definition_to_clone_revision:
:rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if definition_to_clone_id is not None:
query_parameters['definitionToCloneId'] = self._serialize.query('definition_to_clone_id', definition_to_clone_id, 'int')
if definition_to_clone_revision is not None:
query_parameters['definitionToCloneRevision'] = self._serialize.query('definition_to_clone_revision', definition_to_clone_revision, 'int')
content = self._serialize.body(definition, 'BuildDefinition')
response = self._send(http_method='POST',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('BuildDefinition', response)
def delete_definition(self, project, definition_id):
"""DeleteDefinition.
Deletes a definition and all associated builds.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
self._send(http_method='DELETE',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.1',
route_values=route_values)
def get_definition(self, project, definition_id, revision=None, min_metrics_time=None, property_filters=None, include_latest_builds=None):
"""GetDefinition.
Gets a definition, optionally at a specific revision.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
:param int revision: The revision number to retrieve. If this is not specified, the latest version will be returned.
:param datetime min_metrics_time: If specified, indicates the date from which metrics should be included.
:param [str] property_filters: A comma-delimited list of properties to include in the results.
:param bool include_latest_builds:
:rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
query_parameters = {}
if revision is not None:
query_parameters['revision'] = self._serialize.query('revision', revision, 'int')
if min_metrics_time is not None:
query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
if include_latest_builds is not None:
query_parameters['includeLatestBuilds'] = self._serialize.query('include_latest_builds', include_latest_builds, 'bool')
response = self._send(http_method='GET',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('BuildDefinition', response)
def get_definitions(self, project, name=None, repository_id=None, repository_type=None, query_order=None, top=None, continuation_token=None, min_metrics_time=None, definition_ids=None, path=None, built_after=None, not_built_after=None, include_all_properties=None, include_latest_builds=None, task_id_filter=None, process_type=None, yaml_filename=None):
"""GetDefinitions.
Gets a list of definitions.
:param str project: Project ID or project name
:param str name: If specified, filters to definitions whose names match this pattern.
:param str repository_id: A repository ID. If specified, filters to definitions that use this repository.
:param str repository_type: If specified, filters to definitions that have a repository of this type.
:param str query_order: Indicates the order in which definitions should be returned.
:param int top: The maximum number of definitions to return.
:param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of definitions.
:param datetime min_metrics_time: If specified, indicates the date from which metrics should be included.
:param [int] definition_ids: A comma-delimited list that specifies the IDs of definitions to retrieve.
:param str path: If specified, filters to definitions under this folder.
:param datetime built_after: If specified, filters to definitions that have builds after this date.
:param datetime not_built_after: If specified, filters to definitions that do not have builds after this date.
:param bool include_all_properties: Indicates whether the full definitions should be returned. By default, shallow representations of the definitions are returned.
:param bool include_latest_builds: Indicates whether to return the latest and latest completed builds for this definition.
:param str task_id_filter: If specified, filters to definitions that use the specified task.
:param int process_type: If specified, filters to definitions with the given process type.
:param str yaml_filename: If specified, filters to YAML definitions that match the given filename.
:rtype: :class:`<GetDefinitionsResponseValue>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
if repository_id is not None:
query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str')
if repository_type is not None:
query_parameters['repositoryType'] = self._serialize.query('repository_type', repository_type, 'str')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if min_metrics_time is not None:
query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
if definition_ids is not None:
definition_ids = ",".join(map(str, definition_ids))
query_parameters['definitionIds'] = self._serialize.query('definition_ids', definition_ids, 'str')
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if built_after is not None:
query_parameters['builtAfter'] = self._serialize.query('built_after', built_after, 'iso-8601')
if not_built_after is not None:
query_parameters['notBuiltAfter'] = self._serialize.query('not_built_after', not_built_after, 'iso-8601')
if include_all_properties is not None:
query_parameters['includeAllProperties'] = self._serialize.query('include_all_properties', include_all_properties, 'bool')
if include_latest_builds is not None:
query_parameters['includeLatestBuilds'] = self._serialize.query('include_latest_builds', include_latest_builds, 'bool')
if task_id_filter is not None:
query_parameters['taskIdFilter'] = self._serialize.query('task_id_filter', task_id_filter, 'str')
if process_type is not None:
query_parameters['processType'] = self._serialize.query('process_type', process_type, 'int')
if yaml_filename is not None:
query_parameters['yamlFilename'] = self._serialize.query('yaml_filename', yaml_filename, 'str')
response = self._send(http_method='GET',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
response_value = self._deserialize('[BuildDefinitionReference]', self._unwrap_collection(response))
continuation_token = self._get_continuation_token(response)
return self.GetDefinitionsResponseValue(response_value, continuation_token)
class GetDefinitionsResponseValue(object):
def __init__(self, value, continuation_token):
"""
Response for the get_definitions method
:param value:
:type value: :class:`<[BuildDefinitionReference]> <azure.devops.v5_1.build.models.[BuildDefinitionReference]>`
:param continuation_token: The continuation token to be used to get the next page of results.
:type continuation_token: str
"""
self.value = value
self.continuation_token = continuation_token
def restore_definition(self, project, definition_id, deleted):
"""RestoreDefinition.
Restores a deleted definition
:param str project: Project ID or project name
:param int definition_id: The identifier of the definition to restore.
:param bool deleted: When false, restores a deleted definition.
:rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
query_parameters = {}
if deleted is not None:
query_parameters['deleted'] = self._serialize.query('deleted', deleted, 'bool')
response = self._send(http_method='PATCH',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('BuildDefinition', response)
def update_definition(self, definition, project, definition_id, secrets_source_definition_id=None, secrets_source_definition_revision=None):
"""UpdateDefinition.
Updates an existing definition.
:param :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` definition: The new version of the definition.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
:param int secrets_source_definition_id:
:param int secrets_source_definition_revision:
:rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
query_parameters = {}
if secrets_source_definition_id is not None:
query_parameters['secretsSourceDefinitionId'] = self._serialize.query('secrets_source_definition_id', secrets_source_definition_id, 'int')
if secrets_source_definition_revision is not None:
query_parameters['secretsSourceDefinitionRevision'] = self._serialize.query('secrets_source_definition_revision', secrets_source_definition_revision, 'int')
content = self._serialize.body(definition, 'BuildDefinition')
response = self._send(http_method='PUT',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('BuildDefinition', response)
def get_build_log(self, project, build_id, log_id, start_line=None, end_line=None, **kwargs):
"""GetBuildLog.
Gets an individual log file for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param int log_id: The ID of the log file.
:param long start_line: The start line.
:param long end_line: The end line.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
if log_id is not None:
route_values['logId'] = self._serialize.url('log_id', log_id, 'int')
query_parameters = {}
if start_line is not None:
query_parameters['startLine'] = self._serialize.query('start_line', start_line, 'long')
if end_line is not None:
query_parameters['endLine'] = self._serialize.query('end_line', end_line, 'long')
response = self._send(http_method='GET',
location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_build_log_lines(self, project, build_id, log_id, start_line=None, end_line=None):
"""GetBuildLogLines.
Gets an individual log file for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param int log_id: The ID of the log file.
:param long start_line: The start line.
:param long end_line: The end line.
:rtype: [str]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
if log_id is not None:
route_values['logId'] = self._serialize.url('log_id', log_id, 'int')
query_parameters = {}
if start_line is not None:
query_parameters['startLine'] = self._serialize.query('start_line', start_line, 'long')
if end_line is not None:
query_parameters['endLine'] = self._serialize.query('end_line', end_line, 'long')
response = self._send(http_method='GET',
location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response))
def get_build_logs(self, project, build_id):
"""GetBuildLogs.
Gets the logs for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: [BuildLog]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
response = self._send(http_method='GET',
location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
version='5.1',
route_values=route_values)
return self._deserialize('[BuildLog]', self._unwrap_collection(response))
def get_build_logs_zip(self, project, build_id, **kwargs):
"""GetBuildLogsZip.
Gets the logs for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
response = self._send(http_method='GET',
location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
version='5.1',
route_values=route_values,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_build_log_zip(self, project, build_id, log_id, start_line=None, end_line=None, **kwargs):
"""GetBuildLogZip.
Gets an individual log file for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param int log_id: The ID of the log file.
:param long start_line: The start line.
:param long end_line: The end line.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
if log_id is not None:
route_values['logId'] = self._serialize.url('log_id', log_id, 'int')
query_parameters = {}
if start_line is not None:
query_parameters['startLine'] = self._serialize.query('start_line', start_line, 'long')
if end_line is not None:
query_parameters['endLine'] = self._serialize.query('end_line', end_line, 'long')
response = self._send(http_method='GET',
location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_build_option_definitions(self, project=None):
"""GetBuildOptionDefinitions.
Gets all build definition options supported by the system.
:param str project: Project ID or project name
:rtype: [BuildOptionDefinition]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='591cb5a4-2d46-4f3a-a697-5cd42b6bd332',
version='5.1',
route_values=route_values)
return self._deserialize('[BuildOptionDefinition]', self._unwrap_collection(response))
def get_definition_revisions(self, project, definition_id):
"""GetDefinitionRevisions.
Gets all revisions of a definition.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
:rtype: [BuildDefinitionRevision]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
response = self._send(http_method='GET',
location_id='7c116775-52e5-453e-8c5d-914d9762d8c4',
version='5.1',
route_values=route_values)
return self._deserialize('[BuildDefinitionRevision]', self._unwrap_collection(response))
def get_build_settings(self, project=None):
"""GetBuildSettings.
Gets the build settings.
:param str project: Project ID or project name
:rtype: :class:`<BuildSettings> <azure.devops.v5_1.build.models.BuildSettings>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='aa8c1c9c-ef8b-474a-b8c4-785c7b191d0d',
version='5.1',
route_values=route_values)
return self._deserialize('BuildSettings', response)
def update_build_settings(self, settings, project=None):
"""UpdateBuildSettings.
Updates the build settings.
:param :class:`<BuildSettings> <azure.devops.v5_1.build.models.BuildSettings>` settings: The new settings.
:param str project: Project ID or project name
:rtype: :class:`<BuildSettings> <azure.devops.v5_1.build.models.BuildSettings>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(settings, 'BuildSettings')
response = self._send(http_method='PATCH',
location_id='aa8c1c9c-ef8b-474a-b8c4-785c7b191d0d',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('BuildSettings', response)
def add_build_tag(self, project, build_id, tag):
"""AddBuildTag.
Adds a tag to a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param str tag: The tag to add.
:rtype: [str]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
if tag is not None:
route_values['tag'] = self._serialize.url('tag', tag, 'str')
response = self._send(http_method='PUT',
location_id='6e6114b2-8161-44c8-8f6c-c5505782427f',
version='5.1',
route_values=route_values)
return self._deserialize('[str]', self._unwrap_collection(response))
def add_build_tags(self, tags, project, build_id):
"""AddBuildTags.
Adds tags to a build.
:param [str] tags: The tags to add.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: [str]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
content = self._serialize.body(tags, '[str]')
response = self._send(http_method='POST',
location_id='6e6114b2-8161-44c8-8f6c-c5505782427f',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('[str]', self._unwrap_collection(response))
def delete_build_tag(self, project, build_id, tag):
"""DeleteBuildTag.
Removes a tag from a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param str tag: The tag to remove.
:rtype: [str]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
if tag is not None:
route_values['tag'] = self._serialize.url('tag', tag, 'str')
response = self._send(http_method='DELETE',
location_id='6e6114b2-8161-44c8-8f6c-c5505782427f',
version='5.1',
route_values=route_values)
return self._deserialize('[str]', self._unwrap_collection(response))
def get_build_tags(self, project, build_id):
"""GetBuildTags.
Gets the tags for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: [str]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
response = self._send(http_method='GET',
location_id='6e6114b2-8161-44c8-8f6c-c5505782427f',
version='5.1',
route_values=route_values)
return self._deserialize('[str]', self._unwrap_collection(response))
def get_tags(self, project):
"""GetTags.
Gets a list of all build and definition tags in the project.
:param str project: Project ID or project name
:rtype: [str]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='d84ac5c6-edc7-43d5-adc9-1b34be5dea09',
version='5.1',
route_values=route_values)
return self._deserialize('[str]', self._unwrap_collection(response))
def delete_template(self, project, template_id):
"""DeleteTemplate.
Deletes a build definition template.
:param str project: Project ID or project name
:param str template_id: The ID of the template.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
self._send(http_method='DELETE',
location_id='e884571e-7f92-4d6a-9274-3f5649900835',
version='5.1',
route_values=route_values)
def get_template(self, project, template_id):
"""GetTemplate.
Gets a specific build definition template.
:param str project: Project ID or project name
:param str template_id: The ID of the requested template.
:rtype: :class:`<BuildDefinitionTemplate> <azure.devops.v5_1.build.models.BuildDefinitionTemplate>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
response = self._send(http_method='GET',
location_id='e884571e-7f92-4d6a-9274-3f5649900835',
version='5.1',
route_values=route_values)
return self._deserialize('BuildDefinitionTemplate', response)
def get_templates(self, project):
"""GetTemplates.
Gets all definition templates.
:param str project: Project ID or project name
:rtype: [BuildDefinitionTemplate]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='e884571e-7f92-4d6a-9274-3f5649900835',
version='5.1',
route_values=route_values)
return self._deserialize('[BuildDefinitionTemplate]', self._unwrap_collection(response))
def save_template(self, template, project, template_id):
"""SaveTemplate.
Updates an existing build definition template.
:param :class:`<BuildDefinitionTemplate> <azure.devops.v5_1.build.models.BuildDefinitionTemplate>` template: The new version of the template.
:param str project: Project ID or project name
:param str template_id: The ID of the template.
:rtype: :class:`<BuildDefinitionTemplate> <azure.devops.v5_1.build.models.BuildDefinitionTemplate>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
content = self._serialize.body(template, 'BuildDefinitionTemplate')
response = self._send(http_method='PUT',
location_id='e884571e-7f92-4d6a-9274-3f5649900835',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('BuildDefinitionTemplate', response)
def get_build_timeline(self, project, build_id, timeline_id=None, change_id=None, plan_id=None):
"""GetBuildTimeline.
Gets details for a build
:param str project: Project ID or project name
:param int build_id:
:param str timeline_id:
:param int change_id:
:param str plan_id:
:rtype: :class:`<Timeline> <azure.devops.v5_1.build.models.Timeline>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
if timeline_id is not None:
route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str')
query_parameters = {}
if change_id is not None:
query_parameters['changeId'] = self._serialize.query('change_id', change_id, 'int')
if plan_id is not None:
query_parameters['planId'] = self._serialize.query('plan_id', plan_id, 'str')
response = self._send(http_method='GET',
location_id='8baac422-4c6e-4de5-8532-db96d92acffa',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Timeline', response)
def get_build_work_items_refs(self, project, build_id, top=None):
"""GetBuildWorkItemsRefs.
Gets the work items associated with a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param int top: The maximum number of work items to return.
:rtype: [ResourceRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='5a21f5d2-5642-47e4-a0bd-1356e6731bee',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ResourceRef]', self._unwrap_collection(response))
def get_build_work_items_refs_from_commits(self, commit_ids, project, build_id, top=None):
"""GetBuildWorkItemsRefsFromCommits.
Gets the work items associated with a build, filtered to specific commits.
:param [str] commit_ids: A comma-delimited list of commit IDs.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param int top: The maximum number of work items to return, or the number of commits to consider if no commit IDs are specified.
:rtype: [ResourceRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
content = self._serialize.body(commit_ids, '[str]')
response = self._send(http_method='POST',
location_id='5a21f5d2-5642-47e4-a0bd-1356e6731bee',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('[ResourceRef]', self._unwrap_collection(response))
| 2.125 | 2 |
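# Illustrative usage sketch for the build template and timeline methods above (not part
# of the generated client). It assumes the azure-devops package and its documented
# Connection helper are available; the organization URL, personal access token and
# project name below are placeholders.
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url="https://dev.azure.com/your-org",
                        creds=BasicAuthentication("", "your-personal-access-token"))
build_client = connection.clients.get_build_client()
for template in build_client.get_templates("your-project"):  # list all definition templates
    print(template.id, template.name)
timeline = build_client.get_build_timeline("your-project", build_id=123)  # details for one build
print(len(timeline.records))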
tests/test_train.py | hugobb/sgda | 1 | 9935 | import unittest
from gamesopt.train import train, TrainConfig
class TestOptimizer(unittest.TestCase):
def test_sgda(self):
config = TrainConfig(num_iter=2)
train(config) | 2.3125 | 2 |
loadCSVdata.py | christostsekouronas/academyposttestanalysis | 0 | 9936 | <filename>loadCSVdata.py
import pandas as pd
def loadTest(filepath):
df = pd.read_csv(filepath)
return df | 2.421875 | 2 |
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/models/_application_insights_management_client_enums.py | iscai-msft/azure-sdk-for-python | 8 | 9937 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class ApplicationType(str, Enum):
web = "web"
other = "other"
class FlowType(str, Enum):
bluefield = "Bluefield"
class RequestSource(str, Enum):
rest = "rest"
class PurgeState(str, Enum):
pending = "pending"
completed = "completed"
class FavoriteType(str, Enum):
shared = "shared"
user = "user"
class WebTestKind(str, Enum):
ping = "ping"
multistep = "multistep"
class ItemScope(str, Enum):
shared = "shared"
user = "user"
class ItemType(str, Enum):
query = "query"
function = "function"
folder = "folder"
recent = "recent"
class SharedTypeKind(str, Enum):
user = "user"
shared = "shared"
class FavoriteSourceType(str, Enum):
retention = "retention"
notebook = "notebook"
sessions = "sessions"
events = "events"
userflows = "userflows"
funnel = "funnel"
impact = "impact"
segmentation = "segmentation"
class ItemScopePath(str, Enum):
analytics_items = "analyticsItems"
myanalytics_items = "myanalyticsItems"
class ItemTypeParameter(str, Enum):
none = "none"
query = "query"
function = "function"
folder = "folder"
recent = "recent"
class CategoryType(str, Enum):
workbook = "workbook"
tsg = "TSG"
performance = "performance"
retention = "retention"
| 2.015625 | 2 |
setup.py | dhruvdcoder/allennlp-wandb | 0 | 9938 | <reponame>dhruvdcoder/allennlp-wandb
from setuptools import setup, find_packages
install_requires = [
"allennlp>=0.9.0",
"wandb==0.8.15",
]
setup(
name='allennlp_wandb',
version='0.0.1',
description='Utilities to use allennlp with wandb',
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
package_data={'allennlp_wandb': ['py.typed']},
install_requires=install_requires,
zip_safe=False)
| 1.546875 | 2 |
setup.py | mark-mishyn/django-axes | 0 | 9939 | <gh_stars>0
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="django-axes",
description="Keep track of failed login attempts in Django-powered sites.",
long_description="\n".join(
[
open("README.rst", encoding="utf-8").read(),
open("CHANGES.rst", encoding="utf-8").read(),
]
),
keywords="authentication django pci security",
author=", ".join(
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
]
),
author_email="<EMAIL>",
maintainer="Jazzband",
maintainer_email="<EMAIL>",
url="https://github.com/jazzband/django-axes",
project_urls={
"Documentation": "https://django-axes.readthedocs.io/",
"Source": "https://github.com/jazzband/django-axes",
"Tracker": "https://github.com/jazzband/django-axes/issues",
},
license="MIT",
package_dir={"axes": "axes"},
use_scm_version=True,
setup_requires=["setuptools_scm"],
python_requires="~=3.6",
install_requires=["django>=1.11", "django-appconf>=1.0.3", "django-ipware>=2.0.2"],
include_package_data=True,
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Environment :: Plugins",
"Framework :: Django",
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Internet :: Log Analysis",
"Topic :: Security",
"Topic :: System :: Logging",
],
zip_safe=False,
)
| 1.296875 | 1 |
achievements/admin.py | peterkrauz/rpg-achievements-django | 1 | 9940 | <reponame>peterkrauz/rpg-achievements-django<filename>achievements/admin.py
from django.contrib import admin
from achievements import models
admin.site.register(models.Achievement)
| 1.359375 | 1 |
src/modules/loss.py | ab3llini/BlindLess | 1 | 9941 | <gh_stars>1-10
from torch.nn import CrossEntropyLoss
class GPT2Loss(CrossEntropyLoss):
def __init__(self, pad_token_id):
super(GPT2Loss, self).__init__(ignore_index=pad_token_id)
def forward(self, output, labels):
"""
Loss function for gpt2
:param output:
:param labels:
:return:
"""
# Flatten the tensors (shift-align)
# Remove last token from output
output = output[..., :-1, :].contiguous().view(-1, output.size(-1))
        # Remove the first token from labels and do not care about the question
labels = (labels[..., 1:].contiguous()).view(-1)
# Compute the actual loss
return super(GPT2Loss, self).forward(output, labels)
class VisualGPT2Loss(GPT2Loss):
def __init__(self, pad_token_id, extract=None):
super(VisualGPT2Loss, self).__init__(pad_token_id=pad_token_id)
if extract is not None:
assert type(extract) == int, 'Extract value MUST be integer'
self.extract = extract
def forward(self, output, labels):
if self.extract is not None:
output = output[self.extract]
# Compute the actual loss
return super(VisualGPT2Loss, self).forward(output, labels[0])
class BERTLoss(CrossEntropyLoss):
def __init__(self, pad_token_id):
super(BERTLoss, self).__init__(ignore_index=pad_token_id)
def forward(self, output, labels):
"""
        Loss function for BERT
:param output:
:param labels:
:return:
"""
# Flatten the tensors (shift-align)
# Remove last token from output
output = output[..., :-1, :].contiguous().view(-1, output.size(-1))
        # Remove the first token from labels and do not care about the question
labels = (labels[..., 1:].contiguous()).view(-1)
# Compute the actual loss
return super(BERTLoss, self).forward(output, labels)
| 2.4375 | 2 |
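# Illustrative sketch of driving the GPT2Loss defined above (not part of the original
# module). It assumes PyTorch is installed and that GPT2Loss is importable from the
# project; the import path is hypothetical, the logits are random stand-ins for real
# model output, and pad_token_id=0 is an arbitrary placeholder.
import torch
from modules.loss import GPT2Loss  # hypothetical import path, adjust to the project layout

batch_size, seq_len, vocab_size = 2, 8, 50257
logits = torch.randn(batch_size, seq_len, vocab_size)         # fake model output
labels = torch.randint(1, vocab_size, (batch_size, seq_len))  # fake target token ids
criterion = GPT2Loss(pad_token_id=0)
loss = criterion(logits, labels)  # shift-aligns output/labels internally, then cross entropy
print(loss.item())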
analyze_tls.py | khushhallchandra/CN-project | 0 | 9942 | <gh_stars>0
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def main(filename):
data = pd.read_csv(filename, header=None)
means = data.mean(axis = 0)
stds = data.std(axis = 0)
return means[0], means[1], stds[0], stds[1]
if __name__ == '__main__':
files_http1 = ['./results/benchmark_size/http1_txt1.csv', './results/benchmark_size/http1_txt2.csv', './results/benchmark_size/http1_txt3.csv', './results/benchmark_size/http1_txt4.csv', './results/benchmark_size/http1_txt5.csv']
files_http1_tls = ['./results/benchmark_size/http1_tls_txt1.csv', './results/benchmark_size/http1_tls_txt2.csv', './results/benchmark_size/http1_tls_txt3.csv', './results/benchmark_size/http1_tls_txt4.csv', './results/benchmark_size/http1_tls_txt5.csv']
files_http2 = ['./results/benchmark_size/http2_txt1.csv', './results/benchmark_size/http2_txt2.csv', './results/benchmark_size/http2_txt3.csv', './results/benchmark_size/http2_txt4.csv', './results/benchmark_size/http2_txt5.csv']
files_http2_tls = ['./results/benchmark_size/http2_tls_txt1.csv', './results/benchmark_size/http2_tls_txt2.csv', './results/benchmark_size/http2_tls_txt3.csv', './results/benchmark_size/http2_tls_txt4.csv', './results/benchmark_size/http2_tls_txt5.csv']
time_tot_http2, time_contentTransfer_http2 = [], []
std_tot_http2, std_contentTransfer_http2 = [], []
time_tot_http1, time_contentTransfer_http1 = [], []
std_tot_http1, std_contentTransfer_http1 = [], []
time_tot_http2_tls, time_contentTransfer_http2_tls = [], []
std_tot_http2_tls, std_contentTransfer_http2_tls = [], []
time_tot_http1_tls, time_contentTransfer_http1_tls = [], []
std_tot_http1_tls, std_contentTransfer_http1_tls = [], []
for f in files_http2:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http2.append(t1)
time_tot_http2.append(t2)
std_contentTransfer_http2.append(2*std1)
std_tot_http2.append(2*std2)
for f in files_http1:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http1.append(t1)
time_tot_http1.append(t2)
std_contentTransfer_http1.append(2*std1)
std_tot_http1.append(2*std2)
for f in files_http2_tls:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http2_tls.append(t1)
time_tot_http2_tls.append(t2)
std_contentTransfer_http2_tls.append(2*std1)
std_tot_http2_tls.append(2*std2)
for f in files_http1_tls:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http1_tls.append(t1)
time_tot_http1_tls.append(t2)
std_contentTransfer_http1_tls.append(2*std1)
std_tot_http1_tls.append(2*std2)
x = [100, 1000, 10000, 100000, 1000000]
time_tot_http2, time_contentTransfer_http2 = np.array(time_tot_http2), np.array(time_contentTransfer_http2)
std_tot_http2, std_contentTransfer_http2 = np.array(std_tot_http2), np.array(std_contentTransfer_http2)
time_tot_http1, time_contentTransfer_http1 = np.array(time_tot_http1), np.array(time_contentTransfer_http1)
std_tot_http1, std_contentTransfer_http1 = np.array(std_tot_http1), np.array(std_contentTransfer_http1)
time_tot_http2_tls, time_contentTransfer_http2_tls = np.array(time_tot_http2_tls), np.array(time_contentTransfer_http2_tls)
std_tot_http2_tls, std_contentTransfer_http2_tls = np.array(std_tot_http2_tls), np.array(std_contentTransfer_http2_tls)
time_tot_http1_tls, time_contentTransfer_http1_tls = np.array(time_tot_http1_tls), np.array(time_contentTransfer_http1_tls)
std_tot_http1_tls, std_contentTransfer_http1_tls = np.array(std_tot_http1_tls), np.array(std_contentTransfer_http1_tls)
fig, ax = plt.subplots()
ax.grid()
ax.plot(x, time_contentTransfer_http1, 'o-', color='r', label="HTTP1")
ax.plot(x, time_contentTransfer_http1_tls, 'o-', color='g', label="HTTP1_with_tls")
ax.plot(x, time_contentTransfer_http2, 'o-', color='b', label="SPDY")
ax.plot(x, time_contentTransfer_http2_tls, 'o-', color='k', label="SPDY_with_tls")
ax.fill_between(x, time_contentTransfer_http1 - std_contentTransfer_http1, time_contentTransfer_http1 + std_contentTransfer_http1, color='gray', alpha=0.3)
ax.fill_between(x, time_contentTransfer_http2 - std_contentTransfer_http2, time_contentTransfer_http2 + std_contentTransfer_http2, color='gray', alpha=0.3)
ax.fill_between(x, time_contentTransfer_http1_tls - std_contentTransfer_http1_tls, time_contentTransfer_http1_tls + std_contentTransfer_http1_tls, color='gray', alpha=0.3)
ax.fill_between(x, time_contentTransfer_http2_tls - std_contentTransfer_http2_tls, time_contentTransfer_http2_tls + std_contentTransfer_http2_tls, color='gray', alpha=0.3)
# ax.errorbar(x, time_contentTransfer_http2, yerr=std_contentTransfer_http2, fmt='-', color='r', label="HTTP2")
# ax.errorbar(x, time_contentTransfer_quic, yerr=std_contentTransfer_quic, fmt='-', color='b', label="QUIC")
ax.set_xlabel('Size of data (Length)')
ax.set_ylabel('Time (in ms)')
ax.legend()
ax.set_xscale('log')
ax.set_title('Comparison of Time Taken for Data Transfer with TLS ON/OFF')
fig.savefig('results/plots/time_contentTransfer_tls.png', dpi=fig.dpi)
fig, ax = plt.subplots()
ax.grid()
ax.plot(x, time_tot_http1, 'o-', color='r', label="HTTP1")
ax.plot(x, time_tot_http1_tls, 'o-', color='g', label="HTTP1_with_tls")
ax.plot(x, time_tot_http2, 'o-', color='b', label="SPDY")
ax.plot(x, time_tot_http2_tls, 'o-', color='k', label="SPDY_with_tls")
ax.fill_between(x, time_tot_http1 - std_tot_http1, time_tot_http1 + std_tot_http1, color='gray', alpha=0.3)
ax.fill_between(x, time_tot_http2 - std_tot_http2, time_tot_http2 + std_tot_http2, color='gray', alpha=0.3)
ax.fill_between(x, time_tot_http1_tls - std_tot_http1_tls, time_tot_http1_tls + std_tot_http1_tls, color='gray', alpha=0.3)
ax.fill_between(x, time_tot_http2_tls - std_tot_http2_tls, time_tot_http2_tls + std_tot_http2_tls, color='gray', alpha=0.3)
# ax.errorbar(x, time_tot_http2, yerr=std_tot_http2, fmt='-', color='r', label="HTTP2")
# ax.errorbar(x, time_tot_quic, yerr=std_tot_quic, fmt='-', color='b', label="QUIC")
ax.set_xlabel('Size of data (Length)')
ax.set_ylabel('Time (in ms)')
ax.legend()
ax.set_xscale('log')
ax.set_title('Comparison of Total Time with TLS ON/OFF')
fig.savefig('results/plots/total_time_tls.png', dpi=fig.dpi) | 2.453125 | 2 |
validation/utils/m1.py | PedrV/stfX | 0 | 9943 | <filename>validation/utils/m1.py
import unittest
import os
from matplotlib import pyplot as plt
from shapely import geometry, affinity
X_COORDINATE = 0
Y_COORDINATE = 1
def extract_x_y(polygon: list) -> (list, list):
"""Extract the x and y coordinates as two separate lists"""
x_list = []
y_list = []
for vertex in polygon:
x_list.append(vertex[X_COORDINATE])
y_list.append(vertex[Y_COORDINATE])
return (x_list, y_list)
def save_fig(dir: str):
"""Save the current plt figure in the given directory under the name: m1.png"""
plt.savefig(dir + '/m1.png')
plt.clf()
def plot_polygons(hull: list, min_hull: list, perceived_poly: list, real_poly: list, dir: str = None):
"""Plot the given two polygons, in a single figure, with different colors"""
h1_x, h1_y = extract_x_y(hull)
h2_x, h2_y = extract_x_y(min_hull)
p1_x, p1_y = extract_x_y(perceived_poly)
p2_x, p2_y = extract_x_y(real_poly)
# Figure settings
fig = plt.figure()
# fig.suptitle('Convex hull area (red) VS real representation area (blue)')
plt.xlabel('x')
plt.ylabel('y')
# Plotting hulls
plt.fill(h1_x, h1_y, color="#FF000020")
plt.fill(h2_x, h2_y, color="#0000FF20")
# Plotting polygons lines
plt.plot(p1_x, p1_y, color="#FF000060") # Red perceived poly
plt.plot(p2_x, p2_y, color="#0000FF60") # Blue real poly
# Plotting polygons points
for p in perceived_poly:
plt.plot(p[X_COORDINATE], p[Y_COORDINATE], 'o', color="#FF0000A0")
for p in real_poly:
plt.plot(p[X_COORDINATE], p[Y_COORDINATE], 'x', color="#0000FFA0")
# plt.show()
if dir is not None:
save_fig(dir)
def surveyor_formula(polygon: list) -> float:
"""Find the area of the given polygon using the surveyor formula"""
# Check if first and last points of polygon are equal
parsed_poly = polygon[0:-1]\
if polygon[0] == polygon[len(polygon)-1]\
else polygon
area = 0
for i in range(-1, len(parsed_poly)-1):
area += parsed_poly[i][X_COORDINATE] * parsed_poly[i+1][Y_COORDINATE] -\
parsed_poly[i][Y_COORDINATE] * parsed_poly[i+1][X_COORDINATE]
return abs(area / 2)
def polygon_to_vertices_list(polygon: geometry.Polygon) -> list:
"""Extract the polygon vertices as a list"""
return list(polygon.exterior.coords)
def apply_transformations(initial_representation: list, events: list) -> float:
"""Apply the transformations in the events list to the initial representation"""
scale = 1
rot_angle = 0
trans_vector = [0, 0]
for item in events:
for event in item["events"]:
if event["type"] == "TRANSLATION":
trans_vector[X_COORDINATE] += event["trigger"]["transformation"][X_COORDINATE]
trans_vector[Y_COORDINATE] += event["trigger"]["transformation"][Y_COORDINATE]
elif event["type"] == "ROTATION":
rot_angle += event["trigger"]["transformation"]
elif event["type"] == "UNIFORM_SCALE":
scale *= event["trigger"]["transformation"]
# Apply multiplication
polygon = geometry.Polygon(initial_representation)
s_polygon = affinity.scale(polygon,
xfact=scale,
yfact=scale,
origin=(0, 0))
r_s_polygon = affinity.rotate(s_polygon,
rot_angle,
origin=(0, 0))
t_r_s_polygon = affinity.translate(r_s_polygon,
xoff=trans_vector[0],
yoff=trans_vector[1])
return polygon_to_vertices_list(t_r_s_polygon)
def apply_m1(real_representation: list, perceived_representation: list, dir: str = None) -> float:
"""Apply the metric M1 and obtain its result, between 0 and 1"""
joint_point_set = real_representation + perceived_representation
# Getting necessary hulls
real_convex_hull = geometry.MultiPoint(real_representation).convex_hull
perceived_hull = geometry.MultiPoint(perceived_representation).convex_hull
convex_hull = geometry.MultiPoint(joint_point_set).convex_hull
# Getting vertices of hulls
real_vertices = polygon_to_vertices_list(real_convex_hull)
perceived_vertices = polygon_to_vertices_list(perceived_hull)
joint_vertices = polygon_to_vertices_list(convex_hull)
# Getting the min area
real_area = surveyor_formula(real_vertices)
perceived_area = surveyor_formula(perceived_vertices)
if real_area <= perceived_area:
min_area = real_area
min_vertices = real_vertices
else:
min_area = perceived_area
min_vertices = perceived_vertices
plot_polygons(hull=joint_vertices,
min_hull=min_vertices,
perceived_poly=perceived_representation,
real_poly=real_representation,
dir=dir)
return min_area / surveyor_formula(joint_vertices)
class TestM1(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestM1, self).__init__(*args, **kwargs)
self.representation = [
[1, 1],
[1, -1],
[-1, -1],
[-1, 1],
[1, 1]
]
self.transformations = [{
"events": [
{"type": "TRANSLATION", "trigger": {"transformation": [5, 5]}},
{"type": "ROTATION", "trigger": {"transformation": 180}},
{"type": "UNIFORM_SCALE", "trigger": {"transformation": 1.25}}
]
}, {
"events": [
{"type": "TRANSLATION", "trigger": {"transformation": [5, 0]}},
{"type": "ROTATION", "trigger": {"transformation": -90}},
{"type": "UNIFORM_SCALE", "trigger": {"transformation": 1.6}}
]
}]
self.min_scale = [{
"events": [
{"type": "UNIFORM_SCALE", "trigger": {"transformation": 0.5}}
]
}]
def test_area(self):
square = [
[1, 1],
[1, -1],
[-1, -1],
[-1, 1]
]
self.assertEqual(surveyor_formula(square), 4)
self.assertEqual(surveyor_formula(self.representation), 4)
def test_transformations(self):
self.assertEqual(apply_transformations(self.representation, self.transformations), [
(8.0, 7.0),
(12.0, 7.0),
(12.0, 3.0),
(8.0, 3.0),
(8.0, 7.0),
])
def test_M1(self):
self.assertEqual(apply_m1(self.representation, self.representation), 1)
self.assertTrue(apply_m1(self.representation,
apply_transformations(self.representation, self.transformations))
< 0.1)
self.assertEqual(apply_m1([
(8.0, 7.0),
(12.0, 7.0),
(12.0, 3.0),
(8.0, 3.0),
(8.0, 7.0)],
apply_transformations(self.representation, self.transformations)),
1)
def test_mean_perceived(self):
self.assertEqual(apply_m1(self.representation,
apply_transformations(self.representation, self.min_scale)),
0.25)
if __name__ == '__main__':
unittest.main()
| 2.78125 | 3 |
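# Small worked example for apply_m1 above (illustrative, not part of the original file):
# a perceived 1x1 square inside a real 2x2 square gives a joint hull of area 4 and a
# minimum hull of area 1, so the metric is 0.25, the same value test_mean_perceived checks.
real_square = [[0, 0], [2, 0], [2, 2], [0, 2]]
perceived_square = [[0, 0], [1, 0], [1, 1], [0, 1]]
print(apply_m1(real_square, perceived_square))  # -> 0.25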
movefiles.py | linhailan/JPG-PNG-to-MNIST-NN-Format | 0 | 9944 | <reponame>linhailan/JPG-PNG-to-MNIST-NN-Format
import os
from PIL import Image
from array import *
from random import shuffle
import shutil
def move_file(src_path, dst_path, file):
print("from : ",src_path)
print("to : ",dst_path)
try:
# cmd = 'chmod -R +x ' + src_path
# os.popen(cmd)
f_src = os.path.join(src_path, file)
if not os.path.exists(dst_path):
os.mkdir(dst_path)
f_dst = os.path.join(dst_path, file)
shutil.move(f_src, f_dst)
except Exception as e:
print("move file ERROR: ",e)
# Load from and save to
def loadfile(Names):
FileList = []
for dirname in os.listdir(Names[0][0]):
path = os.path.join(Names[0][0], dirname)
print(path)
i = 0
for filename in os.listdir(path):
if i >= 50:
break
if filename.endswith(".jpg"):
print(i,":",filename)
src_path = os.path.join(Names[0][0],dirname)
dst_path = os.path.join(Names[1][0],dirname)
move_file(src_path,dst_path,filename)
i += 1
Names = [['./training-images','train'], ['./test-images','test']]
for name in Names:
FileList = []
for dirname in os.listdir(name[0]):
path = os.path.join(name[0],dirname)
print(path,":",len(os.listdir(path)))
| 2.6875 | 3 |
spark_work.py | nszceta/spark-python-celery-demo | 8 | 9945 | <reponame>nszceta/spark-python-celery-demo
import sys
from pyspark import SparkContext
import json
print('spark got python path -> ' + str(sys.executable))
logfile = sys.argv[1]
sc = SparkContext()
logdata = sc.textFile(logfile).cache()
a_count = logdata.filter(lambda s: 'a' in s).count()
b_count = logdata.filter(lambda s: 'b' in s).count()
print(json.dumps({'a': a_count, 'b': b_count}))
| 2.546875 | 3 |
nodes/List/GetTaskRenderListIndex.py | atticus-lv/RenderNode | 17 | 9946 | import bpy
from bpy.props import *
from ...nodes.BASE.node_base import RenderNodeBase
class RenderNodeGetListIndex(RenderNodeBase):
"""A simple input node"""
bl_idname = 'RenderNodeGetListIndex'
bl_label = 'Get List Index'
def init(self, context):
self.create_output('RenderNodeSocketInt', "index", 'Index')
def process(self,context,id,path):
node = self.id_data.nodes.get(bpy.context.window_manager.rsn_active_list)
if not node or node.bl_idname != 'RenderNodeTaskRenderListNode': return
self.outputs[0].set_value(node.active_index)
def register():
bpy.utils.register_class(RenderNodeGetListIndex)
def unregister():
bpy.utils.unregister_class(RenderNodeGetListIndex)
| 2.21875 | 2 |
src/recognizeDigit.py | RsTaK/Sudoku | 2 | 9947 | from keras.models import load_model
import cv2
import pickle
import keras.backend as K
import numpy as np
from src.model_path import MODEL_PATH
'''def predict(self, cell):
model = load_model('./model/Model.h5')
f = K.function([model.layers[0].input, K.learning_phase()],[model.layers[-1].output])
rescaled_cell = self.rescale(cell)
result = []
for _ in range(10):
result.append(f([rescaled_cell, 1]))
result = np.array(result)
prediction = result.mean(axis=0)
uncertainty = result.var(axis=0)
if uncertainty.argmax() > 3:
new_prediction = 0
print(prediction.argmax(),uncertainty.argmax(),new_prediction)
else:
print(prediction.argmax(),uncertainty.argmax())'''
class recognizeDigit:
def __init__(self, cell):
self._prediction = self.predict(cell)
def predict(self, cell):
model = load_model(MODEL_PATH)
rescaled_cell = self.rescale(cell)
pred = model.predict(rescaled_cell)
return pred.argmax()
def rescale(self, cell):
resized_cell = cv2.resize(cell, (28, 28))
return resized_cell.reshape(1, resized_cell.shape[0], resized_cell.shape[1], 1)
@property
def prediction(self):
return self._prediction | 2.828125 | 3 |
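# Illustrative usage of the recognizeDigit class above (not part of the original file);
# "cell.png" is a placeholder for a cropped Sudoku cell image, and the trained Keras
# model is expected at src.model_path.MODEL_PATH just as in the module.
import cv2
from src.recognizeDigit import recognizeDigit

cell_img = cv2.imread("cell.png", cv2.IMREAD_GRAYSCALE)  # 2-D grayscale array
print("Predicted digit:", recognizeDigit(cell_img).prediction)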
plots.py | olihawkins/penguin-models | 1 | 9948 | # -*- coding: utf-8 -*-
"""A module for plotting penguins data for modelling with scikit-learn."""
# Imports ---------------------------------------------------------------------
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Constants -------------------------------------------------------------------
SPECIES_COLORS = {
'Adelie': '#4daf4a',
'Gentoo': '#ffb000',
'Chinstrap': '#0084f7'
}
X_AXIS = [30, 60]
Y_AXIS = [12, 22]
# Set style -------------------------------------------------------------------
# Load the style from a file
plt.style.use('./style/eda.mplstyle')
# Alternatively, load the style from the library in ~/.matplotlib/stylelib
# plt.style.use(['eda'])
# Functions -------------------------------------------------------------------
def get_contour_data(model, pipeline, n_points=1000):
"""Create the data used to show the boundary of the decision function."""
x0s = np.linspace(X_AXIS[0], X_AXIS[1], n_points)
x1s = np.linspace(Y_AXIS[0], Y_AXIS[1], n_points)
x0, x1 = np.meshgrid(x0s, x1s)
X = np.c_[x0.ravel(), x1.ravel()]
df_X = pd.DataFrame(X, columns=['bill_length_mm', 'bill_depth_mm'])
X = pipeline.transform(df_X)
y_pred = model.predict(X).reshape(x0.shape)
y_decision = model.decision_function(X).reshape(x0.shape)
return x0, x1, y_pred, y_decision
def get_target_colors(target):
"""Create a dictionary of colors to use in binary classification plots."""
return {
target : '#984ea3',
'Other': '#ff7f00'
}
# Plots -----------------------------------------------------------------------
def plot_example():
plt.style.reload_library()
plt.style.use(['eda'])
fig, ax = plt.subplots()
ax.set_title('Some random words of the title')
ax.scatter(np.random.normal(0,1,10), np.random.normal(0,1,10))
fig.savefig('plots/test.svg', format='svg')
fig.savefig('plots/test.png', format='png')
plt.close()
def plot_target_by_features(df):
"""Plot the different target species."""
fig, ax = plt.subplots()
ax.set_title(
label='Palmer penguins by species and bill characteristics',
loc='center')
ax.get_xaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_xlim(X_AXIS[0], X_AXIS[1])
ax.set_xlabel('Bill length (mm)')
ax.get_yaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_ylim(Y_AXIS[0], Y_AXIS[1])
ax.set_ylabel('Bill depth (mm)')
grouped = df.groupby('species')
for key, group in grouped:
ax.scatter(
group['bill_length_mm'],
group['bill_depth_mm'],
c=SPECIES_COLORS[key],
s=40,
label=key,
alpha=0.55)
ax.legend(loc='lower left', handletextpad=0.2)
fig.savefig('plots/target-by-features.png', format='png')
plt.close()
def plot_model(df, model, pipeline, f_score, target, title, filename):
"""Plot the results of a binary classification model."""
fig, ax = plt.subplots()
ax.set_title(title, loc='center')
ax.get_xaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_xlim(X_AXIS[0], X_AXIS[1])
ax.set_xlabel('Bill length (mm)')
ax.get_yaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_ylim(Y_AXIS[0], Y_AXIS[1])
ax.set_ylabel('Bill depth (mm)')
# Plot the boundary of the decision function
x0, x1, y_pred, y_decision = get_contour_data(model, pipeline)
ax.contourf(x0, x1, y_pred, cmap=plt.cm.PuOr, alpha=0.2)
# This plots the decision score, if needed
# ax.contourf(x0, x1, y_decision, cmap=plt.cm.PuOr, alpha=0.1)
df = df.copy()
df['species'] = df['target'].apply(lambda t: target if t == 1 else 'Other')
colors = get_target_colors(target)
grouped = df.groupby('species')
for key, group in grouped:
ax.scatter(
group['bill_length_mm'],
group['bill_depth_mm'],
c=colors[key],
s=40,
label=key,
alpha=0.55)
ax.legend(loc='lower left', handletextpad=0.2)
bbox_style = {
'boxstyle': 'round',
'facecolor': '#ffffff',
'edgecolor': '#d4d4d4',
'alpha': 0.8
}
ax.text(53, 12.415, '$F_1$ score: {0}'.format(f_score), bbox=bbox_style)
fig.savefig('plots/{0}.png'.format(filename), format='png')
plt.close() | 3.15625 | 3 |
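# Illustrative driver for the plotting helpers above (not part of the original module).
# It uses seaborn's bundled Palmer penguins dataset as a stand-in for the project's own
# data, and assumes the plots/ directory and style/eda.mplstyle referenced above exist.
import seaborn as sns

penguins = sns.load_dataset("penguins").dropna()
plot_target_by_features(penguins)  # writes plots/target-by-features.png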
dojo/db_migrations/0147_rename_sslyze_parser.py | dant24/django-DefectDojo | 249 | 9949 | <filename>dojo/db_migrations/0147_rename_sslyze_parser.py
from django.db import migrations
def rename_sslyze_parser(apps, schema_editor):
Test_Type_model = apps.get_model('dojo', 'Test_Type')
try:
test_type_sslyze = Test_Type_model.objects.get(name='SSLyze 3 Scan (JSON)')
test_type_sslyze.name = 'SSLyze Scan (JSON)'
test_type_sslyze.save()
except Test_Type_model.DoesNotExist:
# This happens when a new instance of DD is initialized
pass
class Migration(migrations.Migration):
dependencies = [
('dojo', '0146_lead_optional'),
]
operations = [
migrations.RunPython(rename_sslyze_parser),
]
| 1.914063 | 2 |
server/forestgame/game/test_world.py | Nick-Pearson/forestgame | 0 | 9950 | import unittest
from forestgame.game.world import World
class WorldTest(unittest.TestCase):
def test_world_inits_to_empty_data(self):
world = World(None, "1", "0", 0, 0, [], [])
self.assertEqual(0, world.get_size_x())
self.assertEqual(0, world.get_size_y())
self.assertEqual([], world.get_tile_data())
    def test_world_with_tiles_inits_with_tile_data(self):
world = World(None, "1", "0", 3, 3, [(1, 1, 0)], [])
expected_tile_data = [
[1, 1, 1],
[1, 0, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_from_zero_initialsies_from_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_larger_x_y_pads_with_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(2, 2)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_larger_x_pads_with_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(2, 3)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_larger_y_pads_with_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 2)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_smaller_x_y_removes_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(2, 2)
expected_tile_data = [
[1, 1],
[1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(2, world.get_size_x())
self.assertEqual(2, world.get_size_y())
def test_set_size_with_smaller_x_removes_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(2, 3)
expected_tile_data = [
[1, 1],
[1, 1],
[1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(2, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_smaller_y_removes_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(3, 2)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(2, world.get_size_y())
def test_set_size_with_same_x_y_does_nothing(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
# set tile range checks
def test_set_tile_changes_tile_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(5, 5)
world.set_tile_at(2, 3, 0)
self.assertEqual(0, world.get_tile_at(2, 3))
expected_tile_data = [
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 1, 1]
]
self.assertEqual(expected_tile_data, world.get_tile_data())
| 3.484375 | 3 |
invconv/xlsx.py | TechPowerAwaits/ax-toolkit | 0 | 9951 | # Copyright 2021 <NAME> <<EMAIL>>
# SPDX-license-identifier: 0BSD
import string
from loguru import logger
try:
import cell_pos
from exceptions import InvconvMissingHeaders
import ftype
import msg_handler
except ModuleNotFoundError:
import invconv.cell_pos as cell_pos
from invconv.exceptions import InvconvMissingHeaders
import invconv.ftype as ftype
import invconv.msg_handler as msg_handler
used = True
try:
from openpyxl import load_workbook
except ModuleNotFoundError:
used = False
# load_workbook is used repeatedly with similar settings
# every time.
WB_SETTINGS = {
"read_only": True,
"keep_vba": False,
"data_only": True,
"keep_links": False,
}
class XlsxDataTuple(ftype.BasicFtypeDataClass):
def __init__(self, filename, wsname, headers):
self.filename = filename
self.wsname = wsname
self.headers = headers
self.cur_row = None
self.cur_col = None
super().__init__(
filename=self.filename, sectionname=self.wsname, headers=self.headers
)
# Set relevant values and gets the number of operations
# to be performed based on the dimensions.
def set_oper_num(self, min_row, max_row, max_col):
self.min_row = min_row
self.min_col = 1
self.max_row = max_row
self.max_col = max_col
delta_col = self.max_col - self.min_col + 1
delta_row = self.max_row - self.min_row + 1
self.num_oper = delta_col * delta_row
return self.num_oper
def load_workbook(self):
return load_workbook(self.filename, **WB_SETTINGS)
def parser(self):
if self.cur_row is None:
self.cur_row = self.min_row
if self.cur_col is None:
self.cur_col = self.min_col
if self.cur_col > self.max_col:
self.cur_col = self.min_col
self.cur_row += 1
if self.cur_row > self.max_row:
self.cur_row = None
self.cur_col = None
return None
col_letter = cell_pos.get_col_letter(self.cur_col)
row_str = str(self.cur_row)
wb = self.load_workbook()
ws = wb[self.wsname]
cell_val = ws[col_letter + row_str].value
return_str = str(cell_val)
if cell_val is None:
return_str = ""
if return_str == "#REF!":
logger.warning(
string.Template(
'Unknown reference found at $cell_pos in $id. Defaulting to "unknown".'
).substitute(
cell_pos=col_letter + row_str,
id=msg_handler.get_id((self.filename, self.wsname), "WS"),
)
)
return_str = "unknown"
self.cur_col += 1
wb.close()
return return_str
# Will store a file, worksheet tuple-like class
# with additional data accessible.
xlsx_data_list = ftype.FtypeDataList()
# Contains just a list of file, worksheet tuples.
xlsx_tuple_list = []
# xlsx files always start counting at 1.
INVALID_ROW = 0
def start(input_files):
# Gets the name of worksheets and
# adds it to xlsx_tuple_list.
get_worksheets(input_files)
# Sometimes, openpyxl can't get
# the proper dimensions of a worksheet,
# so it handles that. It also deals with
# headers in the worksheets and removes
# blank cells from the size of the sheet.
set_data()
# Check if some file worksheet pairs don't
# have a valid header.
if not xlsx_data_list:
raise InvconvMissingHeaders
# Can't directly check for membership of
# items from xlsx_tuple_list in xlsx_data_list,
# for they are different types.
for file_section in xlsx_tuple_list:
found_file_section = False
for data_file_section in xlsx_data_list:
# The first element in if statement
# has to be XlsxDataTuple, as it
# contains a __eq__() function
# that should work in this case.
if data_file_section == file_section:
found_file_section = True
break
if not found_file_section:
logger.error(
f"{msg_handler.get_id(file_section, 'ws')} contains no valid headers."
)
msg_handler.does_continue()
return xlsx_data_list
def get_worksheets(input_files):
for input_file in input_files:
wb = load_workbook(input_file, **WB_SETTINGS)
sheetname_list = wb.sheetnames
for sheetname in sheetname_list:
xlsx_tuple_list.append((input_file, sheetname))
wb.close()
def set_data():
for filename, wsname in xlsx_tuple_list:
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
# max_col and max_row can be None.
cur_max_col = ws.max_column
cur_max_row = ws.max_row
# Close workbook right away so
# it won't remain open in case script
# gets closed or crashes.
wb.close()
max_col = get_max_col(filename, wsname, cur_max_col)
max_row = get_max_row(filename, wsname, cur_max_row)
# Get the row where a header was found.
header_row = get_header_row(filename, wsname, max_row)
# check_header_row() ensures that a non-blank row
# is after header row. If not, it might not
# actually be a header row.
if (
header_row == INVALID_ROW
or header_row == max_row
or not check_header_row(filename, wsname, max_col, header_row)
):
continue
# The first row after the header_row.
min_row = header_row + 1
header_list = get_header_list(filename, wsname, max_col, header_row)
if max_col > len(header_list):
            logger.info(
                string.Template(
                    "Reducing max column length of $id from $cur_col to $new_col due to None in $cell_pos."
                ).substitute(
                    id=msg_handler.get_id((filename, wsname), "WS"),
                    cur_col=max_col,
                    new_col=len(header_list),
                    # Position of the first blank header cell that triggered the cut.
                    cell_pos=cell_pos.get_col_letter(len(header_list) + 1) + str(header_row),
                )
            )
max_col = len(header_list)
DataTuple = XlsxDataTuple(filename, wsname, header_list)
DataTuple.set_oper_num(min_row, max_row, max_col)
xlsx_data_list.append(DataTuple)
def get_max_col(filename, wsname, max_col):
xlsx_id = msg_handler.get_id((filename, wsname), "WS")
while (not isinstance(max_col, int)) or (max_col <= INVALID_ROW):
logger.error(f"Max col for {xlsx_id} is {str(max_col)}.")
msg_handler.does_continue()
try:
logger.info("User providing number of columns (starting at 1).")
max_col = int(
input("Please provide the number of columns (starting at 1) > ")
)
except (ValueError, TypeError):
logger.log("FAILURE", "Input could not be converted to int.")
max_col = None
if (isinstance(max_col, int)) and (max_col <= 0):
logger.log("FAILURE", "Input is less than one.")
return max_col
def get_max_row(filename, wsname, max_row):
xlsx_id = msg_handler.get_id((filename, wsname))
while (not isinstance(max_row, int)) or (max_row <= 0):
logger.error(f"Max row for {xlsx_id} is {str(max_row)}.")
msg_handler.does_continue()
try:
logger.info("User providing number of rows (starting at 1).")
max_row = int(input("Please provide the number of rows (starting at 1) > "))
except (ValueError, TypeError):
logger.log("FAILURE", "Input could not be converted to int.")
max_row = None
if (isinstance(max_row, int)) and (max_row <= 0):
logger.log("FAILURE", "Input is less than one.")
return max_row
def get_header_row(filename, wsname, max_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
# header_row starts at 1,
# so a value of 0 indicates
# it wasn't found.
header_row = INVALID_ROW
for row in cell_pos.row_iter(max_row):
row_str = str(row)
# A row with just a title would not fill up the entire max_column.
# As a result, there would be None at either the first or second
# position.
cell1 = ws["A" + row_str].value
cell2 = ws["B" + row_str].value
if cell1 is not None and cell2 is not None:
header_row = row
break
wb.close()
return header_row
def check_header_row(filename, wsname, max_col, header_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
# Check the row after the header row
# for content.
post_header_row = header_row + 1
row_str = str(post_header_row)
# List of items in row.
row_list = []
for col in cell_pos.col_iter(max_col):
col_letter = cell_pos.get_col_letter(col)
row_list.append(str(ws[col_letter + row_str].value))
wb.close()
# Ensure the row is not blank.
if row_list.count("None") != len(row_list):
return True
return False
def get_header_list(filename, wsname, max_col, header_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
header_list = []
row_str = str(header_row)
for col in cell_pos.col_iter(max_col):
col_letter = cell_pos.get_col_letter(col)
header_item = ws[col_letter + row_str].value
# Assuming the header doesn't have blank
# items between entries. Only at the end.
if header_item is None:
logger.warning(
f"Blank header {col_letter+row_str} in {msg_handler.get_id((filename, wsname), 'WS')} will be ignored."
)
break
header_list.append(header_item)
wb.close()
return header_list
if used:
ftype.add("xlsx", start)
| 2.09375 | 2 |
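# Minimal usage sketch for the xlsx front end above (illustrative, not part of the
# original module); "inventory.xlsx" is a placeholder workbook and the invconv helper
# modules (cell_pos, ftype, msg_handler) must be importable exactly as in the file.
sections = start(["inventory.xlsx"])
for section in sections:
    print(section.filename, section.wsname, section.headers)
    cell = section.parser()
    while cell is not None:  # parser() walks cell by cell and returns None when the sheet is done
        print(cell)
        cell = section.parser()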
solvers/generation_solver/img_interface.py | Anthony102899/Lego-ImageGenerator | 1 | 9952 | import os
from tkinter import *
import tkinter.filedialog as tkfd
from PIL import Image
import numpy as np
import solvers.generation_solver.image_seperation as IS
def layer_interface(img_num):
layer_names = []
layer_nums = []
for k in range(img_num):
master = Toplevel()
master.title(f"Image number {k+1}")
master.geometry("+300+200")
# input image and layer
img_label = Label(master, text="Image").grid(row=0)
layer_label = Label(master, text="Layer").grid(row=1)
entry_img = Entry(master, width=30)
entry_layer = Entry(master, width=30)
entry_img.grid(row=0, column=1)
entry_layer.grid(row=1, column=1)
if k == img_num - 1:
Button(master, text='Done', command=master.quit).grid(row=2, column=2, sticky=W, pady=4)
else:
Button(master, text='Next', command=master.quit).grid(row=2, column=2, sticky=W, pady=4)
img_path = "inputs/images/"
img_path = os.path.join(os.path.dirname(__file__), img_path)
path = tkfd.askopenfilename(initialdir = img_path, title = "Select file", filetypes = (("png files","*.png"),("all files","*.*")))
entry_img.insert('0', os.path.basename(path))
image = Image.open(path)
img = PhotoImage(file=path)
width, height = img.width(), img.height()
if width > 250:
scale_w = int(round(width / 250, 0))
scale_h = int(round(height / 250, 0))
img = img.subsample(scale_w, scale_h)
if width < 250:
scale_w = int(round(250 / width, 0))
scale_h = int(round(250 / height, 0))
img = img.zoom(scale_w, scale_h)
Label(master, image=img).grid(row=2, column=1)
mainloop()
img_name = entry_img.get()
img_layer = entry_layer.get()
layer_names.append(img_name)
layer_nums.append(img_layer)
return layer_names, layer_nums
def show_interface():
root = Tk()
root.geometry("+300+300")
Label(root, text="Graph", font=("", 14, "bold", "underline"), fg='#696969').grid(row=0, sticky='w')
entry_graph = Entry(root, width=15)
entry_graph.grid(row=0, column=1)
graph_path = "connectivity/"
graph_path = os.path.join(os.path.dirname(__file__), graph_path)
path = tkfd.askopenfilename(initialdir = graph_path, title = "Select file", filetypes = (("pkl files", "*.pkl"), ("all files","*.*")))
entry_graph.insert('0', os.path.basename(path))
# input No. image and button
Label(root, text="Input image", font=("", 14, "bold", "underline"), fg='#696969').grid(row=1, sticky='w')
entry_file = Entry(root, width=15)
entry_file.grid(row=1, column=1)
entry_path = "inputs/images/"
entry_path = os.path.join(os.path.dirname(__file__), entry_path)
input_path = tkfd.askopenfilename(initialdir=entry_path, title="Select input image", filetypes=(("png files", "*.png"), ("jpg files", "*.jpg")))
entry_file.insert('0', os.path.basename(input_path))
Button(root, text='Next', command=root.quit).grid(row=1, column=2, sticky='e', pady=4)
# input background color
Label(root, text="").grid(row=2, column=1)
Label(root, text="Background color", font=("", 14, "bold", "underline"), fg='#696969').grid(row=3, sticky='w')
Label(root, text="R", fg='#4f4f4f').grid(row=4, column=0)
Label(root, text="G", fg='#4f4f4f').grid(row=4, column=1)
Label(root, text="B", fg='#4f4f4f').grid(row=4, column=2)
entry_r = Entry(root, width=15)
entry_g = Entry(root, width=15)
entry_b = Entry(root, width=15)
entry_r.grid(row=5, column=0)
entry_g.grid(row=5, column=1)
entry_b.grid(row=5, column=2)
# input rotation and scaling
Label(root, text="").grid(row=6, column=1)
Label(root, text="Rotation degree", font=("", 14, "bold", "underline"), fg='#696969').grid(row=7, sticky='w')
entry_degree = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_degree.grid(row=7, column=1)
Label(root, text="Scale", font=("", 14, "bold", "underline"), fg='#696969').grid(row=7, column=2)
entry_scale = Entry(root, width=15, textvariable=StringVar(root, value='1'))
entry_scale.grid(row=7, column=3)
# input translation
Label(root, text="").grid(row=8, column=1)
Label(root, text="x translation", font=("", 14, "bold", "underline"), fg='#696969').grid(row=9, sticky='w')
entry_x = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_x.grid(row=9, column=1)
Label(root, text="y translation", font=("", 14, "bold", "underline"), fg='#696969').grid(row=9, column=2)
entry_y = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_y.grid(row=9, column=3)
Label(root, text="").grid(row=9, column=1)
mainloop()
img_path = input_path
print(img_path)
img_num = IS.seperate_color(img_path, "./cache/")
r, g, b = entry_r.get(), entry_g.get(), entry_b.get()
if len(r) == 0:
r = 0
if len(g) == 0:
g = 0
if len(b) == 0:
b = 0
if r == 0 and g == 0 and b == 0:
rgb = []
else:
rgb = np.array((int(r), int(g), int(b)))
layer_names, layer_nums = layer_interface(img_num)
return entry_graph.get(), img_num, layer_names, layer_nums, rgb, int(entry_degree.get()), float(entry_scale.get()), int(entry_x.get()), int(entry_y.get())
if __name__ == '__main__':
print(show_interface()) | 2.984375 | 3 |
Constellations/get_brightest_stars.py | PatD123/Polar-Constellation | 0 | 9953 | <filename>Constellations/get_brightest_stars.py<gh_stars>0
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
import re, json
# Getting the page
URL = "https://www.astronomytrek.com/star-constellations-brightest-stars/"
uClient = uReq(url=URL)
page_html = uClient.read()
page_soup = soup(page_html, "html.parser")
# Opening a file to write in
stars_file = open("brightest_stars.txt", 'w')
# Return the index of the space just before the opening parenthesis in a star name,
# e.g. find_space("Sirius (Alpha CMa)") returns 6.
def find_space(star):
for i in range(0, len(star)):
if star[i] == " " and star[i + 1] == "(":
return i
brightest_uncleaned = page_soup.find_all("tr")
for html in brightest_uncleaned:
col_4 = html.contents[4].contents[0]
col_5 = html.contents[5].string
if col_5 is not None:
idx = find_space(col_5)
col_5 = col_5[0:idx]
if col_5 == "Brightest Star": continue
stars_file.write(col_5 + "\n")
else:
idx = find_space(col_4)
col_4 = col_4[0:idx]
stars_file.write(col_4 + "\n")
stars_file.close() | 3.1875 | 3 |
TwoPointers/Leetcode11.py | Rylie-W/LeetRecord | 0 | 9954 | class Solution:
def maxArea(self, height) -> int:
        # Two-pointer approach: start with the widest container and repeatedly move the
        # pointer at the shorter line inward, since only that move can improve the area
        # min(height[left], height[right]) * (right - left).
        left = 0
        right = len(height) - 1
        res = min(height[left], height[right]) * (right - left)
        while right > left:
            res = max(res, (right - left) * min(height[right], height[left]))
            if height[left] < height[right]:
                left += 1
            else:
                right -= 1
        return res
if __name__ == '__main__':
sol=Solution()
# height = [1, 1]
height=[1,3,2,5,25,24,5]
print(sol.maxArea(height))
| 3.375 | 3 |
tensorflow_text/python/ops/bert_tokenizer_test.py | hashim361/text | 1 | 9955 | # coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding=utf-8
r"""Tests for BertTokenizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow_text.python.ops import bert_tokenizer
def _utf8(x):
return x.encode('utf-8')
# TODO(thuang513): It appears there isn't a Ragged version of substr; consider
# checking this into core TF.
def _ragged_substr(text_input, begin, end):
text_input_flat = None
if ragged_tensor.is_ragged(text_input):
text_input_flat = text_input.flat_values
else:
text_input_flat = text_input
def _ragged_tile(x):
input_text, indices = x
multiple = math_ops.reduce_sum(indices.row_lengths())
return array_ops.tile([input_text], [multiple])
broadcasted_text = ragged_map_ops.map_fn(
_ragged_tile,
(text_input_flat, begin),
dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.string, ragged_rank=1),
infer_shape=False,
)
size = math_ops.sub(
array_ops.squeeze(end.flat_values), array_ops.squeeze(begin.flat_values))
new_tokens = string_ops.substr_v2(broadcasted_text,
array_ops.squeeze(begin.flat_values), size)
return begin.with_flat_values(new_tokens.flat_values)
_VOCAB = [
b'[unused1]',
b'[unused23]',
b"'",
b'##%',
b'##af',
b'##book',
b'##c',
b'##fr',
b'##hey',
b'##is',
b'##o',
b'##ost',
b'##s',
b'##tri',
b'##y',
b'$',
b'%',
b'&',
b'(',
b')',
b'*',
b'-',
b'.',
b'20',
b':',
b'?',
b'[CLS]',
b'[SEP]',
_utf8(u'國'),
_utf8(u'暐'),
_utf8(u'瀚'),
_utf8(u'韓'),
_utf8(u'食'),
_utf8(u'黃'),
_utf8(u'🤔'),
_utf8(u'🤣'),
b'^',
b'a',
b'ago',
b'among',
b'an',
b'and',
b'are',
b'aren',
b'awesome',
b'between',
b'candy',
b'china',
b'companies',
b'company',
b'crushed',
b'dug',
b'earnings',
b'engaged',
b'even',
b'few',
b'forecast',
b'getting',
b'had',
b'han',
b'has',
b'hers',
b'high',
b'hit',
b'hs',
b'hurting',
b'in',
b'indie',
b'is',
b'isn',
b'ka',
b'ku',
b'major',
b'maker',
b'moth',
b'nearly',
b'new',
b'now',
b'president',
b'record',
b'regulators',
b'reported',
b'rift',
b'rust',
b'sales',
b'shares',
b'slightly',
b'sprint',
b'states',
b'stock',
b't',
b'taste',
b'tension',
b'that',
b'the',
b'this',
b'today',
b'told',
b'topped',
b'trade',
b'trump',
b'united',
b'up',
b'weeks',
b'what',
b'why',
b'with',
b'year',
b'yo',
b'yu',
_utf8(u'\u7231'),
_utf8(u'\u4e0a'),
_utf8(u'\u4e00'),
_utf8(u'\u4e2a'),
_utf8(u'\u4e0d'),
_utf8(u'\u56de'),
_utf8(u'\u5bb6'),
_utf8(u'\u7684'),
_utf8(u'\u4eba'),
]
def _create_table(vocab, num_oov=1):
init = lookup_ops.KeyValueTensorInitializer(
vocab,
math_ops.range(
array_ops.size(vocab, out_type=dtypes.int64), dtype=dtypes.int64),
key_dtype=dtypes.string,
value_dtype=dtypes.int64)
return lookup_ops.StaticVocabularyTableV1(
init, num_oov, lookup_key_dtype=dtypes.string)
class BertTokenizerTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def test_bert_tokenizer_outputs(self):
text_inputs = constant_op.constant([_utf8('Test')])
vocab = _VOCAB
table = _create_table(vocab, 2)
self.evaluate(table.initializer)
tokenizer = bert_tokenizer.BertTokenizer(
table,
token_out_type=dtypes.int32)
results = tokenizer.tokenize(text_inputs)
self.assertAllEqual(results.dtype, dtypes.int32)
@parameterized.parameters([
dict(
text_inputs=[
_utf8(u'taste the rustisc indiefrost'),
_utf8(u'Han Kuo-yu (韓國食)🤔'),
_utf8(u'Añade la información del formulario y tus preguntas'),
],
expected_tokens=[[b'taste', b'the', b'rustisc', b'indiefrost'],
[
b'Han', b'Kuo', b'-', b'yu', b'(',
b'\xe9\x9f\x93', b'\xe5\x9c\x8b',
b'\xe9\xa3\x9f', b')', b'\xf0\x9f\xa4\x94'
],
[
b'A\xc3\xb1ade', b'la', b'informaci\xc3\xb3n',
b'del', b'formulario', b'y', b'tus', b'preguntas'
]],
),
dict(
text_inputs=[
_utf8(u'UNwant\u00E9d,running'),
_utf8(u'Añade la información del formulario y tus preguntas'),
],
expected_tokens=[[b'unwanted', b',', b'running'],
[
b'anade', b'la', b'informacion', b'del',
b'formulario', b'y', b'tus', b'preguntas'
]],
lower_case=True,
),
dict(
text_inputs=[
_utf8(u'Añade la información del formulario y tus preguntas')
],
expected_tokens=[[
b'An\xcc\x83ade', b'la', b'informacio\xcc\x81n', b'del',
b'formulario', b'y', b'tus', b'preguntas'
]],
normalization_form='NFD',
),
# Test CJK are tokenized by unicode characters
dict(
text_inputs=[
_utf8(u'香港では4日'),
_utf8(u'영어독해 자만심 왜 문제일까'),
_utf8(u'據港媒《東網》報導')
],
expected_tokens=[
[_utf8(u'香'),
_utf8(u'港'),
_utf8(u'では4'),
_utf8(u'日')],
[
_utf8(u'영어독해'),
_utf8(u'자만심'),
_utf8(u'왜'),
_utf8(u'문제일까'),
],
[
_utf8(u'據'),
_utf8(u'港'),
_utf8(u'媒'),
_utf8(u'《'),
_utf8(u'東'),
_utf8(u'網'),
_utf8(u'》'),
_utf8(u'報'),
_utf8(u'導')
],
],
normalization_form=None,
),
# Test Katakana followed by Hiragana.
dict(
text_inputs=[_utf8(u'のテキストとして')],
expected_tokens=[
[_utf8(u'のテキストとして')],
],
normalization_form=None,
),
])
@test_util.run_in_graph_and_eager_modes
def test_basic_tokenize(self,
text_inputs,
expected_tokens,
lower_case=False,
normalization_form='NFC'):
text_inputs = ragged_factory_ops.constant(text_inputs)
tokenizer = bert_tokenizer.BasicTokenizer(
lower_case=lower_case, normalization_form=normalization_form)
tokens = tokenizer.tokenize(text_inputs)
self.assertAllEqual(tokens, expected_tokens)
@parameterized.parameters([
dict(
text_inputs=[
b'taste the rustisc indiefrost',
_utf8(u'Han Kuo-yu (韓國食)🤔'),
_utf8(u'dugtrio had an awesome 🤣 dugbook'),
b'yo^what$is*up?',
b'mothaf*&%ka',
],
expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],
[b'indie', b'##fr', b'##ost']],
[[b'han'], [b'ku', b'##o'], [b'-'], [b'yu'], [b'('],
[_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')], [b')'],
[_utf8(u'🤔')]],
[[b'dug', b'##tri', b'##o'], [b'had'], [b'an'],
[b'awesome'], [_utf8(u'🤣')], [b'dug', b'##book']],
[[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'], [b'*'],
[b'up'], [b'?']],
[[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka']]],
expected_extracted=[[[b'taste'], [b'the'], [b'rust', b'is', b'c'],
[b'indie', b'fr', b'ost']],
[[b'Han'], [b'Ku', b'o'], [b'-'], [b'yu'], [b'('],
[_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')],
[b')'], [_utf8(u'🤔')]],
[[b'dug', b'tri', b'o'], [b'had'], [b'an'],
[b'awesome'], [_utf8(u'🤣')], [b'dug', b'book']],
[[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'],
[b'*'], [b'up'], [b'?']],
[[b'moth', b'af'], [b'*'], [b'&'], [b'%'],
[b'ka']]],
lower_case=True,
),
# Test when we are expecting multiple OOV vocab ids and tf.string just
# maps out [UNK] token.
dict(
text_inputs=[
b'mothaf*&%ka cantfindme whodis',
],
expected=[[[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka'],
[b'[UNK]'], [b'[UNK]']]],
expected_extracted=[[[b'moth', b'af'], [b'*'], [b'&'], [b'%'],
[b'ka'], [b'cantfindme'], [b'whodis']]],
lower_case=True,
num_oov=2,
),
dict(
text_inputs=[
b'candy',
],
expected=[[[b'candy']]],
lower_case=True,
num_oov=2,
),
dict(
text_inputs=[
_utf8(u'爱上一个不回家的人'),
],
expected=[[[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],
[_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],
[_utf8(u'人')]]],
lower_case=True,
num_oov=2,
),
# Test 'preserve_unused_token' option
dict(
text_inputs=[
b'taste the rustisc indiefrost [unused1]',
_utf8(u'爱上一个不回家的人[unused23]'),
],
expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],
[b'indie', b'##fr', b'##ost'], [b'[unused1]']],
[[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],
[_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],
[_utf8(u'人')], [b'[unused23]']]],
preserve_unused_token=True,
),
])
@test_util.run_in_graph_and_eager_modes
def test_bert_tokenizer(self,
text_inputs,
expected,
vocab=None,
expected_extracted=None,
lower_case=True,
num_oov=1,
preserve_unused_token=False):
text_inputs = constant_op.constant(text_inputs)
if not vocab:
vocab = _VOCAB
table = _create_table(vocab, num_oov)
self.evaluate(table.initializer)
tokenizer = bert_tokenizer.BertTokenizer(
table,
token_out_type=dtypes.string,
lower_case=lower_case,
preserve_unused_token=preserve_unused_token)
results = tokenizer.tokenize(text_inputs)
self.assertAllEqual(results, expected)
# Verify that the int ids are the same.
expected_rt = ragged_factory_ops.constant(expected)
expected_int = table.lookup(expected_rt.flat_values)
expected_int_rt = ragged_tensor.RaggedTensor.from_nested_row_splits(
expected_int, expected_rt.nested_row_splits)
int_tokenizer = bert_tokenizer.BertTokenizer(
vocab_lookup_table=table,
token_out_type=dtypes.int64,
lower_case=lower_case,
preserve_unused_token=preserve_unused_token)
results_int = int_tokenizer.tokenize(text_inputs)
self.assertAllEqual(results_int, expected_int_rt)
# Verify that the offsets can extract the expected tokens
_, begin, end = tokenizer.tokenize_with_offsets(text_inputs)
extracted_wordpieces = _ragged_substr(text_inputs, begin, end)
if expected_extracted:
self.assertAllEqual(extracted_wordpieces, expected_extracted)
else:
# The extracted won't have any wordpieces with '##' prefix. Strip them
# out.
stripped_prefix_flat = string_ops.regex_replace(expected_rt.flat_values,
'##', '')
stripped_prefix = expected_rt.with_flat_values(stripped_prefix_flat)
self.assertAllEqual(extracted_wordpieces, stripped_prefix)
if __name__ == '__main__':
test.main()
| 2.40625 | 2 |
tests/index_test.py | DubeySandeep/pending-review-notification | 0 | 9956 | <filename>tests/index_test.py
"""Unit test for the index.py file."""
import unittest
from datetime import datetime, timedelta, timezone
import json
from unittest.mock import patch, mock_open
import requests_mock
from src import index
from src import github_services
class ModuleIntegrationTest(unittest.TestCase):
"""Integeration test for the send notification feature."""
def setUp(self):
self.orgName = 'orgName'
self.repoName = 'repo'
self.pull_response = [{
'html_url': 'https://githuburl.pull/123',
'number': 123,
'title': 'PR title 1',
'user': {
'login': 'authorName',
},
'assignees': [{
'login': 'reviewerName1',
}, {
'login': 'reviewerName2',
}]
}, {
'html_url': 'https://githuburl.pull/234',
'number': 234,
'title': 'PR title 2',
'user': {
'login': 'authorName',
},
'assignees': [{
'login': 'reviewerName1',
}, {
'login': 'reviewerName2',
}]
}]
def get_past_time(hours=0):
return (
datetime.now(timezone.utc) - timedelta(hours=hours)).strftime(
"%Y-%m-%dT%H:%M:%SZ")
self.timeline1 = [{
'event': 'created'
}, {
'event': 'assigned',
'assignee': {
'login': 'reviewerName1'
},
'created_at': get_past_time(hours=22)
},{
'event': 'assigned',
'assignee': {
'login': 'reviewerName2'
},
'created_at': get_past_time(hours=56)
}]
self.timeline2 = [{
'event': 'created'
}, {
'event': 'assigned',
'assignee': {
'login': 'reviewerName1'
},
'created_at': get_past_time(hours=23)
}, {
'event': 'assigned',
'assignee': {
'login': 'reviewerName2'
},
'created_at': get_past_time(hours=19)
}]
self.test_template = "{{ username }}\n{{ pr_list }}"
def mock_all_get_requests(self, mock_request):
param_page_1='?page=1&per_page=100'
param_page_2='?page=2&per_page=100'
mock_request.get(
github_services.PULL_REQUESTS_URL_TEMPLATE.format(
self.orgName, self.repoName) + param_page_1,
text=json.dumps(self.pull_response))
mock_request.get(
github_services.PULL_REQUESTS_URL_TEMPLATE.format(
self.orgName, self.repoName) + param_page_2,
text=json.dumps([]))
mock_request.get(
github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
self.orgName, self.repoName, 123) + param_page_1,
text=json.dumps(self.timeline1))
mock_request.get(
github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
self.orgName, self.repoName, 123) + param_page_2,
text=json.dumps([]))
mock_request.get(
github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
self.orgName, self.repoName, 234) + param_page_1,
text=json.dumps(self.timeline2))
mock_request.get(
github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
self.orgName, self.repoName, 234) + param_page_2,
text=json.dumps([]))
def mock_post_discussion_request(self, mock_request):
request = mock_request.post(
github_services.CREATE_DISCUSSION_URL_TEMPLATE.format(
self.orgName, 'teamName'),
text=json.dumps({}))
return request
def test_executing_main_function_sends_notification(self):
with requests_mock.Mocker() as mock_request:
self.mock_all_get_requests(mock_request)
request = self.mock_post_discussion_request(mock_request)
file_data = mock_open(read_data=self.test_template)
with patch("builtins.open", file_data):
index.main([
'--team', 'teamName',
'--repo', 'orgName/repo',
'--max-wait-hours', '20',
'--token', 'githubTokenForApiRequest'
])
self.assertTrue(request.called)
self.assertEqual(request.call_count, 2)
expected_messages = [
{
'title': '[@reviewerName1] Pending review on PRs',
'body': '@reviewerName1\n- [#123](https://githuburl.pull/123) '
'[Waiting from the last 22 hours]\n'
'- [#234](https://githuburl.pull/234) '
'[Waiting from the last 23 hours]'
},
{
'title': '[@reviewerName2] Pending review on PRs',
'body': '@reviewerName2\n- [#123](https://githuburl.pull/123) '
'[Waiting from the last 2 days, 8 hours]'
},
]
self.assertEqual(
request.request_history[0].json(), expected_messages[0])
self.assertEqual(
request.request_history[1].json(), expected_messages[1])
| 2.890625 | 3 |
rqalpha/data/instrument_mixin.py | mysky528/rqalpha | 3 | 9957 | <gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
class InstrumentMixin(object):
def __init__(self, instruments):
self._instruments = {i.order_book_id: i for i in instruments}
self._sym_id_map = {i.symbol: k for k, i in six.iteritems(self._instruments)
                            # filter out CSI300, SSE50, CSI500, SSE180
if not i.order_book_id.endswith('INDX')}
try:
# FIXME
            # CSI 300 and CSI 500 always use the Shanghai (XSHG) codes
for o in ['000300.XSHG', '000905.XSHG']:
self._sym_id_map[self._instruments[o].symbol] = o
            # Both the SSE 180 and SSE 180 Index symbols map to 000010.XSHG
self._sym_id_map[self._instruments['SSE180.INDX'].symbol] = '000010.XSHG'
except KeyError:
pass
def sector(self, code):
return [v.order_book_id for v in self._instruments.values()
if v.type == 'CS' and v.sector_code == code]
def industry(self, code):
return [v.order_book_id for v in self._instruments.values()
if v.type == 'CS' and v.industry_code == code]
def concept(self, *concepts):
return [v.order_book_id for v in self._instruments.values()
if v.type == 'CS' and any(c in v.concept_names.split('|') for c in concepts)]
def all_instruments(self, types, dt=None):
return [i for i in self._instruments.values()
if ((dt is None or i.listed_date.date() <= dt.date() <= i.de_listed_date.date()) and
(types is None or i.type in types))]
def _instrument(self, sym_or_id):
try:
return self._instruments[sym_or_id]
except KeyError:
try:
sym_or_id = self._sym_id_map[sym_or_id]
return self._instruments[sym_or_id]
except KeyError:
return None
def instruments(self, sym_or_ids):
if isinstance(sym_or_ids, six.string_types):
return self._instrument(sym_or_ids)
return [i for i in [self._instrument(sid) for sid in sym_or_ids] if i is not None]
def get_future_contracts(self, underlying, date):
date = date.replace(hour=0, minute=0, second=0)
futures = [v for o, v in six.iteritems(self._instruments)
if v.type == 'Future' and v.underlying_symbol == underlying and
not o.endswith('88') and not o.endswith('99')]
if not futures:
return []
return sorted(i.order_book_id for i in futures if i.listed_date <= date <= i.de_listed_date)
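# Illustrative usage sketch (not part of the original file). `instruments` would
# be rqalpha Instrument objects loaded from the data bundle; the order book id
# and the `some_date` datetime below are placeholders:
#
#   mixin = InstrumentMixin(instruments)
#   mixin.instruments('000001.XSHE')              # look up by order_book_id or symbol
#   mixin.all_instruments(['CS'], dt=some_date)   # all stocks listed on some_date
#   mixin.get_future_contracts('IF', some_date)   # IF contracts trading on some_date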
| 1.992188 | 2 |
Python 3/19.prac_no2.py | ByeonUi-Hyeok/practice | 1 | 9958 | import funcvote as vote
votes = input("Vote contents >>>")
# print(votes)
# print(type(votes))
result = vote.str2int(votes)
print(vote.countvotes(result))
result = vote.countvotes(result)
vote.printvote(result)
# vote draft
tutorials/30-days-of-code/30-operators.py | PingHuskar/hackerrank | 41 | 9959 | <filename>tutorials/30-days-of-code/30-operators.py
# Day 2: Operators
# Start using arithmetic operators.
#
# https://www.hackerrank.com/challenges/30-operators/problem
#
#!/bin/python3
import sys
if __name__ == "__main__":
meal_cost = float(input().strip())
tip_percent = int(input().strip())
tax_percent = int(input().strip())
cost = meal_cost * (1 + tip_percent / 100 + tax_percent / 100)
print("The total meal cost is {:.0f} dollars.".format(cost))
| 4.15625 | 4 |
articles/views.py | qwghlm/CommentIsMee | 0 | 9960 | from django.http import HttpResponse
from django.template import RequestContext, loader
from django.shortcuts import render, get_object_or_404, redirect
from django.core.urlresolvers import reverse
from django.core.cache import cache
from articles.models import CIFArticle
from .forms import CIFArticleForm
def index(request):
"""
Handle requests to the homepage
"""
article = None
# If a user has submitted a URL...
if request.POST:
form = CIFArticleForm(request.POST)
if (form.is_valid()):
try:
article = form.save(commit=False)
existing_articles = CIFArticle.objects.filter(url=article.url).count()
if existing_articles:
article = CIFArticle.objects.get(url=article.url)
else:
article.measure_ego()
article.save()
            except ValueError as e:
article = None
form._errors["url"] = form.error_class([str(e)])
# If no URL submitted, just set up a blank form
else:
form = CIFArticleForm()
# If an article is found or created due to a user submission, redirect there
if article:
return redirect(reverse("articles:detail", args=(article.id,)))
# Else show the homepage & rendered form
else:
top_articles = cache.get('cim:top_articles')
if top_articles is None:
top_articles = CIFArticle.objects.filter(is_cif=1).order_by('-score')[:10]
cache.set('cim:top_articles', top_articles, 60)
latest_articles = cache.get('cim:latest_articles')
if latest_articles is None:
latest_articles = CIFArticle.objects.filter(is_cif=1).order_by('-id')[:5]
cache.set('cim:latest_articles', latest_articles, 30)
return render(request, 'articles/index.html', {
'form' : form ,
'top_articles' : top_articles,
'latest_articles' : latest_articles
})
def detail(request, article_id):
"""
Handle detail view for an article
"""
# Quite simple, set up article and form
form = CIFArticleForm()
article_key = 'cim:article:%s' % article_id
article = cache.get(article_key)
if article is None:
article = get_object_or_404(CIFArticle, id=article_id)
cache.set(article_key, article, 300)
return render(request, 'articles/detail.html', {
'article' : article,
'form' : form })
| 2.359375 | 2 |
utils/runtime_mode.py | omiderfanmanesh/dengue-infections-prediction | 0 | 9961 | <gh_stars>0
# Copyright (c) 2021, <NAME>, All rights reserved.
class RuntimeMode:
TRAIN = 0
TUNING = 1
CROSS_VAL = 2
FEATURE_IMPORTANCE = 3
| 1.296875 | 1 |
pydoc_fork/__main__.py | matthewdeanmartin/pydoc_fork | 0 | 9962 | <gh_stars>0
# noinspection PyPep8
"""pydoc_fork
A fork of pydoc that is optimized for generating html documentation in a CI context
Usage:
pydoc_fork <package>... [options]
pydoc_fork (-h | --help)
pydoc_fork --version
Options:
-h --help Show this screen.
-v --version Show version.
--quiet No printing or logging.
--verbose Crank up the logging.
--config <config> pyproject.toml or other toml config.
--document_internals respect underscore or __all__ private
--prefer_docs_python_org link to python.org or generate own stdlib docs
-o --output <folder> where to write files
"""
# TODO: implement this
# pydoc_fork dot_notation <importable>... [--output=<folder>] [--document_internals]
# pydoc_fork source_path <path>... [--output=<folder>] [--document_internals]
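# Example invocations based on the docopt spec above (package and folder names
# are placeholders, not from the original project):
#
#   pydoc_fork my_package --output docs --verbose
#   pydoc_fork my_package other_package --output docs --document_internals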
import logging
import sys
import docopt
from pydoc_fork import commands, settings
from pydoc_fork.settings import load_config
LOGGER = logging.getLogger(__name__)
LOGGERS = []
__version__ = "3.0.0"
def main() -> int:
"""Get the args object from command parameters"""
arguments = docopt.docopt(__doc__, version=f"pydoc_fork {__version__}")
config_path = arguments.get("<config>")
if config_path:
load_config(config_path)
LOGGER.debug(f"Invoking with docopts: {str(arguments)}")
output_folder = arguments["--output"]
# TODO: add lists of packages
package = arguments["<package>"] or []
# quiet = bool(arguments.get("--quiet", False))
if arguments.get("--document_internals"):
settings.DOCUMENT_INTERNALS = arguments["--document_internals"]
if arguments.get("--prefer_docs_python_org"):
settings.PREFER_DOCS_PYTHON_ORG = arguments["--prefer_docs_python_org"]
if arguments.get("--verbose"):
# root logger, all modules
for root in ("pydoc_fork", "__main__"):
logger = logging.getLogger(root)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
LOGGERS.append(logger)
commands.process_path_or_dot_name(
package,
output_folder=output_folder,
)
# # TODO
# print("Don't recognize that command.")
# return -1
return 0
if __name__ == "__main__":
sys.exit(main())
| 2.296875 | 2 |
backend/src/libs/strings.py | codeglitchz/attendance-system | 37 | 9963 | """
libs.strings
By default, uses `en-gb.json` file inside the `strings` top-level folder.
If language changes, set `libs.strings.default_locale` and run `libs.strings.refresh()`.
"""
import json
default_locale = "en-us"
cached_strings = {}
def refresh():
global cached_strings
with open(f"strings/{default_locale}.json") as f:
cached_strings = json.load(f)
def gettext(name):
return cached_strings[name]
refresh()
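# Usage sketch (illustrative, not part of the original module). Switching the
# locale at runtime assumes a matching strings/<locale>.json file exists, and
# "greeting" is a hypothetical key:
#
#   import libs.strings as strings
#   strings.default_locale = "en-gb"
#   strings.refresh()                      # reload cached_strings from strings/en-gb.json
#   message = strings.gettext("greeting")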
| 2.53125 | 3 |
nemo_cmd/deflate.py | SalishSeaCast/NEMO-Cmd | 1 | 9964 | # Copyright 2013-2021 The Salish Sea MEOPAR Contributors
# and The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NEMO-Cmd command plug-in for deflate sub-command.
Deflate variables in netCDF files using Lempel-Ziv compression.
"""
import logging
import math
import multiprocessing
from pathlib import Path
import shlex
import subprocess
import time
import attr
import cliff.command
logger = logging.getLogger(__name__)
class Deflate(cliff.command.Command):
"""Deflate variables in netCDF files using Lempel-Ziv compression."""
def get_parser(self, prog_name):
parser = super(Deflate, self).get_parser(prog_name)
parser.description = """
Deflate variables in netCDF files using Lempel-Ziv compression.
Converts files to netCDF-4 format.
The deflated file replaces the original file.
This command is effectively the same as running
ncks -4 -L -O FILEPATH FILEPATH
for each FILEPATH.
"""
parser.add_argument(
"filepaths",
nargs="+",
type=Path,
metavar="FILEPATH",
help="Path/name of file to be deflated.",
)
parser.add_argument(
"-j",
"--jobs",
type=int,
default=math.floor(multiprocessing.cpu_count() / 2),
help=(
"Maximum number of concurrent deflation processes allowed. "
"Defaults to 1/2 the number of cores detected."
),
)
return parser
def take_action(self, parsed_args):
"""Execute the :command:`nemo deflate` sub-command.
Deflate variables in netCDF files using Lempel-Ziv compression.
Converts files to netCDF-4 format.
The deflated file replaces the original file.
This command is effectively the same as
:command:`ncks -4 -L -O filename filename`.
"""
deflate(parsed_args.filepaths, parsed_args.jobs)
@attr.s
class DeflateJob(object):
"""netCDF file deflation job."""
#: Path/name of the netCDF file to deflate.
filepath = attr.ib()
#: Lempel-Ziv compression level to use.
dfl_lvl = attr.ib(default=4)
#: Deflation job subprocess object.
process = attr.ib(default=None)
#: Deflation job process PID.
pid = attr.ib(default=None)
#: Deflation job process return code.
returncode = attr.ib(default=None)
def start(self):
"""Start the deflation job in a subprocess.
Cache the subprocess object and its process id as job attributes.
"""
cmd = "nccopy -s -4 -d{0.dfl_lvl} {0.filepath} {0.filepath}.nccopy.tmp".format(
self
)
self.process = subprocess.Popen(
shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
self.pid = self.process.pid
logger.debug("deflating {0.filepath} in process {0.pid}".format(self))
@property
def done(self):
"""Return a boolean indicating whether or not the job has finished.
Cache the subprocess return code as a job attribute.
"""
finished = False
self.returncode = self.process.poll()
if self.returncode is not None:
if self.returncode == 0:
Path("{0.filepath}.nccopy.tmp".format(self)).rename(self.filepath)
finished = True
logger.debug(
"deflating {0.filepath} finished "
"with return code {0.returncode}".format(self)
)
return finished
def deflate(filepaths, max_concurrent_jobs):
"""Deflate variables in each of the netCDF files in filepaths using
Lempel-Ziv compression.
Converts files to netCDF-4 format.
The deflated file replaces the original file.
:param sequence filepaths: Paths/names of files to be deflated.
:param int max_concurrent_jobs: Maximum number of concurrent deflation
processes allowed.
"""
logger.info(
"Deflating in up to {} concurrent sub-processes".format(
int(max_concurrent_jobs)
)
)
jobs = [DeflateJob(fp) for fp in filepaths if fp.exists()]
jobs_in_progress = _launch_initial_jobs(jobs, max_concurrent_jobs)
while jobs or jobs_in_progress:
time.sleep(1)
_poll_and_launch(jobs, jobs_in_progress)
def _launch_initial_jobs(jobs, max_concurrent_jobs):
jobs_in_progress = {}
for process in range(int(max_concurrent_jobs)):
try:
job = jobs.pop(0)
except IndexError:
break
else:
job.start()
jobs_in_progress[job.pid] = job
return jobs_in_progress
def _poll_and_launch(jobs, jobs_in_progress):
for running_job in jobs_in_progress.copy().values():
if running_job.done:
result, _ = running_job.process.communicate()
logger.error(result) if result else logger.info(
"netCDF4 deflated {.filepath}".format(running_job)
)
jobs_in_progress.pop(running_job.pid)
try:
job = jobs.pop(0)
except IndexError:
continue
else:
job.start()
jobs_in_progress[job.pid] = job
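# Usage sketch (illustrative only; file names are hypothetical): deflate two
# netCDF files with at most 2 concurrent nccopy processes.
#
#   from pathlib import Path
#   from nemo_cmd.deflate import deflate
#   deflate([Path("grid_T.nc"), Path("grid_U.nc")], max_concurrent_jobs=2)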
| 2.09375 | 2 |
src/UQpy/distributions/collection/__init__.py | SURGroup/UncertaintyQuantification | 0 | 9965 | """distributions module."""
from UQpy.distributions.collection.Beta import Beta
from UQpy.distributions.collection.Binomial import Binomial
from UQpy.distributions.collection.Cauchy import Cauchy
from UQpy.distributions.collection.ChiSquare import ChiSquare
from UQpy.distributions.collection.Exponential import Exponential
from UQpy.distributions.collection.Gamma import Gamma
from UQpy.distributions.collection.GeneralizedExtreme import GeneralizedExtreme
from UQpy.distributions.collection.InverseGaussian import InverseGauss
from UQpy.distributions.collection.Laplace import Laplace
from UQpy.distributions.collection.Levy import Levy
from UQpy.distributions.collection.Logistic import Logistic
from UQpy.distributions.collection.Lognormal import Lognormal
from UQpy.distributions.collection.Maxwell import Maxwell
from UQpy.distributions.collection.Multinomial import Multinomial
from UQpy.distributions.collection.MultivariateNormal import MultivariateNormal
from UQpy.distributions.collection.Normal import Normal
from UQpy.distributions.collection.Pareto import Pareto
from UQpy.distributions.collection.Poisson import Poisson
from UQpy.distributions.collection.Rayleigh import Rayleigh
from UQpy.distributions.collection.TruncatedNormal import TruncatedNormal
from UQpy.distributions.collection.Uniform import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.distributions.collection.JointCopula import JointCopula
| 1.148438 | 1 |
Learning Python/Exercise Files/Ch2/helloworld_my.py | RomanShevtsiv/linkedin-learning | 0 | 9966 | #
# Example file for HelloWorld
#
def main():
print("Hello World")
name = input("What is your name? ")
print("Nice to meet you,", name)
if __name__ == "__main__":
main()
| 3.5625 | 4 |
RocketSimulation.py | pietrotrope/SolarSystemSimulation | 0 | 9967 | <filename>RocketSimulation.py<gh_stars>0
import sys
import csv
import json
import math
import pygame
import numpy as np
from pygame.locals import *
import pandas as pd
from data import *
from agent import agentsList, Agent
global screenSize
screenSize = [1920, 1080]
def load_parameters(path):
package = []
file = open(path, 'r')
j = json.load(file)
for subgroup in j.values():
package.append([cast(x) for x in subgroup.values()])
env_variables = package.pop(4)
file.close()
return (package, env_variables)
def cast(x):
try:
return float(x)
except Exception:
return str(x)
class Environment:
def __init__(self, vars):
# Environmental Constants
self.elev, self.t, self.g, self.M_air, self.R, self.gamma, self.P_zero = vars # noqa
self.g_zero = self.g
self.Re = 6356766
# Layer base altitudes
self.hb = [0, 11000, 20000, 32000, 47000, 51000, 71000]
# Layer base pressures
self.Pb = [101325, 22632.1, 5474.89,
868.019, 110.906, 66.9389, 3.95642]
# Layer base temperatures
self.Tb = [288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65]
# Layer lapse rates
self.Lm = [-0.0065, 0.0, 0.001, 0.0028, 0.0, -0.0028, -0.002]
def get_geopotential_altitude(self, z: float) -> float:
return self.Re*z / (self.Re+z)
def atmo_heterosphere_equ(self, z: float, a, b, c, d, e):
z_km = z/1000
return math.exp(a * z_km**4 + b * z_km**3 + c * z_km**2 + d * z_km + e) # noqa
def get_gravity(self, z: float) -> float:
return self.g_zero * (self.Re / (self.Re + z))**2
def get_temp(self, z: float, h: float) -> float:
if h <= 84852:
for i in range(len(self.hb)-1):
if self.hb[i] <= h <= self.hb[i+1]:
return (self.Tb[i] + self.Lm[i]*(h-self.hb[i]), i)
return (self.Tb[i+1] + self.Lm[i+1]*(h-self.hb[i+1]), i+1)
elif 86000 < z <= 91000:
return (186.87, 7)
elif 91000 < z <= 110000:
if 91000 < z <= 100000:
layer = 8
elif 100000 < z <= 110000:
layer = 9
return (
263.1905 - 76.3232 * math.sqrt(1 - ((z - 91000) / -19942.9)**2), # noqa
layer
)
elif 110000 < z <= 120000:
return (240 + 0.012 * (z - 110000), 10)
elif 120000 < z <= 1000000:
if 120000 < z <= 150000:
layer = 11
elif 150000 < z <= 200000:
layer = 12
elif 200000 < z <= 300000:
layer = 13
elif 300000 < z <= 500000:
layer = 14
elif 500000 < z <= 750000:
layer = 15
elif 750000 < z <= 1000000:
layer = 16
xi = (z - 120000) * (6356766 + 120000) / (6356766 + z)
return (1000 - 640 * math.exp(-0.00001875 * xi), layer)
def get_pressure(self, z: float, h: float, T: float, b: int) -> float:
if b <= 6:
if self.Lm[b] != 0:
return self.Pb[b] * (self.Tb[b]/T)**(self.g_zero*self.M_air/(self.R*self.Lm[b])) # noqa
else:
return self.Pb[b] * math.exp(-self.g_zero * self.M_air * (h-self.hb[b]) / (self.R*self.Tb[b])) # noqa
elif b == 7:
return self.atmo_heterosphere_equ(
z, 0.000000, 2.159582e-6, -4.836957e-4, -0.1425192, 13.47530)
elif b == 8:
return self.atmo_heterosphere_equ(
z, 0.000000, 3.304895e-5, -0.009062730, 0.6516698, -11.03037)
elif b == 9:
return self.atmo_heterosphere_equ(
z, 0.000000, 6.693926e-5, -0.01945388, 1.719080, -47.75030)
elif b == 10:
return self.atmo_heterosphere_equ(
z, 0.000000, -6.539316e-5, 0.02485568, -3.223620, 135.9355)
elif b == 11:
return self.atmo_heterosphere_equ(
z, 2.283506e-7, -1.343221e-4, 0.02999016, -3.055446, 113.5764)
elif b == 12:
return self.atmo_heterosphere_equ(
z, 1.209434e-8, -9.692458e-6, 0.003002041, -0.4523015, 19.19151)
elif b == 13:
return self.atmo_heterosphere_equ(
z, 8.113942e-10, -9.822568e-7, 4.687616e-4, -0.1231710, 3.067409)
elif b == 14:
return self.atmo_heterosphere_equ(
z, 9.814674e-11, -1.654439e-7, 1.148115e-4, -0.05431334, -2.011365)
elif b == 15:
return self.atmo_heterosphere_equ(
z, -7.835161e-11, 1.964589e-7, -1.657213e-4, 0.04305869, -14.77132)
elif b == 16:
return self.atmo_heterosphere_equ(
z, 2.813255e-11, -1.120689e-7, 1.695568e-4, -0.1188941, 14.56718)
def get_density(self, z: float, P: float, T: float, b) -> float:
if b <= 6:
return (P * self.M_air)/(self.R * T)
elif b == 7:
return self.atmo_heterosphere_equ(
z, 0.000000, -3.322622E-06, 9.111460E-04, -0.2609971, 5.944694)
elif b == 8:
return self.atmo_heterosphere_equ(
z, 0.000000, 2.873405e-05, -0.008492037, 0.6541179, -23.62010)
elif b == 9:
return self.atmo_heterosphere_equ(
z, -1.240774e-05, 0.005162063, -0.8048342, 55.55996, -1443.338)
elif b == 10:
return self.atmo_heterosphere_equ(
z, 0.00000, -8.854164e-05, 0.03373254, -4.390837, 176.5294)
elif b == 11:
return self.atmo_heterosphere_equ(
z, 3.661771e-07, -2.154344e-04, 0.04809214, -4.884744, 172.3597)
elif b == 12:
return self.atmo_heterosphere_equ(
z, 1.906032e-08, -1.527799E-05, 0.004724294, -0.6992340, 20.50921)
elif b == 13:
return self.atmo_heterosphere_equ(
z, 1.199282e-09, -1.451051e-06, 6.910474e-04, -0.1736220, -5.321644)
elif b == 14:
return self.atmo_heterosphere_equ(
z, 1.140564e-10, -2.130756e-07, 1.570762e-04, -0.07029296, -12.89844)
elif b == 15:
return self.atmo_heterosphere_equ(
z, 8.105631e-12, -2.358417e-09, -2.635110e-06, -0.01562608, -20.02246)
elif b == 16:
return self.atmo_heterosphere_equ(
z, -3.701195e-12, -8.608611e-09, 5.118829e-05, -0.06600998, -6.137674)
def get_c(self, T: float) -> float:
return math.sqrt((self.gamma * self.R * T) / self.M_air)
def get_status(self, z: float):
h = round(self.get_geopotential_altitude(z), 0)
self.g = self.get_gravity(z)
self.T, b = self.get_temp(z, h)
self.P = self.get_pressure(z, h, self.T, b)
self.Rho = self.get_density(z, self.P, self.T, b)
self.c = self.get_c(self.T)
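# Illustrative sketch (not part of the original script): querying the atmosphere
# model directly. The constructor takes the env_variables list normally loaded
# from RocketSimulationData/info.json as [elev, t, g, M_air, R, gamma, P_zero];
# the literal values below are assumptions for demonstration only.
#
#   env = Environment([0.0, 0.01, 9.80665, 0.0289644, 8.31447, 1.4, 101325.0])
#   env.get_status(5000)             # altitude in metres above sea level
#   print(env.T, env.P, env.Rho, env.c)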
class System:
def __init__(self, params, env, burn_time: float):
package = params
print(package)
# Environment
self.env = env
# Burn time
self.num_steps = int(burn_time // self.env.t)
self.burn_time = self.num_steps * self.env.t
# Engine specs
self.etype = package[0][0]
package[0].pop(0)
if self.etype == "Liquid":
self.isp, self.thrust = package[0]
elif self.etype == "Solid":
self.isp, self.avg_thrust, path = package[0] # noqa
with(open(path)) as f:
csv_reader = csv.reader(f)
self.thrust_curve = {}
for row in csv_reader:
self.thrust_curve.update({
float(row[0]): float(row[1])
})
f.close()
# Fuel Specs
if self.etype == "Liquid":
self.OFratio, self.Reserve = package[1]
elif self.etype == "Solid":
self.OFratio = 0
self.Reserve = package[1][0]
# Flow Rate
if self.etype == "Liquid":
self.w = (self.thrust/self.env.g_zero)/self.isp
elif self.etype == "Solid":
self.w = (self.avg_thrust/self.env.g_zero)/self.isp
self.dF = self.w * (1 / (self.OFratio + 1))
self.dOx = (self.w - self.dF)
# Fuel & Oxidizer
self.F = (self.dF * self.burn_time)/(1 - self.Reserve/100)
self.Ox = (self.dOx * self.burn_time)/(1 - self.Reserve/100)
# Mass
self.dry_mass = package[2][0]
# Aerodynamics
self.Cd, self.cross_section = package[3]
# Output
self.csvout = package[4][0]
self.field_names = ["t", "thrust", "drag", "m", "v", "mach", "a", "altitude",
"asl", "twr", "max_v", "max_mach", "max_acc", "min_acc", "max_g", "min_g"]
with open(self.csvout, "w", newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(self.field_names)
f.close()
# Flight
def launch(self):
"""Runs a simulation within the given parameters."""
# Variables setup
self.t = 0
self.altitude = 0
self.asl = self.altitude + self.env.elev
self.calc_mass()
self.env.get_status(self.asl)
self.calc_thrust()
self.calc_twr()
self.drag = 0
self.v = 0
self.max_v = 0
self.mach = 0
self.max_mach = 0
self.max_acc = 0
self.max_g = 0
self.min_acc = 0
self.min_g = 0
self.a = 0
self.j = 0
self.s = 0
# Used by matplotlib
self.data = [[], [], [], [], [], [], [], [], [], [], []]
# Accelaration phase
for i in range(self.num_steps):
# Output management
self.add_data()
# Environment-related
self.update_env()
# Thrust-related
self.calc_thrust()
# Accelaration/derivative-related
self.calc_acc()
self.calc_additional_derivatives()
# Position-related
self.set_altitude()
# Velocity-related
self.calc_velocity()
# Force-related
self.calc_drag()
self.calc_twr()
# Mass-related
self.calc_propellant()
self.calc_mass()
# Time-related
self.t += self.env.t
if self.a > self.max_acc:
self.max_acc = self.a
self.max_g = self.max_acc/self.env.g
if self.v > self.max_v:
self.max_v = self.v
self.max_mach = self.mach
self.thrust = 0
# Deceleration phase
while self.v > 0:
# Output management
self.add_data()
# Environment-related
self.update_env()
# Accelaration/derivative-related
self.calc_acc()
self.calc_additional_derivatives()
# Position-related
self.set_altitude()
# Velocity-related
self.calc_velocity()
# Force-related
self.calc_drag()
self.calc_twr()
# Mass-related
self.calc_mass()
# Time-related
self.t += self.env.t
if self.a < self.min_acc:
self.min_acc = self.a
self.min_g = self.min_acc/self.env.g
self.output("max_v", "max_mach", "max_acc",
"min_acc", "max_g", "min_g")
def suicide_burn(self):
"""Run a suicide burn simulation, will affct ascent simulation."""
self.Vt = math.sqrt((2 * self.m * self.env.g) / (self.env.Rho * self.cross_section * self.Cd)) # noqa
# Mass
def calc_mass(self):
self.propellant_mass = (self.Ox + self.F)
self.m = self.propellant_mass + self.dry_mass
def calc_propellant(self):
if self.etype == "Liquid":
self.w = (self.thrust/self.env.g_zero)/self.isp
elif self.etype == "Solid":
self.w = (self.avg_thrust/self.env.g_zero)/self.isp
self.dF = self.w * (1/(self.OFratio+1))
self.dOx = (self.w - self.dF)
self.Ox -= self.dOx * self.env.t
self.F -= self.dF * self.env.t
# Position
def set_altitude(self):
self.altitude += self.v * self.env.t + (self.a * self.env.t**2)/2 # noqa
self.asl = self.altitude + self.env.elev
# Derivatives of position
def calc_velocity(self):
self.v += self.a * self.env.t
self.mach = self.v/self.env.c
def calc_acc(self):
self.a = (self.thrust - (self.m * self.env.g + self.drag)) / self.m
def calc_additional_derivatives(self):
self.j = (self.a - self.data[4][-1]) / self.env.t
self.s = (self.j - self.data[5][-1]) / self.env.t
# Forces
def calc_thrust(self):
if self.etype == "Liquid":
pass
elif self.etype == "Solid":
self.thrust = self.thrust_curve[round(self.t, 3)]
def calc_drag(self):
self.drag = 0.5 * (self.env.Rho * self.v**2 * self.Cd * self.cross_section) # noqa
def calc_twr(self):
self.twr = self.thrust / (self.m * self.env.g)
# Environment
def update_env(self):
self.env.get_status(self.asl)
# Ouput
def output(self, *args):
values = []
for field in self.field_names:
value = str(round(eval(field, self.__dict__), 5))
values.append(value)
with open(self.csvout, "a", newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(values)
f.close()
def add_data(self):
self.data[0].append(self.t)
self.data[1].append(self.altitude)
self.data[2].append(self.v)
self.data[3].append(self.env.c)
self.data[4].append(self.a)
self.data[5].append(self.j)
self.data[6].append(self.s)
self.data[7].append(self.drag)
self.output("t", "thrust", "drag", "m", "v",
"mach", "a", "altitude", "asl", "twr")
def run_simulation(burn_time):
params = load_parameters("RocketSimulationData/info.json")
env = Environment(params[1])
s = System(params[0], env, burn_time)
s.launch()
def renderAgents(screen, res, ratio):
screen.fill((0, 0, 0))
pygame.draw.rect(screen, (0, 0, 255), (0, 1080-108, 1920, 108))
pos = screenSize[1]-158 - res["altitude"]*ratio
# print("altitude: "+str(res["altitude"])+", pos: "+str(pos))
pygame.draw.rect(screen, (255, 255, 255), (940, pos, 20, 50))
pygame.display.update()
def simulateRocket(screen):
run_simulation(150)
df = pd.read_csv('RocketSimulationData/Flight.csv')
result = df.to_dict("index")
ratio = screenSize[1]/1000000
interestingPoint = None
for res in result:
# print("time: "+str(result[res]["t"])+" Altitude: "+str(result[res]["altitude"]))
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
renderAgents(screen, result[res], ratio)
if result[res]["altitude"] < 800000:
interestingPoint = result[res]
pygame.display.update()
return interestingPoint
| 2.625 | 3 |
sourcecode/GAN/FID/__init__.py | toufeeqahamedns/GeneratingHumanFaces | 0 | 9968 | """ Package has implementation for the FID score calculation
"""
from GAN.FID import fid_score
from GAN.FID import inception
| 1.203125 | 1 |
flask_monitoringdashboard/test/core/profiler/util/test_stringhash.py | timgates42/Flask-MonitoringDashboard | 3 | 9969 | import unittest
from flask_monitoringdashboard.core.profiler.util.stringHash import StringHash
class TestStringHash(unittest.TestCase):
def test_stringhash(self):
string_hash = StringHash()
self.assertEqual(string_hash.hash('abc'), 0)
self.assertEqual(string_hash.hash('def'), 1)
self.assertEqual(string_hash.hash('abc'), 0)
def test_unhash(self):
string_hash = StringHash()
self.assertEqual(string_hash.unhash(string_hash.hash('abc')), 'abc')
self.assertRaises(ValueError, string_hash.unhash, 'unknown')
| 3.03125 | 3 |
news/views.py | valch85/newssite | 0 | 9970 | from django.shortcuts import render, get_object_or_404
from .models import News
# Create your views here.
def index(request):
latest_news_list = News.objects.order_by('-pub_date')[:10]
context = {'latest_news_list': latest_news_list}
return render(request, 'news/index.html', context)
def detail(request, news_id):
new = get_object_or_404(News, pk=news_id)
return render(request, 'news/detail.html', {'new': new})
| 2 | 2 |
benchmark/generate_libs/jamplus.py | chadaustin/ibb | 4 | 9971 | #!/usr/bin/python
import os.path
import cppcodebase
import random
def CreateLibJamfile(lib_number, classes):
os.chdir(cppcodebase.lib_name(lib_number))
handle = file("Jamfile.jam", "w")
handle.write ("SubDir TOP lib_" + str(lib_number) + " ;\n\n")
handle.write ("SubDirHdrs $(INCLUDES) ;\n\n")
handle.write ("Library lib_" + str(lib_number) + " :\n")
for i in xrange(classes):
handle.write(' class_' + str(i) + '.cpp\n')
handle.write (' ;\n')
os.chdir('..')
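# For reference, CreateLibJamfile(0, 2) produces a lib_0/Jamfile.jam along these
# lines (reconstructed from the write calls above, shown for illustration):
#
#   SubDir TOP lib_0 ;
#
#   SubDirHdrs $(INCLUDES) ;
#
#   Library lib_0 :
#       class_0.cpp
#       class_1.cpp
#       ;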
def CreateFullJamfile(libs):
handle = file("Jamfile.jam", "w")
handle.write ("SubDir TOP ;\n\n")
for i in xrange(libs):
handle.write('SubInclude TOP ' + cppcodebase.lib_name(i) + ' ;\n')
handle.write('\nWorkspace GeneratedLibs :\n')
for i in xrange(libs):
handle.write('\t\t' + cppcodebase.lib_name(i) + '\n')
handle.write(';\n')
handle = file("Jamrules.jam", "w")
handle.write ('INCLUDES = $(TOP) ;\n')
def CreateCodebase(libs, classes, internal_includes, external_includes):
cppcodebase.SetDir('jamplus')
cppcodebase.CreateSetOfLibraries(libs, classes, internal_includes, external_includes, CreateLibJamfile)
CreateFullJamfile(libs)
os.chdir('..')
| 2.46875 | 2 |
inbm/dispatcher-agent/dispatcher/fota/fota.py | intel/intel-inb-manageability | 5 | 9972 | """
FOTA update tool which is called from the dispatcher during installation
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
import os
import platform
from threading import Timer
from typing import Any, Optional, Mapping
from future.moves.urllib.parse import urlparse
from inbm_common_lib.exceptions import UrlSecurityException
from inbm_common_lib.utility import canonicalize_uri
from inbm_common_lib.constants import REMOTE_SOURCE
from .constants import *
from .fota_error import FotaError
from .manifest import parse_tool_options, parse_guid, parse_hold_reboot_flag
from .os_factory import OsFactory, OsType
from ..common import dispatcher_state
from ..common.result_constants import *
from ..constants import UMASK_OTA
from ..dispatcher_callbacks import DispatcherCallbacks
from ..dispatcher_exception import DispatcherException
from ..downloader import download
from ..packagemanager.local_repo import DirectoryRepo
logger = logging.getLogger(__name__)
class FOTA:
"""AKA FOTA Tool
An instance of this class will be called from the
dispatcher if the requested type of update is FOTA
"""
def __init__(self,
parsed_manifest: Mapping[str, Optional[Any]],
repo_type: str,
dispatcher_callbacks: DispatcherCallbacks) -> None:
"""Base class constructor for variable assignment, to send telemetry info and create a new
directory if no repo is present
@param parsed_manifest: Parsed parameters from manifest
@param repo_type: OTA source location -> local or remote
@param dispatcher_callbacks: DispatcherCallbacks instance
"""
logger.debug(f"parsed_manifest: {parsed_manifest}")
self._ota_element = parsed_manifest.get('resource')
logger.debug(f"ota_element: {self._ota_element}")
self._dispatcher_callbacks = dispatcher_callbacks
self._uri: Optional[str] = parsed_manifest['uri']
self._repo_type = repo_type
repo_path: Optional[str]
"""If repo_type=local, then use path and not URI"""
if self._repo_type == REMOTE_SOURCE:
if not self._uri:
raise FotaError("missing URI.")
else:
self._pkg_filename = os.path.basename(urlparse(self._uri).path)
repo_path = None
else:
if self._ota_element is None or 'path' not in self._ota_element:
raise FotaError('attempting to use local repo for FOTA but no path specified')
self._pkg_filename = os.path.basename(self._ota_element['path'])
path = self._ota_element.get('path', None)
logger.debug(f"path: {path}")
if path is None:
repo_path = None
else:
repo_path = os.path.dirname(path)
logger.debug(f"repo_path: {repo_path}")
self.__signature = parsed_manifest['signature']
self._hash_algorithm = parsed_manifest['hash_algorithm']
self._username = parsed_manifest['username']
self._password = parsed_manifest['password']
if self._dispatcher_callbacks is None:
raise FotaError("dispatcher_callbacks not specified in FOTA constructor")
self._dispatcher_callbacks.broker_core.telemetry("Firmware Update Tool launched")
if repo_path:
logger.debug("Using manifest specified repo path")
self._repo = DirectoryRepo(repo_path)
else:
logger.debug("Using default repo path")
self._repo = DirectoryRepo(CACHE)
def install(self) -> Result:
"""checks current platform versions and then issues download
and install. Performs clean() in failure conditions
@return: (Result) containing status code and message
"""
logger.debug("")
return_message: Result = Result()
hold_reboot = False
try:
factory = OsFactory.get_factory(
self._verify_os_supported(), self._ota_element, self._dispatcher_callbacks)
bios_vendor, platform_product = factory.create_upgrade_checker().check()
if self._repo_type.lower() == REMOTE_SOURCE:
# need to perform this check here because some FOTA commands don't have a URI -- see constructor
# (instead they have a path)
if self._uri is None:
raise FotaError(
"internal error: _uri uninitialized in Fota.install with download requested in manifest")
uri = canonicalize_uri(self._uri)
download(dispatcher_callbacks=self._dispatcher_callbacks,
uri=uri,
repo=self._repo,
umask=UMASK_OTA,
username=self._username,
password=self._password)
else:
logger.debug("Skipping FOTA upgradable check for local repo")
if self._ota_element is None:
raise FotaError("missing ota_element")
tool_options = parse_tool_options(self._ota_element)
logger.debug(f"tool_options: {tool_options}")
guid = parse_guid(self._ota_element)
logger.debug(f"guid: {guid}")
hold_reboot = parse_hold_reboot_flag(self._ota_element)
logger.debug(f"holdReboot: {hold_reboot}; pkg_filename: {self._pkg_filename}")
factory.create_installer(self._repo, FOTA_CONF_PATH, FOTA_CONF_SCHEMA_LOC).\
install(guid=guid,
tool_options=tool_options,
pkg_filename=self._pkg_filename,
signature=self.__signature,
hash_algorithm=self._hash_algorithm,
bios_vendor=bios_vendor,
platform_product=platform_product)
def trigger_reboot() -> None:
"""This method triggers a reboot."""
factory.create_rebooter().reboot()
if not hold_reboot:
logger.debug("")
state = {'restart_reason': "fota"}
dispatcher_state.write_dispatcher_state_to_state_file(state)
time_to_trigger_reboot = Timer(0.1, trigger_reboot)
time_to_trigger_reboot.start()
return_message = COMMAND_SUCCESS
else:
status = 'Reboot on hold after Firmware update...'
state = {'restart_reason': "pota"}
dispatcher_state.write_dispatcher_state_to_state_file(state)
logger.debug(status)
self._dispatcher_callbacks.broker_core.telemetry(status)
except (DispatcherException, FotaError, UrlSecurityException, ValueError, FileNotFoundError) as e:
error = 'Firmware Update Aborted: ' + str(e)
logger.error(error)
self._dispatcher_callbacks.broker_core.telemetry(error)
return_message = INSTALL_FAILURE
self._repo.delete(self._pkg_filename)
# In POTA, mender file needs to be deleted also.
if hold_reboot:
self._repo.delete_all()
finally:
if return_message == COMMAND_SUCCESS:
status = 'Firmware update in process...'
else:
status = 'Firmware Update Aborted'
dispatcher_state.clear_dispatcher_state()
logger.debug('Firmware update status: ' + status)
self._dispatcher_callbacks.broker_core.telemetry(status)
return return_message
@staticmethod
def _verify_os_supported():
"""checks if the current OS is supported.
@return True if OS is supported; otherwise, false.
@raise ValueError Unsupported OS
"""
logger.debug("")
os_type = platform.system()
logger.debug(f"os_type: {os_type}")
if os_type in OsType.__members__:
return os_type
else:
logger.error("Unsupported OS type.")
raise ValueError('Unsupported OS type.')
def check(self) -> None:
"""validate the manifest before FOTA"""
logger.debug("")
factory = OsFactory.get_factory(
self._verify_os_supported(), self._ota_element, self._dispatcher_callbacks)
factory.create_upgrade_checker().check()
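# For reference (inferred from the constructor above; illustrative only, value
# types are assumptions): the parsed_manifest mapping is expected to provide at
# least these keys:
#
#   {
#       "resource": {...},     # parsed OTA element (path/guid/options, etc.)
#       "uri": "...",          # remote package URI, or None for a local repo
#       "signature": "...",
#       "hash_algorithm": ...,
#       "username": ...,
#       "password": ...,
#   }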
| 1.734375 | 2 |
syncflux.py | nagylzs/syncflux | 0 | 9973 | <reponame>nagylzs/syncflux
import copy
import datetime
import sys
import os
import time
import argparse
import traceback
import pytz
import syncthing
from influxdb import InfluxDBClient
import yaml
from yaml2dataclass import Schema, SchemaPath
from typing import Optional, Dict, Type, List
from dataclasses import dataclass, asdict, field
@dataclass
class SyncthingConfiguration(Schema):
name: str
api_key: str
host: str = 'localhost'
port: int = field(default=8384)
timeout: float = field(default=10.0)
is_https: bool = field(default=False)
ssl_cert_file: Optional[str] = field(default=None)
tags: Optional[List[str]] = field(default_factory=lambda: [])
def get_client_params(self):
result = asdict(self)
if "name" in result:
del result["name"]
if "tags" in result:
del result["tags"]
return result
@dataclass
class InfluxDbConfiguration(Schema):
host: str
port: int # Common ports: 443
ssl: bool
verify_ssl: bool
database: str
username: str
    password: str
def get_client_params(self):
result = asdict(self)
if "tags" in result:
del result["tags"]
return result
@dataclass
class MeasurementConfiguration(Schema):
devices: str
folders: str
@dataclass
class AppConfiguration(Schema):
syncthings: Dict[str, SyncthingConfiguration]
influxes: Dict[str, InfluxDbConfiguration]
measurements: MeasurementConfiguration
@classmethod
def _load_dict(cls, props_dict, dest_cls: Type[Schema], add_name: bool = False):
result = {}
for name, value in props_dict.items():
arguments = {}
arguments.update(value)
if add_name:
arguments["name"] = name
result[name] = dest_cls.scm_load_from_dict(arguments)
return result
@classmethod
def scm_convert(cls, values: dict, path: SchemaPath):
values["syncthings"] = cls._load_dict(values["syncthings"], SyncthingConfiguration, True)
values["influxes"] = cls._load_dict(values["influxes"], InfluxDbConfiguration)
return values
def load_app_config(stream) -> AppConfiguration:
"""Load application configuration from a stream."""
obj = yaml.safe_load(stream)
return AppConfiguration.scm_load_from_dict(obj)
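# Illustrative configuration sketch matching the dataclasses above (all values
# are placeholders, not taken from the original project; see the bundled
# syncflux_example.yml referenced by --help for the authoritative example):
#
#   syncthings:
#     home:
#       api_key: "<syncthing api key>"
#       host: localhost
#       port: 8384
#   influxes:
#     metrics:
#       host: influx.example.com
#       port: 443
#       ssl: true
#       verify_ssl: true
#       database: syncthing
#       username: syncflux
#       password: "<password>"
#   measurements:
#     devices: syncthing_devices
#     folders: syncthing_folders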
def error(message: str):
sys.stderr.write("\nerror: " + message + "\n")
sys.stderr.flush()
raise SystemExit(-1)
def info(*values):
if not args.silent:
print(*values)
def main():
# Collect data
points = []
for sync in config.syncthings.values():
info(" Connect syncthing %s" % sync.name)
proto_tags = {"cfg_name": sync.name}
if sync.tags:
proto_tags.update(sync.tags)
conn_args = sync.get_client_params()
q_started = time.time()
conn = syncthing.Syncthing(**conn_args)
now = datetime.datetime.now(tz=pytz.UTC)
sync_cfg = conn.system.config()
# My own device id
my_device = sync_cfg["defaults"]["folder"]["devices"][0]
my_id = my_device["deviceID"]
proto_tags["my_id"] = my_id
# Collect device stats
device_stats = conn.stats.device()
# List all remote devices
remote_devices = []
for device in sync_cfg["devices"]:
device_id = device["deviceID"]
if device_id == my_id:
proto_tags["my_name"] = device["name"]
else:
stats = device_stats[device_id]
last_seen = syncthing.parse_datetime(stats["lastSeen"])
last_seen_since = now - last_seen
remote_devices.append({
"tags": {
"id": device["deviceID"], # Device ID
"name": device["name"], # Device Name
},
"fields": {
"last_seen_since_sec": last_seen_since.total_seconds(), # Number of seconds last seen
}
})
# Folders
folders = []
for folder in sync_cfg["folders"]:
# Get completion for my own device
completion = conn.database.completion(my_id, folder["id"])
folders.append({
"tags": {"id": folder["id"], "label": folder["label"], "path": folder["path"]},
"fields": {"completion": completion},
})
q_elapsed = time.time() - q_started
proto_fields = {"q_elapsed": q_elapsed}
# Create data points for devices
for device in remote_devices:
tags = copy.copy(proto_tags)
tags.update(device["tags"])
fields = copy.copy(proto_fields)
fields.update(device["fields"])
point = dict(measurement=config.measurements.devices, tags=tags, fields=fields)
points.append(point)
# Create points for folders
for folder in folders:
tags = copy.copy(proto_tags)
tags.update(folder["tags"])
fields = copy.copy(proto_fields)
fields.update(folder["fields"])
point = dict(measurement=config.measurements.folders, tags=tags, fields=fields)
points.append(point)
if not points:
return
for influx_name, influx in config.influxes.items():
info(" Sending %d point(s) to influxdb %s" % (len(points), influx_name))
try:
influx = config.influxes[influx_name]
client = InfluxDBClient(**asdict(influx))
client.write_points(points)
except:
if args.halt_on_send_error:
raise
else:
traceback.print_exc(file=sys.stderr)
parser = argparse.ArgumentParser(description='Monitor your Syncthing instances with influxdb.')
parser.add_argument('-c', "--config", dest="config", default=None,
help="Configuration file for application. Default is syncflux.yml. "
"See syncflux_example.yml for an example.")
parser.add_argument("--config-dir", dest="config_dir", default=None,
help="Configuration directory. All config files with .yml extension will be processed one by one.")
parser.add_argument('-n', "--count", dest="count", default=1, type=int,
help="Number of test runs. Default is one. Use -1 to run indefinitely.")
parser.add_argument('-w', "--wait", dest="wait", default=60, type=float,
help="Number of seconds between test runs.")
parser.add_argument("-s", "--silent", dest='silent', action="store_true", default=False,
help="Supress all messages except errors.")
parser.add_argument("-v", "--verbose", dest='verbose', action="store_true", default=False,
help="Be verbose."
)
parser.add_argument("--halt-on-send-error", dest="halt_on_send_error", default=False, action="store_true",
help="Halt when cannot send data to influxdb. The default is to ignore the error.")
args = parser.parse_args()
if args.silent and args.verbose:
parser.error("Cannot use --silent and --verbose at the same time.")
if (args.config is not None) and (args.config_dir is not None):
    parser.error("You must give either --config or --config-dir (exactly one of them)")
if args.config is None and args.config_dir is None:
    args.config = "syncflux.yml"
if args.count == 0:
parser.error("Test run count cannot be zero.")
if args.wait <= 0:
parser.error("Wait time must be positive.")
if args.config:
config_files = [args.config]
else:
config_files = []
for file_name in sorted(os.listdir(args.config_dir)):
ext = os.path.splitext(file_name)[1]
if ext.lower() == ".yml":
fpath = os.path.join(args.config_dir, file_name)
config_files.append(fpath)
index = 0
while args.count < 0 or index < args.count:
if args.count != 1:
info("Pass #%d started" % (index + 1))
started = time.time()
for config_file in config_files:
if not os.path.isfile(config_file):
parser.error("Cannot open %s" % config_file)
config = load_app_config(open(config_file, "r"))
main()
elapsed = time.time() - started
index += 1
last_one = (args.count > 0) and (index == args.count)
if not last_one:
remaining = args.wait - elapsed
if remaining > 0:
if not args.silent:
info("Pass #%d elapsed %.2f sec, waiting %.2f sec for next." % (index, elapsed, remaining))
time.sleep(args.wait)
else:
info("Pass #%d elapsed %.2f sec" % (index, elapsed))
info("")
| 2.3125 | 2 |
tests/input/pdf/test_pdf.py | asweeney86/preview-generator | 0 | 9974 | <filename>tests/input/pdf/test_pdf.py<gh_stars>0
# -*- coding: utf-8 -*-
import os
import re
import shutil
import typing
from PIL import Image
from PyPDF2 import PdfFileReader
import PyPDF2.utils
import pytest
from preview_generator.exception import UnavailablePreviewType
from preview_generator.manager import PreviewManager
from tests import test_utils
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
CACHE_DIR = "/tmp/preview-generator-tests/cache"
PDF_FILE_PATH = os.path.join(CURRENT_DIR, "the_pdf.pdf")
PDF_FILE_PATH__ENCRYPTED = os.path.join(CURRENT_DIR, "the_pdf.encrypted.pdf")
PDF_FILE_PATH__A4 = os.path.join(CURRENT_DIR, "qpdfconvert.pdf")
def setup_function(function: typing.Callable) -> None:
shutil.rmtree(CACHE_DIR, ignore_errors=True)
def test_to_jpeg() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(
file_path=PDF_FILE_PATH, height=512, width=321, force=True
)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN__JPEG, path_to_file)
with Image.open(path_to_file) as jpeg:
assert jpeg.height in range(453, 455)
assert jpeg.width == 321
def test_to_jpeg__encrypted_pdf() -> None:
    with pytest.raises(PyPDF2.utils.PdfReadError):  # ensure file is encrypted
pdf = PdfFileReader(PDF_FILE_PATH__ENCRYPTED)
pdf.getPage(0)
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(
file_path=PDF_FILE_PATH__ENCRYPTED, height=512, width=321, force=True
)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN__JPEG, path_to_file)
with Image.open(path_to_file) as jpeg:
assert jpeg.height in range(453, 455)
assert jpeg.width == 321
def test_to_jpeg_no_size() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(file_path=PDF_FILE_PATH, force=True)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN__JPEG, path_to_file)
with Image.open(path_to_file) as jpeg:
assert jpeg.height == 256
assert jpeg.width in range(180, 182)
def test_to_text() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_text_preview(file_path=PDF_FILE_PATH) is False
with pytest.raises(UnavailablePreviewType):
manager.get_text_preview(file_path=PDF_FILE_PATH, force=True)
def test_to_json() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_json_preview(file_path=PDF_FILE_PATH) is True
manager.get_json_preview(file_path=PDF_FILE_PATH, force=True)
# TODO - G.M - 2018-11-06 - To be completed
def test_to_pdf() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_pdf_preview(file_path=PDF_FILE_PATH) is True
manager.get_pdf_preview(file_path=PDF_FILE_PATH, force=True)
# TODO - G.M - 2018-11-06 - To be completed
def test_to_pdf_one_page() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_pdf_preview(file_path=PDF_FILE_PATH) is True
path_0 = manager.get_pdf_preview(file_path=PDF_FILE_PATH, page=0, force=True)
assert os.path.exists(path_0) is True
    assert os.path.getsize(path_0) > 1000  # verify that the pdf size corresponds to real content
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN_WITH_PAGE__PDF, path_0)
pdf = PdfFileReader(open(path_0, "rb"))
assert pdf.getNumPages() == 1
path_1 = manager.get_pdf_preview(file_path=PDF_FILE_PATH, page=1, force=True)
assert os.path.exists(path_1) is True
    assert os.path.getsize(path_1) > 1000  # verify that the pdf size corresponds to real content
assert re.match(test_utils.CACHE_FILE_PATH_PATTERN_WITH_PAGE__PDF, path_1)
pdf = PdfFileReader(open(path_1, "rb"))
assert pdf.getNumPages() == 1
def test_algorithm4() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=PDF_FILE_PATH__A4) is True
path_to_file = manager.get_jpeg_preview(file_path=PDF_FILE_PATH__A4, force=True)
with Image.open(path_to_file) as jpeg:
assert jpeg.height == 256
assert jpeg.width in range(180, 182)
def test_get_nb_page() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
nb_page = manager.get_page_nb(file_path=PDF_FILE_PATH)
assert nb_page == 2
nb_page = manager.get_page_nb(file_path=PDF_FILE_PATH__ENCRYPTED)
assert nb_page == 2
nb_page = manager.get_page_nb(file_path=PDF_FILE_PATH__A4)
assert nb_page == 2
| 2.390625 | 2 |
ipt/ipt_filter_contour_by_size.py | tpmp-inra/ipapi | 1 | 9975 | from ipso_phen.ipapi.base.ipt_abstract import IptBase
from ipso_phen.ipapi.tools import regions
import numpy as np
import cv2
import logging
logger = logging.getLogger(__name__)
from ipso_phen.ipapi.base import ip_common as ipc
class IptFilterContourBySize(IptBase):
def build_params(self):
self.add_enabled_checkbox()
self.add_spin_box(
name="min_threshold",
desc="Lower bound limit",
default_value=0,
minimum=0,
maximum=100000000,
hint="Only contours bigger than lower limit bound will be kept",
)
self.add_spin_box(
name="max_threshold",
desc="Upper bound limit",
default_value=100000000,
minimum=0,
maximum=100000000,
hint="Only contours smaller than lower limit bound will be kept",
)
self.add_roi_selector()
def process_wrapper(self, **kwargs):
"""
Filter contour by size:
        Keep or discard contours according to their size
Real time: False
Keyword Arguments (in parentheses, argument name):
* Activate tool (enabled): Toggle whether or not tool is active
* Lower bound limit (min_threshold): Only contours bigger than lower limit bound will be kept
            * Upper bound limit (max_threshold): Only contours smaller than upper limit bound will be kept
* Name of ROI to be used (roi_names): Operation will only be applied inside of ROI
* ROI selection mode (roi_selection_mode):
"""
wrapper = self.init_wrapper(**kwargs)
if wrapper is None:
return False
res = False
try:
if self.get_value_of("enabled") == 1:
mask = self.get_mask()
if mask is None:
logger.error(f"FAIL {self.name}: mask must be initialized")
return
lt, ut = self.get_value_of("min_threshold"), self.get_value_of(
"max_threshold"
)
# Get source contours
contours = [
c
for c in ipc.get_contours(
mask=mask,
retrieve_mode=cv2.RETR_LIST,
method=cv2.CHAIN_APPROX_SIMPLE,
)
if cv2.contourArea(c, True) < 0
]
contours.sort(key=lambda x: cv2.contourArea(x), reverse=True)
colors = ipc.build_color_steps(step_count=len(contours))
dbg_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for clr, cnt in zip(colors, contours):
cv2.drawContours(dbg_img, [cnt], 0, clr, -1)
dbg_img = np.dstack(
(
cv2.bitwise_and(dbg_img[:, :, 0], mask),
cv2.bitwise_and(dbg_img[:, :, 1], mask),
cv2.bitwise_and(dbg_img[:, :, 2], mask),
)
)
wrapper.store_image(
image=dbg_img,
text="all_contours",
)
fnt = (cv2.FONT_HERSHEY_SIMPLEX, 0.6)
for cnt in contours:
area_ = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
if area_ > 0:
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
(255, 255, 255),
2,
)
wrapper.store_image(
image=dbg_img,
text="all_contours_with_sizes",
)
dbg_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
out_mask = np.zeros_like(mask)
# Discarded contours
size_cnts = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for cnt in contours:
area_ = cv2.contourArea(cnt)
if area_ < lt:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)
elif area_ > ut:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)
else:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)
wrapper.store_image(image=size_cnts, text="cnts_by_size")
# Discarded contours
size_cnts = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for cnt in sorted(
contours, key=lambda x: cv2.contourArea(x), reverse=True
):
area_ = cv2.contourArea(cnt)
if area_ < lt:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)
elif area_ > ut:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)
else:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)
wrapper.store_image(image=size_cnts, text="cnts_by_size_reversed")
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)
# Discarded contours borders
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_MAROON, 4)
# Kept contours
for cnt in contours:
area_ = cv2.contourArea(cnt)
if lt < area_ < ut:
cv2.drawContours(out_mask, [cnt], 0, 255, -1)
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_GREEN, -1)
else:
cv2.drawContours(out_mask, [cnt], 0, 0, -1)
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)
dbg_img = np.dstack(
(
cv2.bitwise_and(dbg_img[:, :, 0], mask),
cv2.bitwise_and(dbg_img[:, :, 1], mask),
cv2.bitwise_and(dbg_img[:, :, 2], mask),
)
)
# Discarded sizes
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
ipc.C_RED,
thickness=2,
)
# Kept sizes
for cnt in contours:
area_ = cv2.contourArea(cnt)
if lt < area_ < ut:
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
ipc.C_LIME,
thickness=2,
)
out_mask = cv2.bitwise_and(
out_mask,
mask,
)
# Apply ROIs if needed
rois = self.get_ipt_roi(
wrapper=wrapper,
roi_names=self.get_value_of("roi_names").replace(" ", "").split(","),
selection_mode=self.get_value_of("roi_selection_mode"),
)
if rois:
untouched_mask = regions.delete_rois(rois=rois, image=self.get_mask())
self.result = cv2.bitwise_or(
untouched_mask, regions.keep_rois(rois=rois, image=out_mask)
)
self.demo_image = cv2.bitwise_or(
dbg_img,
np.dstack((untouched_mask, untouched_mask, untouched_mask)),
)
else:
self.result = out_mask
self.demo_image = dbg_img
wrapper.store_image(image=self.result, text="filtered_contours")
wrapper.store_image(image=self.demo_image, text="tagged_contours")
res = True
else:
wrapper.store_image(wrapper.current_image, "current_image")
res = True
except Exception as e:
res = False
logger.exception(f"Filter contour by size FAILED, exception: {repr(e)}")
else:
pass
finally:
return res
@property
def name(self):
return "Filter contour by size"
@property
def package(self):
return "TPMP"
@property
def real_time(self):
return False
@property
def result_name(self):
return "mask"
@property
def output_kind(self):
return "mask"
@property
def use_case(self):
return [ipc.ToolFamily.MASK_CLEANUP]
@property
def description(self):
return """'Keep or descard contours according to their size"""
| 2.328125 | 2 |
tests/main.py | deeso/json-search-replace | 1 | 9976 | from wrapper_tests.upsert_test import *
from wrapper_tests.upsertvaluedict_test import *
import os
import logging
import sys
import argparse
import signal
logging.getLogger().setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s - %(name)s] %(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
parser = argparse.ArgumentParser(
description='Unit testing for fiery snap.')
parser.add_argument('-config', type=str, default=None,
help='toml config for keys and such, see key.toml')
if __name__ == '__main__':
unittest.main()
os.kill(os.getpid(), signal.SIGKILL)
| 2.265625 | 2 |
Sending_email/email.py | Satyam-Bhalla/Python-Scripts | 8 | 9977 | <reponame>Satyam-Bhalla/Python-Scripts
import smtplib
gmail_user = 'your email'
gmail_password = '<PASSWORD>'
sent_from = gmail_user
to = ['receiver email'] #Create a list for all the receivers
subject = 'OMG Super Important Message'
body = 'Hey, what\'s up?\n- You'
email_text = """\
From: %s
To: %s
Subject: %s
%s
""" % (sent_from, ", ".join(to), subject, body)
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(gmail_user, gmail_password)
server.sendmail(sent_from, to, email_text)
server.close()
print('Email sent!')
except Exception as e:
print(e)
| 2.6875 | 3 |
FlaskDaemon/load_test.py | caffeinate/test-pylot | 0 | 9978 | <filename>FlaskDaemon/load_test.py
'''
Created on 11 Sep 2015
@author: si
'''
import json
import random
import time
from threading import Thread
# import urllib
import urllib2
from Queue import Queue
import logging
logger = logging.getLogger(__name__)
API_URL = "http://127.0.0.1:5000/"
class LoadTest(object):
"""
Create a single process with one thread per test user.
"""
def __init__(self, test_users_count, requests_per_user):
"""
@param test_users_count: int
@param requests_per_user: int
"""
self.thread_table = []
self.test_users_count = test_users_count
self.requests_per_user = requests_per_user
self.stats = { 'return_codes' : {},
'requests_made' : 0,
'total_seconds_waiting' : 0.0
}
self.stats_q = Queue(0)
def go(self):
start_time = time.time()
msg = "%s test users with %s requests each..." % \
(self.test_users_count, self.requests_per_user)
self.logger(msg)
for i in range(self.test_users_count):
p = TestUser(i, self.requests_per_user, self.stats_q)
p.start()
self.thread_table.append(p)
end_time = time.time()
self.logger("time taken to create threads : %s" % (end_time-start_time,))
start_time = time.time()
# wait for threads to complete
while True:
alive_count = len(self.thread_table)
# could time.sleep(0.5) or just wait for all threads to finish
for p in self.thread_table:
if not p.is_alive():
alive_count -= 1
else:
p.join()
if alive_count == 0:
break
#print "alive:%s" % alive_count
end_time = time.time()
time_taken = end_time-start_time
self.logger("finished. Time taken : %s" % time_taken)
while not self.stats_q.empty():
user_stats = self.stats_q.get()
for http_status, count in user_stats['return_codes'].iteritems():
if http_status not in self.stats['return_codes']:
self.stats['return_codes'][http_status] = 0
self.stats['return_codes'][http_status] += count
self.stats['requests_made'] += user_stats['requests_made']
self.stats['total_seconds_waiting'] += user_stats['total_seconds_waiting']
print self.stats
# time_taken is real time not CPU
req_per_sec = float(self.stats['requests_made'])/time_taken
print "Requests per second: %s" % req_per_sec
def logger(self, msg):
logger.info(msg)
print msg
class TestUser(Thread):
"""
Act like a user. Bit over simplified at the moment.
"""
def __init__(self, user_id, requests_count, stats_queue):
super(TestUser, self).__init__()
self.remaining_request = requests_count
self.base_url = API_URL
self.stats_queue = stats_queue
self.user_id = user_id
def logger(self, msg):
logger.info(msg)
#print msg
def run(self):
"""
@return: dictionary of stats to be collected by main process
"""
stats = { 'return_codes' : {},
'requests_made': self.remaining_request,
'total_seconds_waiting' : 0.0, # waiting for requests
}
while self.remaining_request > 0:
# sleep for average of half a second
time.sleep(random.random())
start_time = time.time()
# for POST
#raw = {}
#d = json.dumps(raw)
#h = {'Content-type': 'application/json'}
#req = urllib2.Request(self.base_url, data=d, headers=h)
# for GET
req = urllib2.Request(self.base_url)
f = urllib2.urlopen(req)
end_time = time.time()
d = end_time-start_time
stats['total_seconds_waiting'] += d
http_status = f.getcode()
if http_status not in stats['return_codes']:
stats['return_codes'][http_status] = 0
stats['return_codes'][http_status] += 1
self.remaining_request -= 1
self.logger("Thread %s finished: %s" % (self.user_id, stats))
self.stats_queue.put(stats, False)
if __name__ == '__main__':
l = LoadTest(10,30)
l.go()
| 2.609375 | 3 |
urls.py | jeylani99/Real-Estate | 0 | 9979 | <reponame>jeylani99/Real-Estate
from django.contrib import admin
from django.conf.urls import include,url
from .import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(),name='index'),
#homeapp_detail_view_url
url(r'^(?P<pk>[0-9]+)/$',views.LocationView.as_view(),name='property'),
#homeapp/detailview/moredetailview
url(r'^([0-9]+)/(?P<pk>[0-9]+)/$',views.PropertyView.as_view(),name='propertyview'),
]
| 1.859375 | 2 |
main.py | ngh3053/auto_spacing_with_tensorflow | 0 | 9980 | <filename>main.py
from utils import *
from model import Model2
if __name__ == '__main__':
train_data = DataLoader('../data/trainX.txt', '../data/trainY.txt')
test_data = DataLoader('../data/testX.txt', '../data/testY.txt')
train_data.set_batch(100)
test_data.set_batch(100)
char_dic = CharDic([train_data])
model = Model2(train_data=train_data,
test_data=test_data,
char_dic=char_dic,
model_name='bilstm_crf_n3_e300_h2002')
model.train()
model.test() | 2.234375 | 2 |
src/sardana/taurus/qt/qtgui/extra_macroexecutor/macrodescriptionviewer.py | marc2332/sardana | 43 | 9981 | #!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""
macrodescriptionviewer.py:
"""
import taurus.core
from taurus.external.qt import Qt
from taurus.qt.qtgui.base import TaurusBaseWidget
class TaurusMacroDescriptionViewer(Qt.QTextEdit, TaurusBaseWidget):
__pyqtSignals__ = ("modelChanged(const QString &)",)
def __init__(self, parent=None, designMode=False):
name = "TaurusMacroDescriptionView"
self.call__init__wo_kw(Qt.QTextEdit, parent)
self.call__init__(TaurusBaseWidget, name)
self.setReadOnly(True)
self.setFont(Qt.QFont("Courier", 9))
def defineStyle(self):
""" Defines the initial style for the widget """
self.updateStyle()
def getModelClass(self):
return taurus.core.taurusdevice.TaurusDevice
def updateStyle(self):
self.update()
def onMacroNameChanged(self, macroName):
"""Can be connected to an event emitted after macro name was changed.
It receives the macro name as an argument and asks the BaseMacroServer
object for the macro description already prepared and stored in its MacroInfoObj object"""
macroServer = self.getModelObj()
if macroServer is None or macroName is None or macroName == "":
self.setText("")
return
self.setText(str(macroServer.getMacroInfoObj(macroName).doc))
def getFormatedToolTip(self, cache=True):
"""This method was overridden to get rid of the default tooltip of TaurusWidget"""
return ""
model = Qt.pyqtProperty("QString",
TaurusBaseWidget.getModel,
TaurusBaseWidget.setModel,
TaurusBaseWidget.resetModel)
useParentModel = Qt.pyqtProperty("bool",
TaurusBaseWidget.getUseParentModel,
TaurusBaseWidget.setUseParentModel,
TaurusBaseWidget.resetUseParentModel)
def test():
import sys
from sardana.taurus.core.tango.sardana.macroserver import registerExtensions
registerExtensions()
app = Qt.QApplication(sys.argv)
taurusMacroDescriptionView = TaurusMacroDescriptionViewer(designMode=1)
if len(sys.argv) != 2:
taurusMacroDescriptionView.setModel("macroserver/zreszela/1")
else:
taurusMacroDescriptionView.setModel(sys.argv[1])
taurusMacroDescriptionView.onMacroNameChanged("mv")
taurusMacroDescriptionView.show()
sys.exit(app.exec_())
if __name__ == "__main__":
test()
| 1.710938 | 2 |
torch/metrics/accuracy_score.py | LilDataScientist/PyTorch-From-Scratch | 0 | 9982 | <gh_stars>0
import numpy as np
def accuracy_score(y_true, y_pred):
a = np.argmax(y_true, axis=1)
b = np.argmax(y_pred, axis=1)
return np.count_nonzero(a == b) / y_true.shape[0]
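# Illustrative usage (a sketch, not part of the original module): both arguments are
# (n_samples, n_classes) arrays; rows are compared by argmax.
# y_true = np.array([[1, 0], [0, 1]])          # one-hot ground truth
# y_pred = np.array([[0.9, 0.1], [0.2, 0.8]])  # predicted class scores
# accuracy_score(y_true, y_pred)               # -> 1.0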
| 2.328125 | 2 |
asaas/financial_transactions.py | marlonjsilva/asaas_sdk_python | 0 | 9983 | from asaas.typing import SyncAsync
from typing import Any, Optional, Dict
class FinancialTransactions:
def __init__(self, parent: Any) -> None:
self.parent = parent
def list(
self, query: Optional[Dict[Any, Any]] = None, **kwargs: Any
) -> SyncAsync[Any]:
return self.parent.request(
path="/financialTransactions",
method="GET",
query=query,
auth=kwargs.get("auth"),
)
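# Illustrative call (a sketch; `client` is assumed to be the Asaas client object that
# owns this endpoint and exposes `.request`; the attribute name is hypothetical):
# client.financial_transactions.list(query={"offset": 0, "limit": 10})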
| 2.234375 | 2 |
datahub/core/serializers.py | uktrade/data-hub-api | 6 | 9984 | from functools import partial
from uuid import UUID
from dateutil.parser import parse as dateutil_parse
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.fields import ReadOnlyField, UUIDField
from datahub.core.constants import Country as CountryEnum
from datahub.core.validate_utils import DataCombiner
from datahub.core.validators import InRule, OperatorRule, RulesBasedValidator, ValidationRule
from datahub.metadata.models import AdministrativeArea, Country
MAX_LENGTH = settings.CHAR_FIELD_MAX_LENGTH
class ConstantModelSerializer(serializers.Serializer):
"""Constant models serializer."""
id = serializers.ReadOnlyField()
name = serializers.ReadOnlyField()
disabled_on = serializers.ReadOnlyField()
class PermittedFieldsModelSerializer(serializers.ModelSerializer):
"""Lets you get permitted fields only.
Needs 'permissions' attribute on Meta class in following format:
permissions = {
'app_name.permission': 'field'
}
If user doesn't have required permission, corresponding field will be filtered out.
Note: The current implementation does not allow access to the field if request.user is None.
"""
def get_fields(self):
"""Gets filtered dictionary of fields based on permissions."""
assert hasattr(self.Meta, 'permissions'), (
'Class {serializer_class} missing "Meta.permissions" attribute'.format(
serializer_class=self.__class__.__name__,
)
)
fields = super().get_fields()
request = self.context.get('request', None)
if request:
permissions = self.Meta.permissions
for permission, field in permissions.items():
if not request.user or not request.user.has_perm(permission):
del fields[field]
return fields
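# Illustrative sketch (not from the original module): hiding a field behind a permission.
# `Interaction`, the permission codename and the field names are hypothetical.
#
# class InteractionSerializer(PermittedFieldsModelSerializer):
#     class Meta:
#         model = Interaction
#         fields = ('id', 'notes')
#         permissions = {'interaction.view_notes': 'notes'}
#
# A request whose user lacks 'interaction.view_notes' gets a response without 'notes'.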
class NestedRelatedField(serializers.RelatedField):
"""DRF serialiser field for foreign keys and many-to-many fields.
Serialises as a dict with 'id' plus other specified keys.
"""
default_error_messages = {
'required': 'This field is required.',
'missing_pk': 'pk not provided.',
'does_not_exist': 'Invalid pk "{pk_value}" - object does not exist.',
'incorrect_type': 'Incorrect type. Expected object, received {data_type}.',
}
def __init__(self, model, extra_fields=('name',), **kwargs):
"""Initialises the related field.
:param model: Model of the related field.
:param extra_fields: List of extra fields to include in the representation.
Can contain field names as strings or as tuples of
(field name, DRF field).
E.g. ['field1', ('field2', CharField())]
:param kwargs: Keyword arguments to pass to
RelatedField.__init__()
"""
super().__init__(**kwargs)
model_class = (apps.get_model(model) if isinstance(model, str) else
model)
self.pk_field = UUIDField()
self._fields = [
field if isinstance(field, tuple) else (field, ReadOnlyField())
for field in extra_fields
]
self._model = model_class
def get_queryset(self):
"""Returns the queryset corresponding to the model."""
return self._model.objects.all()
def to_internal_value(self, data):
"""Converts a user-provided value to a model instance."""
try:
if isinstance(data, (str, UUID)):
id_repr = data
else:
id_repr = data['id']
data = self.pk_field.to_internal_value(id_repr)
return self.get_queryset().get(pk=data)
except ObjectDoesNotExist:
self.fail('does_not_exist', pk_value=data)
except KeyError:
self.fail('missing_pk')
except (TypeError, ValueError):
self.fail('incorrect_type', data_type=type(data).__name__)
def to_representation(self, value):
"""Converts a model instance to a dict representation."""
if not value:
return value
extra = {
field_name: field.to_representation(getattr(value, field_name))
for field_name, field in self._fields
}
return {
**extra,
'id': self.pk_field.to_representation(value.pk),
}
def get_choices(self, cutoff=None):
"""Returns choices for DRF UI.
Standard implementation uses a dict, but that doesn't work as our
representation isn't hashable.
"""
queryset = self.get_queryset()
if queryset is None:
return ()
if cutoff is not None:
queryset = queryset[:cutoff]
return _Choices(
(
self.pk_field.to_representation(item.pk),
self.display_value(item),
)
for item in queryset
)
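# Illustrative sketch (not from the original module): exposing a foreign key as a nested
# dict. `CompanySerializer` and `registered_country` are hypothetical names; `Country`
# is the metadata model imported above.
#
# class CompanySerializer(serializers.ModelSerializer):
#     registered_country = NestedRelatedField(Country, extra_fields=('name',))
#
# A serialized instance then contains e.g.
#     {'registered_country': {'id': '80756b9a-...', 'name': 'United Kingdom'}}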
RelaxedDateField = partial(serializers.DateField, input_formats=('iso-8601', '%Y/%m/%d'))
class RelaxedDateTimeField(serializers.Field):
"""
Relaxed DateTime field.
Front end uses free text field for data filters, that's why
we need to accept date/datetime in various different formats.
DRF DateTimeField doesn't offer that flexibility.
"""
default_error_messages = {
'invalid': 'Date is in incorrect format.',
}
def to_internal_value(self, data):
"""Parses data into datetime."""
try:
data = dateutil_parse(data)
except ValueError:
self.fail('invalid', value=data)
return data
def to_representation(self, value):
"""Formats the datetime using a normal DateTimeField."""
repr_field = serializers.DateTimeField()
return repr_field.to_representation(value)
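# Example behaviour (sketch): free-text dates in several formats are accepted, e.g.
# RelaxedDateTimeField().to_internal_value('2020/01/31') parses to
# datetime(2020, 1, 31, 0, 0), while unparsable input fails with the 'invalid' error.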
class RelaxedURLField(serializers.URLField):
"""URLField subclass that prepends http:// to input and output when a scheme is not present."""
def to_internal_value(self, data):
"""Converts a user-provided value to an internal value."""
return super().to_internal_value(self._fix_missing_url_scheme(data))
def to_representation(self, value):
"""Converts a stored value to the external representation."""
return super().to_representation(self._fix_missing_url_scheme(value))
@staticmethod
def _fix_missing_url_scheme(value):
if value and '://' not in value:
return f'http://{value}'
return value
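# Example behaviour (sketch): RelaxedURLField().to_internal_value('example.com')
# returns 'http://example.com'; values that already carry a scheme are left unchanged.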
class _Choices:
"""Wrapper for choices to make them compatible with DRF."""
def __init__(self, choices):
self._choices = choices
def items(self):
"""Returns the choices."""
return self._choices
class AddressSerializer(serializers.ModelSerializer):
"""
ModelSerializer that can be used to simulate nested address objects.
E.g.
Model:
class MultiAddressModel(models.Model):
primary_address_1 = models.CharField(max_length=MAX_LENGTH)
primary_address_2 = models.CharField(max_length=MAX_LENGTH, blank=True)
primary_address_town = models.CharField(max_length=MAX_LENGTH)
primary_address_county = models.CharField(max_length=MAX_LENGTH, blank=True)
primary_address_country = models.ForeignKey(
Country, on_delete=models.PROTECT, related_name='+',
)
primary_address_postcode = models.CharField(max_length=MAX_LENGTH, blank=True)
secondary_address_1 = models.CharField(max_length=MAX_LENGTH, blank=True)
secondary_address_2 = models.CharField(max_length=MAX_LENGTH, blank=True, null=True)
secondary_address_town = models.CharField(max_length=MAX_LENGTH, blank=True)
secondary_address_county = models.CharField(max_length=MAX_LENGTH, blank=True)
secondary_address_country = models.ForeignKey(
Country, null=True, on_delete=models.SET_NULL, related_name='+',
)
secondary_address_postcode = models.CharField(max_length=MAX_LENGTH, blank=True)
Serializer:
class MultiAddressModelSerializer(serializers.ModelSerializer):
primary_address = AddressSerializer(
source_model=MultiAddressModel,
address_source_prefix='primary_address',
)
secondary_address = AddressSerializer(
source_model=MultiAddressModel,
address_source_prefix='secondary_address',
required=False,
allow_null=True,
)
class Meta:
model = MultiAddressModel
fields = ['primary_address', 'secondary_address']
Will produce the following API response:
{
'primary_address': {
'line_1': '2',
'line_2': '',
'town': 'London',
'county': '',
'postcode': '',
'country': {
'id': '80756b9a-5d95-e211-a939-e4115bead28a',
'name': 'United Kingdom',
},
},
'secondary_address': {
'line_1': '1',
'line_2': '',
'town': 'Muckamore',
'county': '',
'postcode': '',
'country': {
'id': '736a9ab2-5d95-e211-a939-e4115bead28a',
'name': 'Ireland',
},
},
},
Please note:
1. None values for CharFields will be converted to ''
2. If all address field values are blank the nested object in the response will return None
E.g. Given the following fields' values:
secondary_address_1=''
secondary_address_2=''
secondary_address_town=''
secondary_address_county=''
secondary_address_postcode=''
secondary_address_country_id=None
The equivalent API response body will be:
'secondary_address': None
The same applies for changing the data.
3. If AddressSerializer has required=False, the validation is triggered only if at least
one of the fields is passed in.
"""
line_1 = serializers.CharField(
max_length=MAX_LENGTH,
allow_blank=True,
required=False,
default='',
source='{source_prefix}_1',
)
line_2 = serializers.CharField(
max_length=MAX_LENGTH,
allow_blank=True,
required=False,
default='',
source='{source_prefix}_2',
)
town = serializers.CharField(
max_length=MAX_LENGTH,
allow_blank=True,
required=False,
default='',
source='{source_prefix}_town',
)
county = serializers.CharField(
max_length=MAX_LENGTH,
allow_blank=True,
required=False,
default='',
source='{source_prefix}_county',
)
postcode = serializers.CharField(
max_length=MAX_LENGTH,
allow_blank=True,
required=False,
default='',
source='{source_prefix}_postcode',
)
area = NestedRelatedField(
AdministrativeArea,
allow_null=True,
required=False,
source='{source_prefix}_area',
)
country = NestedRelatedField(
Country,
allow_null=True,
required=False,
source='{source_prefix}_country',
)
REQUIRED_FIELDS = (
'line_1',
'town',
'country',
)
def __init__(
self, source_model, *args,
address_source_prefix='address', area_can_be_required=False,
postcode_can_be_required=False, **kwargs,
):
"""
Initialises the serializer.
It populates all necessary parts (e.g. Meta model, source, fields' source).
"""
# Define a custom Meta so that the Meta model can be specified as an argument
class MultiAddressMeta(self.Meta):
model = source_model
self.Meta = MultiAddressMeta
kwargs.setdefault('source', '*')
super().__init__(*args, **kwargs)
# populate fields' source
for field in self.fields.values():
field.source = field.source.format(source_prefix=address_source_prefix)
field.source_attrs = field.source.split('.')
self.area_can_be_required = area_can_be_required
self.postcode_can_be_required = postcode_can_be_required
self.address_source_prefix = address_source_prefix
def add_area_validator(self, validators):
"""
Mark area as required for US and Canadian companies.
"""
validators.append(
RulesBasedValidator(
ValidationRule(
'required',
OperatorRule(f'{self.address_source_prefix}_area', bool),
when=InRule(
f'{self.address_source_prefix}_country',
(
CountryEnum.united_states.value.id,
CountryEnum.canada.value.id,
),
),
),
),
)
def add_postcode_validator(self, validators):
"""
Mark postcode as required for US and Canadian companies.
"""
validators.append(
RulesBasedValidator(
ValidationRule(
'required',
OperatorRule(f'{self.address_source_prefix}_postcode', bool),
when=InRule(
f'{self.address_source_prefix}_country',
(
CountryEnum.united_states.value.id,
CountryEnum.canada.value.id,
),
),
),
),
)
def get_validators(self):
"""
Append ValidationRule for area/postcode depending on feature flag/context
Only mark area/postcode required if country is US/Canada & called from context where area
is safe to require, and if feature flag enabled. Currently the only context where area is
safe to require is CompanySerializer
"""
validators = super().get_validators()
if self.area_can_be_required:
self.add_area_validator(validators)
if self.postcode_can_be_required:
self.add_postcode_validator(validators)
return validators
def run_validation(self, data=serializers.empty):
"""
Converts None to dict with default values so that those values can be used to
reset the fields on the model.
"""
if data or not self.allow_null:
normalised_data = data
else:
normalised_data = {
field_name: None if (field.default == serializers.empty) else field.default
for field_name, field in self.fields.items()
}
return super().run_validation(data=normalised_data)
def to_representation(self, value):
"""
It returns None if none of the address values is set.
E.g.
{
'address': None
}
instead of
{
'address': {
'line_1': '',
'line_2': '',
'town': '',
'county': '',
'postcode': '',
'country': None
}
}
"""
address_dict = super().to_representation(value)
if not any(address_dict.values()):
return None
# for each address field, replace None with default if possible
for field_name, value in address_dict.items():
field_default = self.fields[field_name].default
if value is None and field_default is not serializers.empty:
address_dict[field_name] = field_default
return address_dict
def should_validate(self, data_combiner):
"""
Returns true if the data should be validated.
"""
if self.required:
return True
return any(
data_combiner.get_value(field.source)
for field in self.fields.values()
)
def validate(self, attrs):
"""
Validates the data if necessary.
This is needed because some addresses only need to be validated
if they are passed in.
"""
validated_data = super().validate(attrs)
data_combiner = DataCombiner(self.parent.instance, validated_data)
if self.should_validate(data_combiner):
errors = {}
for field_name in self.REQUIRED_FIELDS:
field = self.fields[field_name]
value = data_combiner.get_value(field.source)
if not value:
errors[field_name] = self.error_messages['required']
if errors:
raise ValidationError(errors)
return validated_data
class Meta:
"""Meta options."""
model = None
fields = (
'line_1',
'line_2',
'town',
'county',
'postcode',
'area',
'country',
)
| 2.125 | 2 |
samples/s07-rigid-objects/main.py | nomadsinteractive/ark | 5 | 9985 | <reponame>nomadsinteractive/ark<filename>samples/s07-rigid-objects/main.py
import math
import random
from ark import dear_imgui, ApplicationFacade, Arena, Event, Integer, Collider, RenderObject, Size, Camera, Vec3, Numeric
class Application:
def __init__(self, application: ApplicationFacade):
self._down_x = 0
self._down_y = 0
self._application = application
self._light_position = Vec3(100, 500, 0)
self._resource_loader = self._application.create_resource_loader('main.xml')
self._arena = self._resource_loader.load(Arena, 'main', c=self._application.camera, lp=self._light_position)
self._application.arena = self._arena
self._arena.add_event_listener(self.on_event)
self._imgui = self._arena.resource_loader.refs.imgui
self._world_box2d = self._resource_loader.load(Collider, 'b2World')
self._world_bullet = self._resource_loader.load(Collider, 'btWorld')
self._l001 = self._resource_loader.layers.l001
self._l003 = self._resource_loader.layers.l003
self._shape_id = Integer(0)
self._collider_id = Integer(0)
self._body_size = Numeric(50)
self._body_ttl = Numeric(5)
self._rigid_body_ground_b2 = self._world_box2d.create_body(Collider.BODY_TYPE_STATIC, Collider.BODY_SHAPE_BOX, (4.8, 0), Size(6.0, 1.0))
self._rigid_body_ground_bt = self._world_bullet.create_body(Collider.BODY_TYPE_STATIC, Collider.BODY_SHAPE_BOX, (480, 0, 0), Size(600, 100, 600))
self._shapes = [Collider.BODY_SHAPE_BALL, Collider.BODY_SHAPE_BOX, 3]
@property
def imgui(self):
return self._imgui
def on_event(self, event):
action = event.action
if action == Event.ACTION_DOWN:
(self._down_x, self._down_y) = event.xy
elif action == Event.ACTION_UP:
if abs(event.x - self._down_x) + abs(event.y - self._down_y) < 10:
self.on_click(event)
return True
return False
def on_click(self, event: Event):
shape_id = self._shape_id.val
collider_id = self._collider_id.val
render_object = [self.make_object_box2d, self.make_object_bullet][collider_id](shape_id, event)
self._defer_dispose(render_object)
def make_object_box2d(self, shape_id: int, event: Event) -> RenderObject:
xy = (event.x / 100, event.y / 100)
s = self._body_size / 100
shape = self._shapes[shape_id]
rigid_body = self._world_box2d.create_body(Collider.BODY_TYPE_DYNAMIC, shape, xy, Size(s, s))
render_object = RenderObject(random.randrange(1, 100), None, Size(self._body_size, self._body_size), None)
rigid_body.bind(render_object)
self._l003.add_render_object(render_object)
return render_object
def make_object_bullet(self, shape_id: int, event: Event) -> RenderObject:
xy = event.xy
shape = self._shapes[shape_id]
s = self._body_size.val
s1 = s / [2, 100, 50][shape_id]
rigid_body = self._world_bullet.create_body(Collider.BODY_TYPE_DYNAMIC, shape, xy, Size(s, s, s))
render_object = RenderObject(self._shape_id.val + 1, None, Size(s1, s1, s1))
rigid_body.bind(render_object)
self._l001.add_render_object(render_object)
return render_object
def create_toolbox(self):
builder = dear_imgui.RendererBuilder(self._imgui)
builder.begin('RigidBodies')
builder.text('Which collider engine shall we use?')
builder.radio_button('Box2D', self._collider_id, 0)
builder.same_line()
builder.radio_button('Bullet3', self._collider_id, 1)
builder.separator()
builder.text('Click somewhere to create a RigidBody typed below:')
builder.radio_button('Ball', self._shape_id, 0)
builder.same_line()
builder.radio_button('Box', self._shape_id, 1)
builder.same_line()
builder.radio_button('Duck', self._shape_id, 2)
builder.slider_float('RigidBody size', self._body_size, 10, 100, '%.1f')
builder.slider_float('RigidBody TTL', self._body_ttl, 5, 50, '%.1f')
builder.slider_float3('Light Position', self._light_position, 0, 1000, '%.1f')
builder.end()
self._imgui.add_renderer(builder.build())
@staticmethod
def _make_camera() -> Camera:
e = 500
camera = Camera()
camera.perspective(math.radians(45), 16 / 9, 0.1, 2000)
camera.look_at(Vec3(0, 0, e), Vec3(0, 0, e - 100), Vec3(0, 1, 0))
return camera
def _defer_dispose(self, render_object: RenderObject):
self._application.post(lambda: render_object.dispose(), self._body_ttl.val)
def main(app: Application):
app.create_toolbox()
if __name__ == '__main__':
main(Application(_application))
| 2.125 | 2 |
jamf/models/computer_extension_attribute.py | jensenbox/python-jamf | 1 | 9986 | <reponame>jensenbox/python-jamf
# coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used solely for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from jamf.configuration import Configuration
class ComputerExtensionAttribute(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'definition_id': 'str',
'name': 'str',
'description': 'str',
'enabled': 'bool',
'multi_value': 'bool',
'values': 'list[str]',
'data_type': 'str',
'options': 'list[str]',
'input_type': 'str'
}
attribute_map = {
'definition_id': 'definitionId',
'name': 'name',
'description': 'description',
'enabled': 'enabled',
'multi_value': 'multiValue',
'values': 'values',
'data_type': 'dataType',
'options': 'options',
'input_type': 'inputType'
}
def __init__(self, definition_id=None, name=None, description=None, enabled=None, multi_value=None, values=None, data_type=None, options=None, input_type=None, local_vars_configuration=None): # noqa: E501
"""ComputerExtensionAttribute - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._definition_id = None
self._name = None
self._description = None
self._enabled = None
self._multi_value = None
self._values = None
self._data_type = None
self._options = None
self._input_type = None
self.discriminator = None
if definition_id is not None:
self.definition_id = definition_id
if name is not None:
self.name = name
self.description = description
if enabled is not None:
self.enabled = enabled
if multi_value is not None:
self.multi_value = multi_value
self.values = values
self.data_type = data_type
self.options = options
self.input_type = input_type
@property
def definition_id(self):
"""Gets the definition_id of this ComputerExtensionAttribute. # noqa: E501
An identifier of extension attribute definition. # noqa: E501
:return: The definition_id of this ComputerExtensionAttribute. # noqa: E501
:rtype: str
"""
return self._definition_id
@definition_id.setter
def definition_id(self, definition_id):
"""Sets the definition_id of this ComputerExtensionAttribute.
An identifier of extension attribute definition. # noqa: E501
:param definition_id: The definition_id of this ComputerExtensionAttribute. # noqa: E501
:type definition_id: str
"""
self._definition_id = definition_id
@property
def name(self):
"""Gets the name of this ComputerExtensionAttribute. # noqa: E501
A human-readable name by which attribute can be referred to. # noqa: E501
:return: The name of this ComputerExtensionAttribute. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ComputerExtensionAttribute.
A human-readable name by which attribute can be referred to. # noqa: E501
:param name: The name of this ComputerExtensionAttribute. # noqa: E501
:type name: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this ComputerExtensionAttribute. # noqa: E501
An additional explanation of exact attribute meaning, possible values, etc. # noqa: E501
:return: The description of this ComputerExtensionAttribute. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ComputerExtensionAttribute.
An additional explanation of exact attribute meaning, possible values, etc. # noqa: E501
:param description: The description of this ComputerExtensionAttribute. # noqa: E501
:type description: str
"""
self._description = description
@property
def enabled(self):
"""Gets the enabled of this ComputerExtensionAttribute. # noqa: E501
:return: The enabled of this ComputerExtensionAttribute. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this ComputerExtensionAttribute.
:param enabled: The enabled of this ComputerExtensionAttribute. # noqa: E501
:type enabled: bool
"""
self._enabled = enabled
@property
def multi_value(self):
"""Gets the multi_value of this ComputerExtensionAttribute. # noqa: E501
:return: The multi_value of this ComputerExtensionAttribute. # noqa: E501
:rtype: bool
"""
return self._multi_value
@multi_value.setter
def multi_value(self, multi_value):
"""Sets the multi_value of this ComputerExtensionAttribute.
:param multi_value: The multi_value of this ComputerExtensionAttribute. # noqa: E501
:type multi_value: bool
"""
self._multi_value = multi_value
@property
def values(self):
"""Gets the values of this ComputerExtensionAttribute. # noqa: E501
A value of extension attribute, in some rare cases there may be multiple values present, hence the array. # noqa: E501
:return: The values of this ComputerExtensionAttribute. # noqa: E501
:rtype: list[str]
"""
return self._values
@values.setter
def values(self, values):
"""Sets the values of this ComputerExtensionAttribute.
A value of extension attribute, in some rare cases there may be multiple values present, hence the array. # noqa: E501
:param values: The values of this ComputerExtensionAttribute. # noqa: E501
:type values: list[str]
"""
self._values = values
@property
def data_type(self):
"""Gets the data_type of this ComputerExtensionAttribute. # noqa: E501
A data type of extension attribute. # noqa: E501
:return: The data_type of this ComputerExtensionAttribute. # noqa: E501
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""Sets the data_type of this ComputerExtensionAttribute.
A data type of extension attribute. # noqa: E501
:param data_type: The data_type of this ComputerExtensionAttribute. # noqa: E501
:type data_type: str
"""
allowed_values = [None,"STRING", "INTEGER", "DATE_TIME"] # noqa: E501
if self.local_vars_configuration.client_side_validation and data_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `data_type` ({0}), must be one of {1}" # noqa: E501
.format(data_type, allowed_values)
)
self._data_type = data_type
@property
def options(self):
"""Gets the options of this ComputerExtensionAttribute. # noqa: E501
A closed list of possible values (applies to `popup` input type). # noqa: E501
:return: The options of this ComputerExtensionAttribute. # noqa: E501
:rtype: list[str]
"""
return self._options
@options.setter
def options(self, options):
"""Sets the options of this ComputerExtensionAttribute.
A closed list of possible values (applies to `popup` input type). # noqa: E501
:param options: The options of this ComputerExtensionAttribute. # noqa: E501
:type options: list[str]
"""
self._options = options
@property
def input_type(self):
"""Gets the input_type of this ComputerExtensionAttribute. # noqa: E501
The input method. `text` is most common and means simply free text, `popup` is a closed list of values from which one or many can be selected and `script` value is calculated and can never be set directly. # noqa: E501
:return: The input_type of this ComputerExtensionAttribute. # noqa: E501
:rtype: str
"""
return self._input_type
@input_type.setter
def input_type(self, input_type):
"""Sets the input_type of this ComputerExtensionAttribute.
The input method. `text` is most common and means simply free text, `popup` is a closed list of values from which one or many can be selected and `script` value is calculated and can never be set directly. # noqa: E501
:param input_type: The input_type of this ComputerExtensionAttribute. # noqa: E501
:type input_type: str
"""
allowed_values = [None,"TEXT", "POPUP", "SCRIPT", "LDAP"] # noqa: E501
if self.local_vars_configuration.client_side_validation and input_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `input_type` ({0}), must be one of {1}" # noqa: E501
.format(input_type, allowed_values)
)
self._input_type = input_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ComputerExtensionAttribute):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ComputerExtensionAttribute):
return True
return self.to_dict() != other.to_dict()
| 1.742188 | 2 |
SSOKeyGen/ssokeygendialog.py | chrcoe/sso-keygen | 0 | 9987 | <reponame>chrcoe/sso-keygen<filename>SSOKeyGen/ssokeygendialog.py<gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ssokeygendialog.ui'
#
# Created: Sun Feb 1 12:33:36 2015
# by: PyQt5 UI code generator 5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(400, 300)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 240, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.testLabel = QtWidgets.QLabel(Dialog)
self.testLabel.setGeometry(QtCore.QRect(50, 40, 181, 31))
self.testLabel.setObjectName("testLabel")
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog Test"))
self.testLabel.setText(_translate("Dialog", "TextLabel"))
| 1.617188 | 2 |
util/n_download_util.py | TwrFyr/n-hen.py | 0 | 9988 | <reponame>TwrFyr/n-hen.py
import urllib.request
import os
from typing import List
from util.n_util import NUser
from util.n_util import get_n_entry
import time
import threading
from util.array_util import slice_array
delay: float = 2.5
class ProgressWrapper:
"""The progress wrapper keeps track of the progress of a operation by wrapping a current number and a total number.
It also wraps an optional function, which uses the current values and has to have the form 'func(current, total)'."""
def __init__(self, start, total, update):
self.current = start
self.total = total
self.update_callback = update
def update(self):
if self.update_callback is not None:
self.update_callback(self.current, self.total)
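# Illustrative wiring (a sketch, not part of the original module): a console callback
# of the required form func(current, total).
# def print_progress(current, total):
#     print(f'{current}/{total} files downloaded')
# progress = ProgressWrapper(0, 10, print_progress)
# progress.current += 1
# progress.update()  # -> "1/10 files downloaded"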
def download_images(lock, file_url_list: List[str], path: str, progress=None):
for file_url in file_url_list:
filename = os.path.join(path, file_url.split('/')[-1])
print('writing {} to {}'.format(file_url, filename))
urllib.request.urlretrieve(file_url, filename)
if progress is not None:
with lock:
progress.current += 1
progress.update()
def save_files_to_dir(file_url_list: List[str], path: str, update=None, thread_count: int = 1) -> None:
"""Saves all files represented by a list of url resources to the folder specified.
The files are named after the last part of the URL.
Increase `thread_count` to download the images with more threads."""
# pretend to be normal user
# opener=urllib.request.build_opener()
# opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
# urllib.request.install_opener(opener)
progress = ProgressWrapper(0, len(file_url_list), update)
progress.update()
if thread_count < 1 or thread_count > 16:
print(f'invalid thread count: {thread_count} not in [1, 16]')
return
else:
lock = threading.Lock()
threads = []
for i in range(thread_count):
slices = slice_array(file_url_list, thread_count)
t = threading.Thread(target=download_images, kwargs=dict(lock=lock, file_url_list=slices[i], path=path,
progress=progress),
daemon=True)
threads.append(t)
t.start()
for t in threads:
t.join()
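# Example call (sketch; the URL, path and callback below are placeholders):
# save_files_to_dir(['https://example.com/img/001.jpg'], '/tmp/downloads',
#                   update=lambda cur, tot: print(f'{cur}/{tot}'), thread_count=2)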
def download_all_favorites(n_user: NUser, base_dir: str, update_entry=None, update_page=None, thread_count=1) -> None:
"""Downloads all entries favorited by `n_user` using the number of `thread_count` threads."""
print('downloading {}\'s {} favorites...'.format(n_user.username, n_user.fav_count))
current_entry = 1
total_entries = n_user.fav_count
for min_entry in n_user.favorite_list:
if update_entry is not None:
update_entry(current_entry=min_entry, current=current_entry, total=total_entries)
# get entry data
print('downloading entry with id {}'.format(min_entry.n_id))
entry = get_n_entry(min_entry.n_id)
if entry is None:
print('no connection possible, skipping...')
current_entry += 1
continue
# check directory is valid
if not os.path.exists(base_dir):
print('base directory does not exist, aborting...')
break
save_dir = os.path.join(base_dir, entry.digits)
if os.path.exists(save_dir):
print('entry already exists, skipping...')
current_entry += 1
continue
else:
os.mkdir(save_dir)
# download images
save_files_to_dir(entry.image_url_list, save_dir, update=update_page, thread_count=thread_count)
print('waiting for {} seconds...'.format(delay))
time.sleep(delay)
current_entry += 1
if update_entry is not None:
update_entry(current_entry=None, current=current_entry, total=total_entries)
print('download finished')
| 3.109375 | 3 |
stable-baselines/tests/test_deterministic.py | princeton-vl/PackIt | 49 | 9989 | import pytest
from stable_baselines import A2C, ACER, ACKTR, DeepQ, DDPG, PPO1, PPO2, TRPO
from stable_baselines.ddpg import AdaptiveParamNoiseSpec
from stable_baselines.common.identity_env import IdentityEnv, IdentityEnvBox
from stable_baselines.common.vec_env import DummyVecEnv
PARAM_NOISE_DDPG = AdaptiveParamNoiseSpec(initial_stddev=float(0.2), desired_action_stddev=float(0.2))
# Hyperparameters for learning identity for each RL model
LEARN_FUNC_DICT = {
'a2c': lambda e: A2C(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'acer': lambda e: ACER(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'acktr': lambda e: ACKTR(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'deepq': lambda e: DeepQ(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'ddpg': lambda e: DDPG(policy="MlpPolicy", env=e, param_noise=PARAM_NOISE_DDPG).learn(total_timesteps=1000),
'ppo1': lambda e: PPO1(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'ppo2': lambda e: PPO2(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'trpo': lambda e: TRPO(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
}
@pytest.mark.slow
@pytest.mark.parametrize("model_name", ['a2c', 'acer', 'acktr', 'deepq', 'ppo1', 'ppo2', 'trpo'])
def test_identity(model_name):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
:param model_name: (str) Name of the RL model
"""
env = DummyVecEnv([lambda: IdentityEnv(10)])
model = LEARN_FUNC_DICT[model_name](env)
n_trials = 1000
obs = env.reset()
action_shape = model.predict(obs, deterministic=False)[0].shape
action, _ = model.predict(obs, deterministic=True)
assert action.shape == action_shape
for _ in range(n_trials):
new_action = model.predict(obs, deterministic=True)[0]
assert action == model.predict(obs, deterministic=True)[0]
assert new_action.shape == action_shape
# Free memory
del model, env
@pytest.mark.slow
@pytest.mark.parametrize("model_name", ['a2c', 'ddpg', 'ppo1', 'ppo2', 'trpo'])
def test_identity_continuous(model_name):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
:param model_name: (str) Name of the RL model
"""
env = DummyVecEnv([lambda: IdentityEnvBox(eps=0.5)])
model = LEARN_FUNC_DICT[model_name](env)
n_trials = 1000
obs = env.reset()
action_shape = model.predict(obs, deterministic=False)[0].shape
action, _ = model.predict(obs, deterministic=True)
assert action.shape == action_shape
for _ in range(n_trials):
new_action = model.predict(obs, deterministic=True)[0]
assert action == model.predict(obs, deterministic=True)[0]
assert new_action.shape == action_shape
| 2.03125 | 2 |
tests/models/tensorflow/convert_to_tensorflow_serving.py | filipecosta90/dlbench | 14 | 9990 | import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
export_dir = './reference/00000002'
graph_pb = './creditcardfraud.pb'
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
with tf.gfile.GFile(graph_pb, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sigs = {}
with tf.Session(graph=tf.Graph()) as sess:
# name="" is important to ensure we don't get spurious prefixing
tf.import_graph_def(graph_def, name="")
g = tf.get_default_graph()
inp1 = g.get_tensor_by_name("transaction:0")
inp2 = g.get_tensor_by_name("reference:0")
out = g.get_tensor_by_name("output:0")
sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
tf.saved_model.signature_def_utils.predict_signature_def(
{"transaction": inp1, "reference": inp2}, {"output": out})
builder.add_meta_graph_and_variables(sess,
[tag_constants.SERVING],
signature_def_map=sigs)
builder.save()
| 2.34375 | 2 |
pycuda/characterize.py | grlee77/pycuda | 0 | 9991 | <gh_stars>0
from __future__ import division
from __future__ import absolute_import
from pycuda.tools import context_dependent_memoize
import numpy as np
def platform_bits():
return tuple.__itemsize__ * 8
def has_stack():
from pycuda.driver import Context
return Context.get_device().compute_capability() >= (2, 0)
def has_double_support():
from pycuda.driver import Context
return Context.get_device().compute_capability() >= (1, 3)
@context_dependent_memoize
def sizeof(type_name, preamble=""):
from pycuda.compiler import SourceModule
mod = SourceModule("""
%s
extern "C"
__global__ void write_size(size_t *output)
{
*output = sizeof(%s);
}
""" % (preamble, type_name), no_extern_c=True)
import pycuda.gpuarray as gpuarray
output = gpuarray.empty((), dtype=np.uintp)
mod.get_function("write_size")(output, block=(1, 1, 1), grid=(1, 1))
return int(output.get())
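# Example (sketch; requires an active CUDA context, e.g. via pycuda.autoinit):
# sizeof("float4")                                            # typically 16
# sizeof("my_pair", "struct my_pair { int a; double b; };")   # device-side struct size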
| 2.125 | 2 |
bopt/transforms.py | georgedeath/bomean | 2 | 9992 | import torch
from scipy.stats import median_absolute_deviation
class Transform_Base(object):
"""
Base class for transformations based on some data.
"""
def __init__(self, Ytr):
self.Ytr = Ytr
# Transform the mean
def scale_mean(self, mu):
return mu
# Reverse the transformation to the mean
def unscale_mean(self, mu):
return mu
# Reverse the transformation to the variance
def unscale_var(self, var):
return var
class Transform_Standardize(Transform_Base):
"""
Standardize the data
"""
def __init__(self, Ytr):
super().__init__(Ytr)
self.Ytr_mean = Ytr.mean()
self.Ytr_std = Ytr.std()
self.Ytr_var = Ytr.var()
def scale_mean(self, mu):
return (mu - self.Ytr_mean) / self.Ytr_std
def unscale_mean(self, mu):
return mu * self.Ytr_std + self.Ytr_mean
def unscale_var(self, var):
return var * self.Ytr_var
class Transform_StandardizeRobustly(Transform_Base):
"""
Robustly standardize the data by estimating its scale
"""
def __init__(self, Ytr):
super().__init__(Ytr)
self.Ytr_median = Ytr.median()
Ytr_numpy = Ytr.numpy().ravel()
self.Ytr_scale = torch.tensor(median_absolute_deviation(Ytr_numpy))
self.Ytr_scaleSQR = self.Ytr_scale**2
def scale_mean(self, mu):
return (mu - self.Ytr_median) / self.Ytr_scale
def unscale_mean(self, mu):
return mu * self.Ytr_scale + self.Ytr_median
def unscale_var(self, var):
return var * self.Ytr_scaleSQR
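# Minimal usage sketch (illustrative): standardize training targets, then map a model's
# predictive mean/variance back to the original scale.
# Ytr = torch.randn(20, 1)
# tr = Transform_Standardize(Ytr)
# y_scaled = tr.scale_mean(Ytr)
# mu_orig = tr.unscale_mean(torch.zeros(5))   # hypothetical predictive mean
# var_orig = tr.unscale_var(torch.ones(5))    # hypothetical predictive variance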
| 2.65625 | 3 |
pygs/graphserver/compiler/dedupe.py | abyrd/graphserver | 2 | 9993 | # eliminate duplicate service periods from a GTFS database
from graphserver.ext.gtfs.gtfsdb import GTFSDatabase
import sys
from optparse import OptionParser
def main():
usage = """usage: python dedupe.py <graphdb_filename>"""
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
exit(-1)
graphdb_filename = args[0]
gtfsdb = GTFSDatabase( graphdb_filename )
query = """
SELECT count(*), monday, tuesday, wednesday, thursday, friday, saturday, sunday, start_date, end_date
FROM calendar
GROUP BY monday, tuesday, wednesday, thursday, friday, saturday, sunday, start_date, end_date"""
duped_periods = gtfsdb.execute( query )
equivilants = []
for count, m,t,w,th,f,s,su,start_date,end_date in duped_periods:
# no need to check for dupes if there's only one
if count==1:
continue
#print count, m, t, w, th, f, s, su, start_date, end_date
# get service_ids for this dow/start_date/end_date combination
service_ids = [x[0] for x in list( gtfsdb.execute( "SELECT service_id FROM calendar where monday=? and tuesday=? and wednesday=? and thursday=? and friday=? and saturday=? and sunday=? and start_date=? and end_date=?", (m,t,w,th,f,s,su,start_date,end_date) ) ) ]
# group by service periods with the same set of exceptions
exception_set_grouper = {}
for service_id in service_ids:
exception_set = list(gtfsdb.execute( "SELECT date, exception_type FROM calendar_dates WHERE service_id=?", (service_id,) ) )
exception_set.sort()
exception_set = tuple(exception_set)
exception_set_grouper[exception_set] = exception_set_grouper.get(exception_set,[])
exception_set_grouper[exception_set].append( service_id )
# extend list of equivilants
for i, exception_set_group in enumerate( exception_set_grouper.values() ):
equivilants.append( ("%d%d%d%d%d%d%d-%s-%s-%d"%(m,t,w,th,f,s,su,start_date,end_date,i), exception_set_group) )
for new_name, old_names in equivilants:
for old_name in old_names:
print old_name, new_name
c = gtfsdb.conn.cursor()
c.execute( "UPDATE calendar SET service_id=? WHERE service_id=?", (new_name, old_name) )
c.execute( "UPDATE calendar_dates SET service_id=? WHERE service_id=?", (new_name, old_name) )
c.execute( "UPDATE trips SET service_id=? WHERE service_id=?", (new_name, old_name) )
gtfsdb.conn.commit()
c.close()
if __name__=='__main__':
| 2.90625 | 3 |
10/01/03/2.py | pylangstudy/201707 | 0 | 9994 | class MyClass:
def __repr__(self): return self.__class__.__name__ + '()'
print(MyClass().__repr__())
| 3.015625 | 3 |
tests/tasks/core/test_core.py | andykawabata/prefect | 2 | 9995 | import pytest
from prefect.core import Edge, Flow, Parameter, Task
from prefect.tasks.core import collections
from prefect.tasks.core.constants import Constant
from prefect.tasks.core.function import FunctionTask
class IdentityTask(Task):
def run(self, x):
return x
class TestConstant:
def test_constant_task_returns_its_value(self):
x = Constant("x")
assert x.run() == "x"
y = Constant(100)
assert y.run() == 100
def test_automatic_create_constant_task(self):
with Flow(name="test") as flow:
t = Task()
t.set_dependencies(upstream_tasks=[4])
assert len(flow.tasks) == 2
assert any(isinstance(t, Constant) for t in flow.tasks)
class TestFunctionTask:
def test_function_task_requires_callable(self):
with pytest.raises(TypeError):
FunctionTask(fn=1)
def test_function_task_takes_name_from_callable(self):
def my_fn():
pass
f = FunctionTask(fn=my_fn)
assert f.name == "my_fn"
def test_function_task_takes_name_from_arg_if_provided(self):
def my_fn():
pass
f = FunctionTask(fn=my_fn, name="test")
assert f.name == "test"
def test_function_task_docstring(self):
def my_fn():
"""An example docstring."""
pass
# Original docstring available on class
assert "FunctionTask" in FunctionTask.__doc__
# Wrapped function is docstring on instance
f = FunctionTask(fn=my_fn)
assert f.__doc__ == my_fn.__doc__
# Except when no docstring on wrapped function
f = FunctionTask(fn=lambda x: x + 1)
assert "FunctionTask" in f.__doc__
def test_function_task_sets__wrapped__(self):
def my_fn():
"""An example function"""
pass
t = FunctionTask(fn=my_fn)
assert t.__wrapped__ == my_fn
assert not hasattr(FunctionTask, "__wrapped__")
class TestCollections:
def test_list_returns_a_list(self):
l = collections.List()
with Flow(name="test") as f:
l.bind(1, 2)
assert f.run().result[l].result == [1, 2]
def test_list_binds_varargs(self):
t1 = Task()
t2 = Task()
l = collections.List()
with Flow(name="test") as f:
l.bind(t1, t2)
assert set([t1, t2, l]) == f.tasks
assert Edge(t1, l, key="arg_1") in f.edges
assert Edge(t2, l, key="arg_2") in f.edges
def test_tuple_returns_a_tuple(self):
l = collections.Tuple()
with Flow(name="test") as f:
l.bind(1, 2)
assert f.run().result[l].result == (1, 2)
def test_tuple_binds_varargs(self):
t1 = Task()
t2 = Task()
l = collections.Tuple()
with Flow(name="test") as f:
l.bind(t1, t2)
assert set([t1, t2, l]) == f.tasks
assert Edge(t1, l, key="arg_1") in f.edges
assert Edge(t2, l, key="arg_2") in f.edges
def test_set_returns_a_set(self):
l = collections.Set()
with Flow(name="test") as f:
l.bind(1, 2)
assert f.run().result[l].result == set([1, 2])
def test_set_binds_varargs(self):
t1 = Task()
t2 = Task()
l = collections.Set()
with Flow(name="test") as f:
l.bind(t1, t2)
assert set([t1, t2, l]) == f.tasks
assert Edge(t1, l, key="arg_1") in f.edges
assert Edge(t2, l, key="arg_2") in f.edges
def test_dict_returns_a_dict(self):
l = collections.Dict()
with Flow(name="test") as f:
l.bind(keys=["a", "b"], values=[1, 2])
assert f.run().result[l].result == dict(a=1, b=2)
def test_dict_handles_non_string_keys(self):
l = collections.Dict()
with Flow(name="test") as f:
l.bind(keys=[None, 55], values=[1, 2])
assert f.run().result[l].result == {None: 1, 55: 2}
def test_dict_raises_for_differing_length_key_value_pairs(self):
l = collections.Dict()
with Flow(name="test") as f:
l.bind(keys=["a"], values=[1, 2])
state = f.run()
assert state.result[l].is_failed()
assert isinstance(state.result[l].result, ValueError)
def test_list_automatically_applied_to_callargs(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
with Flow(name="test") as f:
identity.bind(x=[x, y])
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.List) for t in f.tasks) == 1
assert state.result[identity].result == [1, 2]
def test_list_automatically_applied_to_callargs_imperative(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
f = Flow(name="test")
f.add_task(identity)
identity.bind(x=[x, y], flow=f)
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.List) for t in f.tasks) == 1
assert state.result[identity].result == [1, 2]
def test_tuple_automatically_applied_to_callargs(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
with Flow(name="test") as f:
identity.bind(x=(x, y))
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.Tuple) for t in f.tasks) == 1
assert state.result[identity].result == (1, 2)
def test_tuple_automatically_applied_to_callargs_imperative(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
f = Flow(name="test")
f.add_task(identity)
identity.bind(x=(x, y), flow=f)
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.Tuple) for t in f.tasks) == 1
assert state.result[identity].result == (1, 2)
def test_set_automatically_applied_to_callargs(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
with Flow(name="test") as f:
identity.bind(x=set([x, y]))
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.Set) for t in f.tasks) == 1
assert state.result[identity].result == set([1, 2])
def test_set_automatically_applied_to_callargs_imperative(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
f = Flow(name="test")
f.add_task(identity)
identity.bind(x=set([x, y]), flow=f)
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 4
assert sum(isinstance(t, collections.Set) for t in f.tasks) == 1
assert state.result[identity].result == set([1, 2])
def test_dict_automatically_applied_to_callargs(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
with Flow(name="test") as f:
identity.bind(x=dict(a=x, b=y))
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 5 # 2 params, identity, Dict, List of dict values
assert sum(isinstance(t, collections.Dict) for t in f.tasks) == 1
assert state.result[identity].result == dict(a=1, b=2)
def test_dict_automatically_applied_to_callargs_imperative(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
f = Flow(name="test")
f.add_task(identity)
identity.bind(x=dict(a=x, b=y), flow=f)
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 5 # 2 params, identity, Dict, List of dict values
assert sum(isinstance(t, collections.Dict) for t in f.tasks) == 1
assert state.result[identity].result == dict(a=1, b=2)
def test_nested_collection_automatically_applied_to_callargs(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
with Flow(name="test") as f:
identity.bind(x=dict(a=[x, dict(y=y)], b=(y, set([x]))))
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 10
assert state.result[identity].result == dict(a=[1, dict(y=2)], b=(2, set([1])))
def test_nested_collection_automatically_applied_to_callargs_imperative(self):
x = Parameter("x")
y = Parameter("y")
identity = IdentityTask()
f = Flow(name="test")
f.add_task(identity)
identity.bind(x=dict(a=[x, dict(y=y)], b=(y, set([x]))), flow=f)
state = f.run(parameters=dict(x=1, y=2))
assert len(f.tasks) == 10
assert state.result[identity].result == dict(a=[1, dict(y=2)], b=(2, set([1])))
def test_list_maintains_sort_order_for_more_than_10_items(self):
# https://github.com/PrefectHQ/prefect/issues/2451
l = collections.List()
with Flow(name="test") as f:
l.bind(*list(range(15)))
assert f.run().result[l].result == list(range(15))
def test_tuple_maintains_sort_order_for_more_than_10_items(self):
# https://github.com/PrefectHQ/prefect/issues/2451
t = collections.Tuple()
with Flow(name="test") as f:
t.bind(*list(range(15)))
assert f.run().result[t].result == tuple(range(15))
| 2.21875 | 2 |
Tree/node.py | philipwerner/python_data_structures | 0 | 9996 | """Node class module for Binary Tree."""
class Node(object):
"""The Node class."""
def __init__(self, value):
"""Initialization of node object."""
self.value = value
self.left = None
self.right = None
def __str__(self):
"""Return a string representation of the node object."""
return f'{self.value}'
def __repr__(self):
"""Return a representation of the node object."""
return f'<Node | Value: {self.value} | Left: {self.left} | Right: {self.right}>'
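# Editorial usage sketch (not part of the original module): a minimal example of
# wiring Node objects into a small tree; attribute names follow the class above.
if __name__ == '__main__':
    root = Node(10)
    root.left = Node(5)
    root.right = Node(15)
    print(repr(root))  # <Node | Value: 10 | Left: 5 | Right: 15>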
| 3.9375 | 4 |
qutip/operators.py | pschindler/qutip | 1 | 9997 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains functions for generating Qobj representation of a variety
of commonly occurring quantum operators.
"""
__all__ = ['jmat', 'spin_Jx', 'spin_Jy', 'spin_Jz', 'spin_Jm', 'spin_Jp',
'spin_J_set', 'sigmap', 'sigmam', 'sigmax', 'sigmay', 'sigmaz',
'destroy', 'create', 'qeye', 'identity', 'position', 'momentum',
'num', 'squeeze', 'squeezing', 'displace', 'commutator',
'qutrit_ops', 'qdiags', 'phase', 'qzero', 'enr_destroy',
'enr_identity', 'charge', 'tunneling']
import numbers
import numpy as np
import scipy
import scipy.sparse as sp
from qutip.qobj import Qobj
from qutip.fastsparse import fast_csr_matrix, fast_identity
from qutip.dimensions import flatten
#
# Spin operators
#
def jmat(j, *args):
"""Higher-order spin operators:
Parameters
----------
j : float
Spin of operator
args : str
Which operator to return 'x','y','z','+','-'.
If no args given, then output is ['x','y','z']
Returns
-------
jmat : qobj / ndarray
``qobj`` for requested spin operator(s).
Examples
--------
>>> jmat(1) # doctest: +SKIP
[ Quantum object: dims = [[3], [3]], \
shape = [3, 3], type = oper, isHerm = True
Qobj data =
[[ 0. 0.70710678 0. ]
[ 0.70710678 0. 0.70710678]
[ 0. 0.70710678 0. ]]
Quantum object: dims = [[3], [3]], \
shape = [3, 3], type = oper, isHerm = True
Qobj data =
[[ 0.+0.j 0.-0.70710678j 0.+0.j ]
[ 0.+0.70710678j 0.+0.j 0.-0.70710678j]
[ 0.+0.j 0.+0.70710678j 0.+0.j ]]
Quantum object: dims = [[3], [3]], \
shape = [3, 3], type = oper, isHerm = True
Qobj data =
[[ 1. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. -1.]]]
Notes
-----
If no 'args' input, then returns array of ['x','y','z'] operators.
"""
if (np.fix(2 * j) != 2 * j) or (j < 0):
raise TypeError('j must be a non-negative integer or half-integer')
if not args:
return jmat(j, 'x'), jmat(j, 'y'), jmat(j, 'z')
if args[0] == '+':
A = _jplus(j)
elif args[0] == '-':
A = _jplus(j).getH()
elif args[0] == 'x':
A = 0.5 * (_jplus(j) + _jplus(j).getH())
elif args[0] == 'y':
A = -0.5 * 1j * (_jplus(j) - _jplus(j).getH())
elif args[0] == 'z':
A = _jz(j)
else:
raise TypeError('Invalid type')
return Qobj(A)
def _jplus(j):
"""
Internal functions for generating the data representing the J-plus
operator.
"""
m = np.arange(j, -j - 1, -1, dtype=complex)
data = (np.sqrt(j * (j + 1.0) - (m + 1.0) * m))[1:]
N = m.shape[0]
ind = np.arange(1, N, dtype=np.int32)
ptr = np.array(list(range(N-1))+[N-1]*2, dtype=np.int32)
ptr[-1] = N-1
return fast_csr_matrix((data,ind,ptr), shape=(N,N))
def _jz(j):
"""
Internal functions for generating the data representing the J-z operator.
"""
N = int(2*j+1)
data = np.array([j-k for k in range(N) if (j-k)!=0], dtype=complex)
# Even shaped matrix
if (N % 2 == 0):
ind = np.arange(N, dtype=np.int32)
ptr = np.arange(N+1,dtype=np.int32)
ptr[-1] = N
# Odd shaped matrix
else:
j = int(j)
ind = np.array(list(range(j))+list(range(j+1,N)), dtype=np.int32)
ptr = np.array(list(range(j+1))+list(range(j,N)), dtype=np.int32)
ptr[-1] = N-1
return fast_csr_matrix((data,ind,ptr), shape=(N,N))
#
# Spin j operators:
#
def spin_Jx(j):
"""Spin-j x operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, 'x')
def spin_Jy(j):
"""Spin-j y operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, 'y')
def spin_Jz(j):
"""Spin-j z operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, 'z')
def spin_Jm(j):
"""Spin-j annihilation operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, '-')
def spin_Jp(j):
"""Spin-j creation operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, '+')
def spin_J_set(j):
"""Set of spin-j operators (x, y, z)
Parameters
----------
j : float
Spin of operators
Returns
-------
list : list of Qobj
list of ``qobj`` representing the spin operators.
"""
return jmat(j)
#
# Pauli spin 1/2 operators:
#
def sigmap():
"""Creation operator for Pauli spins.
Examples
--------
>>> sigmap() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
Qobj data =
[[ 0. 1.]
[ 0. 0.]]
"""
return jmat(1 / 2., '+')
def sigmam():
"""Annihilation operator for Pauli spins.
Examples
--------
>>> sigmam() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
Qobj data =
[[ 0. 0.]
[ 1. 0.]]
"""
return jmat(1 / 2., '-')
def sigmax():
"""Pauli spin 1/2 sigma-x operator
Examples
--------
>>> sigmax() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
Qobj data =
[[ 0. 1.]
[ 1. 0.]]
"""
return 2.0 * jmat(1.0 / 2, 'x')
def sigmay():
"""Pauli spin 1/2 sigma-y operator.
Examples
--------
>>> sigmay() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
Qobj data =
[[ 0.+0.j 0.-1.j]
[ 0.+1.j 0.+0.j]]
"""
return 2.0 * jmat(1.0 / 2, 'y')
def sigmaz():
"""Pauli spin 1/2 sigma-z operator.
Examples
--------
>>> sigmaz() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
Qobj data =
[[ 1. 0.]
[ 0. -1.]]
"""
return 2.0 * jmat(1.0 / 2, 'z')
#
# DESTROY returns annihilation operator for N dimensional Hilbert space
# out = destroy(N), N is integer value & N>0
#
def destroy(N, offset=0):
'''Destruction (lowering) operator.
Parameters
----------
N : int
Dimension of Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Qobj for lowering operator.
Examples
--------
>>> destroy(4) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.00000000+0.j 1.00000000+0.j 0.00000000+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.00000000+0.j 1.41421356+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j]
[ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]]
'''
if not isinstance(N, (int, np.integer)): # raise error if N not integer
raise ValueError("Hilbert space dimension must be integer value")
data = np.sqrt(np.arange(offset+1, N+offset, dtype=complex))
ind = np.arange(1,N, dtype=np.int32)
ptr = np.arange(N+1, dtype=np.int32)
ptr[-1] = N-1
return Qobj(fast_csr_matrix((data,ind,ptr),shape=(N,N)), isherm=False)
#
# create returns creation operator for N dimensional Hilbert space
# out = create(N), N is integer value & N>0
#
def create(N, offset=0):
'''Creation (raising) operator.
Parameters
----------
N : int
Dimension of Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Qobj for raising operator.
Examples
--------
>>> create(4) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]
[ 1.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]
[ 0.00000000+0.j 1.41421356+0.j 0.00000000+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j 0.00000000+0.j]]
'''
if not isinstance(N, (int, np.integer)): # raise error if N not integer
raise ValueError("Hilbert space dimension must be integer value")
qo = destroy(N, offset=offset) # create operator using destroy function
return qo.dag()
def _implicit_tensor_dimensions(dimensions):
"""
Total flattened size and operator dimensions for operator creation routines
that automatically perform tensor products.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
First dimension of an operator which can create an implicit tensor
product. If the type is `int`, it is promoted first to `[dimensions]`.
From there, it should be one element of the two-element `dims` parameter of a
`qutip.Qobj` representing an `oper` or `super`, with possible tensor
products.
Returns
-------
size : int
Dimension of backing matrix required to represent operator.
dimensions : list
Dimension list in the form required by ``Qobj`` creation.
"""
if not isinstance(dimensions, list):
dimensions = [dimensions]
flat = flatten(dimensions)
if not all(isinstance(x, numbers.Integral) and x >= 0 for x in flat):
raise ValueError("All dimensions must be integers >= 0")
return np.prod(flat), [dimensions, dimensions]
def qzero(dimensions):
"""
Zero operator.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
Dimension of Hilbert space. If provided as a list of ints, then the
dimension is the product over this list, but the ``dims`` property of
the new Qobj are set to this list. This can produce either `oper` or
`super` depending on the passed `dimensions`.
Returns
-------
qzero : qobj
Zero operator Qobj.
"""
size, dimensions = _implicit_tensor_dimensions(dimensions)
# A sparse matrix with no data is equal to a zero matrix.
return Qobj(fast_csr_matrix(shape=(size, size), dtype=complex),
dims=dimensions, isherm=True)
#
# QEYE returns identity operator for a Hilbert space with dimensions dims.
# a = qeye(N), N is integer or list of integers & all elements >= 0
#
def qeye(dimensions):
"""
Identity operator.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
Dimension of Hilbert space. If provided as a list of ints, then the
dimension is the product over this list, but the ``dims`` property of
the new Qobj are set to this list. This can produce either `oper` or
`super` depending on the passed `dimensions`.
Returns
-------
oper : qobj
Identity operator Qobj.
Examples
--------
>>> qeye(3) # doctest: +SKIP
Quantum object: dims = [[3], [3]], shape = (3, 3), type = oper, \
isherm = True
Qobj data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]]
>>> qeye([2,2]) # doctest: +SKIP
Quantum object: dims = [[2, 2], [2, 2]], shape = (4, 4), type = oper, \
isherm = True
Qobj data =
[[1. 0. 0. 0.]
[0. 1. 0. 0.]
[0. 0. 1. 0.]
[0. 0. 0. 1.]]
"""
size, dimensions = _implicit_tensor_dimensions(dimensions)
return Qobj(fast_identity(size),
dims=dimensions, isherm=True, isunitary=True)
def identity(dims):
"""Identity operator. Alternative name to :func:`qeye`.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
Dimension of Hilbert space. If provided as a list of ints, then the
dimension is the product over this list, but the ``dims`` property of
the new Qobj are set to this list. This can produce either `oper` or
`super` depending on the passed `dimensions`.
Returns
-------
oper : qobj
Identity operator Qobj.
"""
return qeye(dims)
def position(N, offset=0):
"""
Position operator x=1/sqrt(2)*(a+a.dag())
Parameters
----------
N : int
Number of Fock states in Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Position operator as Qobj.
"""
a = destroy(N, offset=offset)
return 1.0 / np.sqrt(2.0) * (a + a.dag())
def momentum(N, offset=0):
"""
Momentum operator p=-1j/sqrt(2)*(a-a.dag())
Parameters
----------
N : int
Number of Fock states in Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Momentum operator as Qobj.
"""
a = destroy(N, offset=offset)
return -1j / np.sqrt(2.0) * (a - a.dag())
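# Editorial illustration (not part of the original QuTiP source): on a truncated
# Fock space, position() and momentum() satisfy the canonical commutation relation
# [x, p] = i only approximately; the deviation lives in the highest number states.
# The helper below is a sketch of how one might inspect this; N=20 is arbitrary.
def _example_canonical_commutator(N=20):
    """Return [x, p] on an N-dimensional truncated Fock space (editor example)."""
    x = position(N)
    p = momentum(N)
    # approximately 1j * identity away from the truncation edge
    return x * p - p * x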
def num(N, offset=0):
"""Quantum object for number operator.
Parameters
----------
N : int
The dimension of the Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper: qobj
Qobj for number operator.
Examples
--------
>>> num(4) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = True
Qobj data =
[[0 0 0 0]
[0 1 0 0]
[0 0 2 0]
[0 0 0 3]]
"""
if offset == 0:
data = np.arange(1,N, dtype=complex)
ind = np.arange(1,N, dtype=np.int32)
ptr = np.array([0]+list(range(0,N)), dtype=np.int32)
ptr[-1] = N-1
else:
data = np.arange(offset, offset + N, dtype=complex)
ind = np.arange(N, dtype=np.int32)
ptr = np.arange(N+1,dtype=np.int32)
ptr[-1] = N
return Qobj(fast_csr_matrix((data,ind,ptr), shape=(N,N)), isherm=True)
def squeeze(N, z, offset=0):
"""Single-mode Squeezing operator.
Parameters
----------
N : int
Dimension of Hilbert space.
z : float/complex
Squeezing parameter.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : :class:`qutip.qobj.Qobj`
Squeezing operator.
Examples
--------
>>> squeeze(4, 0.25) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.98441565+0.j 0.00000000+0.j 0.17585742+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.95349007+0.j 0.00000000+0.j 0.30142443+0.j]
[-0.17585742+0.j 0.00000000+0.j 0.98441565+0.j 0.00000000+0.j]
[ 0.00000000+0.j -0.30142443+0.j 0.00000000+0.j 0.95349007+0.j]]
"""
a = destroy(N, offset=offset)
op = (1 / 2.0) * np.conj(z) * (a ** 2) - (1 / 2.0) * z * (a.dag()) ** 2
return op.expm()
def squeezing(a1, a2, z):
"""Generalized squeezing operator.
.. math::
S(z) = \\exp\\left(\\frac{1}{2}\\left(z^*a_1a_2
- za_1^\\dagger a_2^\\dagger\\right)\\right)
Parameters
----------
a1 : :class:`qutip.qobj.Qobj`
Operator 1.
a2 : :class:`qutip.qobj.Qobj`
Operator 2.
z : float/complex
Squeezing parameter.
Returns
-------
oper : :class:`qutip.qobj.Qobj`
Squeezing operator.
"""
b = 0.5 * (np.conj(z) * (a1 * a2) - z * (a1.dag() * a2.dag()))
return b.expm()
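# Editorial illustration (not part of the original QuTiP source): for a single
# mode, squeezing(a, a, z) reduces to the same operator as squeeze(N, z), since
# both exponentiate (z* a^2 - z a^dag^2) / 2. The values N=4, z=0.25 are arbitrary.
def _example_squeezing_consistency(N=4, z=0.25):
    """Return both single-mode squeezing constructions for comparison (editor example)."""
    a = destroy(N)
    # the two Qobj results agree up to numerical precision
    return squeeze(N, z), squeezing(a, a, z)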
def displace(N, alpha, offset=0):
"""Single-mode displacement operator.
Parameters
----------
N : int
Dimension of Hilbert space.
alpha : float/complex
Displacement amplitude.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Displacement operator.
Examples
---------
>>> displace(4,0.25) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.96923323+0.j -0.24230859+0.j 0.04282883+0.j -0.00626025+0.j]
[ 0.24230859+0.j 0.90866411+0.j -0.33183303+0.j 0.07418172+0.j]
[ 0.04282883+0.j 0.33183303+0.j 0.84809499+0.j -0.41083747+0.j]
[ 0.00626025+0.j 0.07418172+0.j 0.41083747+0.j 0.90866411+0.j]]
"""
a = destroy(N, offset=offset)
D = (alpha * a.dag() - np.conj(alpha) * a).expm()
return D
def commutator(A, B, kind="normal"):
"""
Return the commutator of kind `kind` (normal, anti) of the
two operators A and B.
"""
if kind == 'normal':
return A * B - B * A
elif kind == 'anti':
return A * B + B * A
else:
raise TypeError("Unknown commutator kind '%s'" % kind)
def qutrit_ops():
"""
Operators for a three level system (qutrit).
Returns
-------
opers: array
`array` of qutrit operators.
"""
from qutip.states import qutrit_basis
one, two, three = qutrit_basis()
sig11 = one * one.dag()
sig22 = two * two.dag()
sig33 = three * three.dag()
sig12 = one * two.dag()
sig23 = two * three.dag()
sig31 = three * one.dag()
return np.array([sig11, sig22, sig33, sig12, sig23, sig31],
dtype=object)
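# Editorial illustration (not part of the original QuTiP source): the first three
# entries returned by qutrit_ops() are the level projectors, so they resolve the
# identity on the qutrit space.
def _example_qutrit_completeness():
    """Return sig11 + sig22 + sig33, which equals qeye(3) (editor example)."""
    sig11, sig22, sig33 = qutrit_ops()[:3]
    return sig11 + sig22 + sig33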
def qdiags(diagonals, offsets, dims=None, shape=None):
"""
Constructs an operator from an array of diagonals.
Parameters
----------
diagonals : sequence of array_like
Array of elements to place along the selected diagonals.
offsets : sequence of ints
Sequence for diagonals to be set:
- k=0 main diagonal
- k>0 kth upper diagonal
- k<0 kth lower diagonal
dims : list, optional
Dimensions for operator
shape : list, tuple, optional
Shape of operator. If omitted, a square operator large enough
to contain the diagonals is generated.
See Also
--------
scipy.sparse.diags : for usage information.
Notes
-----
This function requires SciPy 0.11+.
Examples
--------
>>> qdiags(sqrt(range(1, 4)), 1) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isherm = False
Qobj data =
[[ 0. 1. 0. 0. ]
[ 0. 0. 1.41421356 0. ]
[ 0. 0. 0. 1.73205081]
[ 0. 0. 0. 0. ]]
"""
data = sp.diags(diagonals, offsets, shape, format='csr', dtype=complex)
if not dims:
dims = [[], []]
if not shape:
shape = []
return Qobj(data, dims, list(shape))
def phase(N, phi0=0):
"""
Single-mode Pegg-Barnett phase operator.
Parameters
----------
N : int
Number of basis states in Hilbert space.
phi0 : float
Reference phase.
Returns
-------
oper : qobj
Phase operator with respect to reference phase.
Notes
-----
The Pegg-Barnett phase operator is Hermitian on a truncated Hilbert space.
"""
phim = phi0 + (2.0 * np.pi * np.arange(N)) / N # discrete phase angles
n = np.arange(N).reshape((N, 1))
states = np.array([np.sqrt(kk) / np.sqrt(N) * np.exp(1.0j * n * kk)
for kk in phim])
ops = np.array([np.outer(st, st.conj()) for st in states])
return Qobj(np.sum(ops, axis=0))
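# Editorial illustration (not part of the original QuTiP source): the spectrum of
# the Pegg-Barnett operator consists of the N discrete phases phi0 + 2*pi*k/N,
# which can be recovered from the eigenvalues. N=4, phi0=0.0 are arbitrary choices.
def _example_phase_spectrum(N=4, phi0=0.0):
    """Return the eigenvalues of phase(N, phi0) (editor example)."""
    # phi0, phi0 + 2*pi/N, ..., phi0 + 2*pi*(N-1)/N (sorted ascending)
    return phase(N, phi0).eigenenergies()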
def enr_destroy(dims, excitations):
"""
Generate annihilation operators for modes in an excitation-number-restricted
state space. For example, consider a system consisting of 4 modes, each
with 5 states. The total Hilbert space size is 5**4 = 625. If we are
only interested in states that contain up to 2 excitations, we only need
to include states such as
(0, 0, 0, 0)
(0, 0, 0, 1)
(0, 0, 0, 2)
(0, 0, 1, 0)
(0, 0, 1, 1)
(0, 0, 2, 0)
...
This function creates annihilation operators for the 4 modes that act
within this state space:
a1, a2, a3, a4 = enr_destroy([5, 5, 5, 5], excitations=2)
From this point onwards, the annihilation operators a1, ..., a4 can be
used to set up a Hamiltonian, collapse operators and expectation-value
operators, etc., following the usual pattern.
Parameters
----------
dims : list
A list of the dimensions of each subsystem of a composite quantum
system.
excitations : integer
The maximum number of excitations that are to be included in the
state space.
Returns
-------
a_ops : list of qobj
A list of annihilation operators for each mode in the composite
quantum system described by dims.
"""
from qutip.states import enr_state_dictionaries
nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)
a_ops = [sp.lil_matrix((nstates, nstates), dtype=complex)
for _ in range(len(dims))]
for n1, state1 in idx2state.items():
for n2, state2 in idx2state.items():
for idx, a in enumerate(a_ops):
s1 = [s for idx2, s in enumerate(state1) if idx != idx2]
s2 = [s for idx2, s in enumerate(state2) if idx != idx2]
if (state1[idx] == state2[idx] - 1) and (s1 == s2):
a_ops[idx][n1, n2] = np.sqrt(state2[idx])
return [Qobj(a, dims=[dims, dims]) for a in a_ops]
def enr_identity(dims, excitations):
"""
Generate the identity operator for the excitation-number restricted
state space defined by the `dims` and `excitations` arguments. See the
docstring for enr_fock for a more detailed description of these arguments.
Parameters
----------
dims : list
A list of the dimensions of each subsystem of a composite quantum
system.
excitations : integer
The maximum number of excitations that are to be included in the
state space.
Returns
-------
op : Qobj
A Qobj instance that represents the identity operator in the
excitation-number-restricted state space defined by `dims` and
`excitations`.
"""
from qutip.states import enr_state_dictionaries
nstates, _, _ = enr_state_dictionaries(dims, excitations)
data = sp.eye(nstates, nstates, dtype=complex)
return Qobj(data, dims=[dims, dims])
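# Editorial illustration (not part of the original QuTiP source): a sketch of how
# the ENR operators combine, e.g. building a simple two-mode hopping Hamiltonian
# inside the restricted space. dims=(4, 4), excitations=2 and the coupling g=0.1
# are arbitrary example values.
def _example_enr_hamiltonian(dims=(4, 4), excitations=2, g=0.1):
    """Return the ENR identity and a two-mode hopping Hamiltonian (editor example)."""
    a1, a2 = enr_destroy(list(dims), excitations)
    eye = enr_identity(list(dims), excitations)
    H = g * (a1.dag() * a2 + a2.dag() * a1)
    return eye, H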
def charge(Nmax, Nmin=None, frac = 1):
"""
Generate the diagonal charge operator over charge states
from Nmin to Nmax.
Parameters
----------
Nmax : int
Maximum charge state to consider.
Nmin : int (default = -Nmax)
Lowest charge state to consider.
frac : float (default = 1)
Specify fractional charge if needed.
Returns
-------
C : Qobj
Charge operator over [Nmin,Nmax].
Notes
-----
.. versionadded:: 3.2
"""
if Nmin is None:
Nmin = -Nmax
diag = np.arange(Nmin, Nmax+1, dtype=float)
if frac != 1:
diag *= frac
C = sp.diags(diag, 0, format='csr', dtype=complex)
return Qobj(C, isherm=True)
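# Editorial illustration (not part of the original QuTiP source): charge(2) is
# diagonal with entries -2, -1, 0, 1, 2, and the frac argument simply rescales
# that diagonal.
def _example_charge_diagonal(Nmax=2, frac=1):
    """Return the diagonal of the charge operator (editor example)."""
    # entries Nmin, ..., Nmax scaled by frac
    return charge(Nmax, frac=frac).diag()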
def tunneling(N, m=1):
"""
Tunneling operator with elements of the form
:math:`\\sum |N><N+m| + |N+m><N|`.
Parameters
----------
N : int
Number of basis states in Hilbert space.
m : int (default = 1)
Number of excitations in tunneling event.
Returns
-------
T : Qobj
Tunneling operator.
Notes
-----
.. versionadded:: 3.2
"""
diags = [np.ones(N-m,dtype=int),np.ones(N-m,dtype=int)]
T = sp.diags(diags,[m,-m],format='csr', dtype=complex)
return Qobj(T, isherm=True)
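# Editorial illustration (not part of the original QuTiP source): tunneling(N, m)
# couples each |n> to |n+m> with unit amplitude, so for m=1 it is the unweighted
# analogue of destroy(N) + create(N).
def _example_tunneling_structure(N=4, m=1):
    """Return the tunneling operator alongside the weighted ladder sum (editor example)."""
    # same sparsity pattern for m=1; the ladder sum carries sqrt(n) weights
    return tunneling(N, m), destroy(N) + create(N)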
# Break circular dependencies by a trailing import.
# Note that we use a relative import here to deal with that
# qutip.tensor is the *function* tensor, not the module.
from qutip.tensor import tensor
| 1.515625 | 2 |
tests/test_deepsv.py | lsantuari/deepsv | 0 | 9998 | <filename>tests/test_deepsv.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from deepsv import deepsv
from unittest.mock import patch
"""Tests for the deepsv module.
"""
def test_something():
assert True
def test_adding_numbers():
assert deepsv.add_numbers(1, 1) == 2
assert deepsv.add_numbers(1, 2) != 2
def test_with_error():
with pytest.raises(ValueError):
# Do something that raises a ValueError
raise ValueError
# Fixture example
@pytest.fixture
def an_object():
return {}
def test_deepsv(an_object):
assert an_object == {}
def side_effect_function(mock):
print('This part of the code runs when patched')
return 'Some text that I want to test with'
def test_word_count_of_book_base():
book = 'https://www.gutenberg.org/files/59560/59560-0.txt'
wc = deepsv.word_count(book)
assert wc == 30577
@patch('deepsv.deepsv.download_text', side_effect=side_effect_function)
def test_word_count_of_book(mock):
# book = 'https://www.gutenberg.org/files/59560/59560-0.txt'
wc = deepsv.word_count(mock.text)
assert wc == 8
def test_count_single_base():
sequence = 'TTAGGACCA'
assert deepsv.count_single_base('A', sequence) == 3
assert deepsv.count_single_base('C', sequence) == 2
assert deepsv.count_single_base('G', sequence) == 2
assert deepsv.count_single_base('T', sequence) == 2
def side_effect_get_sequence():
return 'GTACGTCAG'
@patch('deepsv.deepsv.get_sequence', return_value='GTACGTCAG')
def test_count_bases(sequence):
seq_dict = {'A': 2, 'C': 2, 'G': 3, 'T': 2}
assert deepsv.count_bases(sequence) == seq_dict
| 3.3125 | 3 |
tcex/bin/dep.py | phuerta-tc/tcex | 0 | 9999 | <filename>tcex/bin/dep.py
#!/usr/bin/env python
"""TcEx Dependencies Command"""
# standard library
import os
import platform
import shutil
import subprocess # nosec
import sys
from distutils.version import StrictVersion # pylint: disable=no-name-in-module
from pathlib import Path
from typing import List
from urllib.parse import quote
# third-party
import typer
# first-party
from tcex.app_config.models.tcex_json_model import LibVersionModel
from tcex.bin.bin_abc import BinABC
class Dep(BinABC):
"""Install dependencies for App."""
def __init__(
self,
branch: str,
no_cache_dir: bool,
proxy_host: str,
proxy_port: int,
proxy_user: str,
proxy_pass: str,
) -> None:
"""Initialize Class properties."""
super().__init__()
self.branch = branch
self.no_cache_dir = no_cache_dir
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
# properties
self.latest_version = None
self.lib_directory = (
f'lib_{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}'
)
self.proxy_enabled = False
self.proxy_env = {}
self.requirements_fqfn = Path('requirements.txt')
self.static_lib_dir = 'lib_latest'
# update tcex.json
self.tj.update.multiple()
def _build_command(self, python_executable: Path, lib_dir: Path) -> List[str]:
"""Build the pip command for installing dependencies.
Args:
python_executable: The fully qualified path of the Python executable.
lib_dir: The fully qualified path of the lib directory.
Returns:
list: The Python pip command with all required args.
"""
exe_command = [
str(python_executable),
'-m',
'pip',
'install',
'-r',
str(self.requirements_fqfn),
'--ignore-installed',
'--quiet',
'--target',
lib_dir.name,
]
if self.no_cache_dir:
exe_command.append('--no-cache-dir')
if self.proxy_enabled:
# trust the pypi hosts to avoid ssl errors
trusted_hosts = ['pypi.org', 'pypi.python.org', 'files.pythonhosted.org']
for host in trusted_hosts:
exe_command.append('--trusted-host')
exe_command.append(host)
return exe_command
def _create_lib_latest(self) -> None:
"""Create the lib_latest symlink for App Builder."""
if platform.system() == 'Windows':
shutil.copytree(f'lib_{self.latest_version}', self.static_lib_dir)
else:
if os.path.islink(self.static_lib_dir):
os.unlink(self.static_lib_dir)
elif os.path.isfile(self.static_lib_dir):
os.remove(self.static_lib_dir)
os.symlink(f'lib_{self.latest_version}', self.static_lib_dir)
@staticmethod
def _remove_previous(fqpn: Path) -> None:
"""Remove previous lib directory recursively."""
if os.access(fqpn, os.W_OK):
shutil.rmtree(fqpn)
def configure_proxy(self) -> None:
"""Configure proxy settings using environment variables."""
if os.getenv('HTTP_PROXY') or os.getenv('HTTPS_PROXY'):
# don't change proxy settings if the OS already has them configured.
return
if self.proxy_host is not None and self.proxy_port is not None:
# proxy url without auth
proxy_url = f'{self.proxy_host}:{self.proxy_port}'
if self.proxy_user is not None and self.proxy_pass is not None:
proxy_user = quote(self.proxy_user, safe='~')
proxy_pass = quote(self.proxy_pass, safe='~')
# proxy url with auth
proxy_url = f'{proxy_user}:{proxy_pass}@{proxy_url}'
# update proxy properties
self.proxy_enabled = True
self.proxy_env = {
'HTTP_PROXY': f'http://{proxy_url}',
'HTTPS_PROXY': f'http://{proxy_url}',
}
# display proxy setting
self.print_setting('Using Proxy Server', f'{self.proxy_host}:{self.proxy_port}')
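    # Editorial note (not part of the original source): with hypothetical values
    # proxy_host='10.0.0.1', proxy_port=3128 and credentials 'user'/'p@ss', the
    # block above would produce proxy_env like
    #   {'HTTP_PROXY': 'http://user:p%40ss@10.0.0.1:3128',
    #    'HTTPS_PROXY': 'http://user:p%40ss@10.0.0.1:3128'}
    # because quote(..., safe='~') percent-encodes the credentials.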
def create_temp_requirements(self) -> None:
"""Create a temporary requirements.txt.
This allows testing again a git branch instead of pulling from pypi.
"""
# Replace tcex version with develop branch of tcex
with self.requirements_fqfn.open() as fh:
current_requirements = fh.read().strip().split('\n')
self.requirements_fqfn = Path(f'temp-{self.requirements_fqfn}')
with self.requirements_fqfn.open(mode='w') as fh:
requirements = []
for line in current_requirements:
if not line:
continue
if line.startswith('tcex'):
line = (
'git+https://github.com/ThreatConnect-Inc/tcex.git@'
f'{self.branch}#egg=tcex'
)
requirements.append(line)
fh.write('\n'.join(requirements))
# display branch setting
self.print_setting('Using Branch', self.branch)
def install_deps(self) -> None:
"""Install Required Libraries using pip."""
# check for requirements.txt
if not self.requirements_fqfn.is_file():
self.handle_error('A requirements.txt file is required to install modules.')
# install all requested lib directories
for lib_version in self.lib_versions:
# remove lib directory from previous runs
self._remove_previous(lib_version.lib_dir)
if (
not lib_version.python_executable.is_file()
and not lib_version.python_executable.is_symlink()
):
# display error
typer.secho(
f'The Python executable ({lib_version.python_executable}) could not be found. '
'Skipping building lib directory for this Python version.',
fg=typer.colors.YELLOW,
)
continue
# display lib dir setting
self.print_setting('Lib Dir', f'{lib_version.lib_dir.name}')
# build the sub process command
exe_command = self._build_command(lib_version.python_executable, lib_version.lib_dir)
# display command setting
self.print_setting('Running', f'''{' '.join(exe_command)}''', fg_color='GREEN')
# recommended -> https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program
p = subprocess.Popen( # pylint: disable=consider-using-with
exe_command,
shell=False, # nosec
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.proxy_env,
)
_, err = p.communicate() # pylint: disable=unused-variable
if p.returncode != 0:
# display error
err = err.decode('utf-8')
failure_display = typer.style(
f'Failure: {err}', fg=typer.colors.WHITE, bg=typer.colors.RED
)
typer.echo(f'{failure_display}')
sys.exit(1)
# TODO: [low] can this be updated to use version from model?
# version comparison
try:
python_version = lib_version.lib_dir.name.split('_', 1)[1]
except IndexError:
python_version = None
self.handle_error('Could not determine version from lib string.')
# TODO: [low] investigate using sematic_version package
# track the latest Python version
if self.latest_version is None or StrictVersion(python_version) > StrictVersion(
self.latest_version
):
self.latest_version = python_version
if self.branch != 'master':
# remove temp requirements.txt file
self.requirements_fqfn.unlink()
# create lib_latest directory
self._create_lib_latest()
@property
def lib_versions(self) -> List[LibVersionModel]:
"""Return the lib_version data required to build lib directories."""
if self.tj.model.lib_versions:
self.print_setting('Python Version', 'using version(s) defined in tcex.json')
# return the python versions defined in the tcex.json file
return self.tj.model.lib_versions
# return the current python version
return [
LibVersionModel(**{'python_executable': sys.executable, 'lib_dir': self.lib_directory})
]
| 2.09375 | 2 |