max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
ppml/trusted-big-data-ml/python/docker-graphene/examples/orca_api_example.py | DirkFi/BigDL | 2,970 | 96284 |
<reponame>DirkFi/BigDL
from zoo.orca import init_orca_context, stop_orca_context
from tensorflow import keras
from zoo.pipeline.api.keras.layers import *
import argparse
import tensorflow as tf
import os
def bigdl_estimator():
from zoo.orca.learn.bigdl.estimator import Estimator
from tensorflow.python.keras.datasets import imdb
from tensorflow.python.keras.preprocessing import sequence
from zoo.pipeline.api.keras.models import Model
from zoo.pipeline.api.keras.objectives import SparseCategoricalCrossEntropy
from zoo.orca.data import XShards
from zoo.orca.learn.metrics import Accuracy
import numpy as np
# conf = {"spark.executor.extraJavaOptions": "-Xss512m", "spark.driver.extraJavaOptions": "-Xss512m"}
# init_orca_context(cluster_mode="local", cores=8, memory="16g")
init_orca_context(cluster_mode="local", cores=4, memory="16g")
max_features = 200
max_len = 20
print("running bigdl estimator")
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = x_train[:1000]
y_train = y_train[:1000]
x_test = x_test[-1000:]
y_test = y_test[-1000:]
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
train_pos = np.zeros((len(x_train), max_len), dtype=np.int32)
val_pos = np.zeros((len(x_test), max_len), dtype=np.int32)
for i in range(0, len(x_train)):
train_pos[i, :] = np.arange(max_len)
val_pos[i, :] = np.arange(max_len)
train_dataset = XShards.partition({"x": (x_train, train_pos), "y": np.array(y_train)})
val_dataset = XShards.partition({"x": (x_test, val_pos), "y": np.array(y_test)})
token_shape = (max_len,)
position_shape = (max_len,)
token_input = Input(shape=token_shape)
position_input = Input(shape=position_shape)
O_seq = TransformerLayer.init(vocab=max_features, hidden_size=128, n_head=8, seq_len=max_len)([token_input, position_input])
# Select the first output of the Transformer. The second is the pooled output.
O_seq = SelectTable(0)(O_seq)
O_seq = GlobalAveragePooling1D()(O_seq)
O_seq = Dropout(0.2)(O_seq)
outputs = Dense(2, activation='softmax')(O_seq)
model = Model([token_input, position_input], outputs)
model.summary()
batch_size = 64
print("Train started")
est = Estimator.from_bigdl(model=model, loss=SparseCategoricalCrossEntropy(), optimizer=Adam(), metrics=[Accuracy()])
est.set_constant_gradient_clipping(0.1, 0.2)
est.fit(data=train_dataset, batch_size=batch_size, epochs=1)
result = est.evaluate(val_dataset)
print(result)
est.clear_gradient_clipping()
est.set_l2_norm_gradient_clipping(0.5)
est.fit(data=train_dataset, batch_size=batch_size, epochs=1)
print("Train finished")
print("Evaluating started")
result = est.evaluate(val_dataset)
print(result)
print("Evaluating finished")
est.save('work/saved_model')
# est.load('work/saved_model')
print("load and save API finished")
est.get_train_summary(tag='Loss')
est.get_validation_summary(tag='Top1Accuracy')
print("get summary API finished")
stop_orca_context()
def tf_estimator():
from zoo.orca.learn.tf.estimator import Estimator
init_orca_context(cluster_mode="local", cores=4, memory="3g")
os.environ["HDF5_USE_FILE_LOCKING"] = 'FALSE'
print("running tf estimator")
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=1000)
# print(train_data)
word_index = imdb.get_word_index()
word_index = {k: (v + 3) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
train_data = keras.preprocessing.sequence.pad_sequences(train_data, value=word_index["<PAD>"], padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data, value=word_index["<PAD>"], padding='post',
maxlen=256)
model = keras.Sequential()
model.add(keras.layers.Embedding(1000, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['acc'])
x_val = train_data[:1000]
partial_x_train = train_data[1000:]
y_val = train_labels[:1000]
partial_y_train = train_labels[1000:]
train_dataset = tf.data.Dataset.from_tensor_slices((partial_x_train, partial_y_train))
validation_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
est = Estimator.from_keras(keras_model=model)
est.set_constant_gradient_clipping(0.1, 0.2)
est.fit(data=train_dataset, batch_size=512, epochs=5, validation_data=validation_dataset)
results = est.evaluate(validation_dataset)
print(results)
est.clear_gradient_clipping()
est.set_l2_norm_gradient_clipping(0.1)
est.fit(data=train_dataset, batch_size=512, epochs=5, validation_data=validation_dataset)
results = est.evaluate(validation_dataset)
print(results)
est.save('work/saved_model')
print("save API finished")
# est.save_tf_checkpoint('work/checkpoint')
# est.load_tf_checkpoint('work/checkpoint')
print("checkpoint save and load API finished")
est.save_keras_model('work/keras_model')
est.save_keras_weights('work/keras_weights')
print("keras model and weights save API finished")
# est.load_keras_model('work/keras_model')
# est.load_keras_weights('work')
print("keras model and weights load API finished")
est.get_train_summary(tag='Loss')
est.get_validation_summary(tag='Top1Accuracy')
# Estimator.load(est, model_path='work/') # Has not been implemented
# results = est.predict(validation_dataset)
# print(results)
stop_orca_context()
def tf2_estimator():
from zoo.orca.learn.tf2.estimator import Estimator
# import ray
init_orca_context(cluster_mode="local", cores=4, memory="3g")
print("running tf2 estimator")
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=1000)
# print(train_data)
word_index = imdb.get_word_index()
word_index = {k: (v + 3) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
train_data = keras.preprocessing.sequence.pad_sequences(train_data, value=word_index["<PAD>"], padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data, value=word_index["<PAD>"], padding='post',
maxlen=256)
# The Orca TF2 Estimator expects model_creator to be a function that builds and
# compiles the model, so the model definition is wrapped in a creator instead of
# being passed as an already-built instance.
def model_creator(config):
    model = keras.Sequential()
    model.add(keras.layers.Embedding(1000, 16))
    model.add(keras.layers.GlobalAveragePooling1D())
    model.add(keras.layers.Dense(16, activation=tf.nn.relu))
    model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
    model.summary()
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model
x_val = train_data[:1000]
partial_x_train = train_data[1000:]
y_val = train_labels[:1000]
partial_y_train = train_labels[1000:]
train_dataset = tf.data.Dataset.from_tensor_slices((partial_x_train, partial_y_train))
validation_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
est = Estimator.from_keras(model_creator=model_creator)
est.fit(data=train_dataset, batch_size=512, epochs=100, validation_data=validation_dataset)
results = est.evaluate(validation_dataset)
print(results)
est.save('work/saved_model')
est.get_train_summary(tag='Loss')
est.get_validation_summary(tag='Top1Accuracy')
stop_orca_context()
def pytorch_estimator():
print("running pytorch estimator")
return
def openvino_estimator():
print("running openvino estimator")
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default="bigdl",
help='The mode for the examples. bigdl, tf, tf2, pytorch or openvino.')
args = parser.parse_args()
mode = args.mode
if mode == "bigdl":
bigdl_estimator()
elif mode == "tf":
tf_estimator()
elif mode == "tf2":
tf2_estimator()
elif mode == "pytorch":
pytorch_estimator()
else:
openvino_estimator()
|
gym_chess/test/v1/test_run_moves.py | genyrosk/gym-chess | 115 | 96294 |
from copy import copy
import numpy as np
from gym_chess import ChessEnvV1
from gym_chess.envs.chess_v1 import (
KING_ID,
QUEEN_ID,
ROOK_ID,
BISHOP_ID,
KNIGHT_ID,
PAWN_ID,
)
from gym_chess.test.utils import run_test_funcs
# Blank board
BASIC_BOARD = np.array([[0] * 8] * 8, dtype=np.int8)
# Pawn basic movements
def test_pawn_basic_moves():
BOARD = copy(BASIC_BOARD)
BOARD[6, 0] = PAWN_ID
BOARD[1, 0] = -PAWN_ID
env = ChessEnvV1(opponent="none", initial_state=BOARD)
# move 1 (white)
actions = env.get_possible_actions()
env.step(actions[0])
# move 2 (black)
actions = env.get_possible_actions()
env.step(actions[0])
# move 3 (white)
actions = env.get_possible_actions()
env.step(actions[0])
# move 4 (black)
actions = env.get_possible_actions()
env.step(actions[0])
EXPECTED_BOARD = copy(BASIC_BOARD)
EXPECTED_BOARD[4, 0] = PAWN_ID
EXPECTED_BOARD[3, 0] = -PAWN_ID
assert (env.state == EXPECTED_BOARD).all()
if __name__ == "__main__":
run_test_funcs(__name__)
|
reddit2telegram/channels/r_00ag9603/app.py | mainyordle/reddit2telegram | 187 | 96302 |
#encoding:utf-8
subreddit = '00ag9603'
t_channel = '@r_00ag9603'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
benchmarks/scripts-report/plot_warmup_graalphp_native.py | avierr/graalphp | 267 | 96346 |
from bench_db import get_timings_by_id
from bench_graphs import do_warmup_plot
num_iter = 7
color_copy_by_ref = 'green'
name = 'native-'
def warmup_all_plots():
warmup_plot_fannkuch()
warmup_plot_spectralnorm()
warmup_plot_bintree()
def warmup_plot_fannkuch():
ids = [
100 # fannkuchredux-1, 2020-08-30 21:14, graalphp-native
, 102 # fannkuchredux-1, 2020-08-30 21:39, graalphp-native
, 104 # fannkuchredux-1, 2020-08-30 22:03, graalphp-native
, 106 # fannkuchredux-1, 2020-08-30 22:28, graalphp-native
, 108 # fannkuchredux-1, 2020-08-30 22:52, graalphp-native
, 110 # fannkuchredux-1, 2020-08-30 23:16, graalphp-native
, 112 # fannkuchredux-1, 2020-08-30 23:40, graalphp-native
, 114 # fannkuchredux-1, 2020-08-31 00:04, graalphp-native
, 116 # fannkuchredux-1, 2020-08-31 00:28, graalphp-native
]
runs = [get_timings_by_id(i, warmup=0) for i in ids]
do_warmup_plot('fannkuchredux \ncopy-by-val', runs, num_iter=num_iter, subtitle='',
file_prefix=name)
pass
def warmup_plot_spectralnorm():
ids_by_val = [
118 # spectralnorm-by-val, 2020-08-31 00:31:52, graalphp-native
, 122 # spectralnorm-by-val, 2020-08-31 00:37:51, graalphp-native
, 126 # spectralnorm-by-val, 2020-08-31 00:43:51, graalphp-native
, 130 # spectralnorm-by-val, 2020-08-31 00:49:50, graalphp-native
, 134 # spectralnorm-by-val, 2020-08-31 00:55:49, graalphp-native
, 138 # spectralnorm-by-val, 2020-08-31 01:01:48, graalphp-native
, 142 # spectralnorm-by-val, 2020-08-31 01:07:47, graalphp-native
, 146 # spectralnorm-by-val, 2020-08-31 01:13:47, graalphp-native
, 150 # spectralnorm-by-val, 2020-08-31 01:19:46, graalphp-native
]
runs = [get_timings_by_id(i, warmup=0) for i in ids_by_val]
do_warmup_plot('spectralnorm \ncopy-by-val', runs, num_iter=num_iter,
file_prefix=name)
ids_by_ref = [
120 # spectralnorm-by-ref 2020-08-31 00:34:57, graalphp-native
, 124 # spectralnorm-by-ref 2020-08-31 00:40:56, graalphp-native
, 128 # spectralnorm-by-ref 2020-08-31 00:46:55, graalphp-native
, 132 # spectralnorm-by-ref 2020-08-31 00:52:54, graalphp-native
, 136 # spectralnorm-by-ref 2020-08-31 00:58:54, graalphp-native
, 140 # spectralnorm-by-ref 2020-08-31 01:04:53, graalphp-native
, 144 # spectralnorm-by-ref 2020-08-31 01:10:52, graalphp-native
, 148 # spectralnorm-by-ref 2020-08-31 01:16:51, graalphp-native
, 152 # spectralnorm-by-ref 2020-08-31 01:22:50, graalphp-native
]
runs = [get_timings_by_id(i, warmup=0) for i in ids_by_ref]
do_warmup_plot('spectralnorm \ncopy-by-ref', runs, num_iter=num_iter,
color=color_copy_by_ref,
file_prefix=name)
pass
def warmup_plot_bintree():
ids_by_val = [
156 # binary-trees-by-val, 2020-09-01 01:57:02, graalphp-native
, 160 # binary-trees-by-val, 2020-09-01 07:20:34, graalphp-native
, 164 # binary-trees-by-val, 2020-09-01 12:48:38, graalphp-native
]
runs = [get_timings_by_id(i, warmup=0) for i in ids_by_val]
do_warmup_plot('binary-trees \ncopy-by-val', runs, num_iter=num_iter,
file_prefix=name)
ids_by_ref = [
169 # binary-trees-by-ref, 2020-09-01 14:40:30, graalphp-native
, 171 # binary-trees-by-ref, 2020-09-01 14:48:10, graalphp-native
, 173 # binary-trees-by-ref, 2020-09-01 14:55:38, graalphp-native
, 175 # binary-trees-by-ref, 2020-09-01 15:03:06, graalphp-native
, 177 # binary-trees-by-ref, 2020-09-01 15:10:40, graalphp-native
, 179 # binary-trees-by-ref, 2020-09-01 15:18:15, graalphp-native
, 181 # binary-trees-by-ref, 2020-09-01 15:25:48, graalphp-native
, 183 # binary-trees-by-ref, 2020-09-01 15:33:23, graalphp-native
, 185 # binary-trees-by-ref, 2020-09-01 15:40:56, graalphp-native
]
runs = [get_timings_by_id(i, warmup=0) for i in ids_by_ref]
do_warmup_plot('binary-trees \ncopy-by-ref', runs, num_iter=num_iter,
color=color_copy_by_ref,
file_prefix=name)
pass
if __name__ == '__main__':
warmup_plot_fannkuch()
warmup_plot_spectralnorm()
warmup_plot_bintree()
|
Python/Tests/TestData/DebuggerProject/InfiniteRun.py | techkey/PTVS | 404 | 96357 |
x = 1000000
while True:
y = x
z = x + 1
x = z + 1
|
demos/chat-group/handlers/rest.py | karldoenitz/karlooper | 161 | 96370 |
# -*- coding: utf-8 -*-
from karlooper.config import get_global_conf
from karlooper.web.request import Request
__author__ = "<EMAIL>"
class MessageHandler(Request):
def get(self):
redis_manager = get_global_conf("redis")
value = redis_manager.get_value()
result = {
"status": 0,
"desc": "ok",
"data": value
}
return self.response_as_json(result)
def post(self):
print(self.get_http_request_message())
from_user = self.get_parameter("from")
new_value = self.decode_parameter_plus("value")
redis_manager = get_global_conf("redis")
redis_manager.set_value([str(from_user)+":"+new_value])
result = {
"status": 0,
"desc": "ok"
}
return self.response_as_json(result)
|
flatdata-generator/tests/generators/py_expectations/archives/subarchive.py | heremaps/flatdata | 140 | 96383 |
<reponame>heremaps/flatdata
class n_X(flatdata.archive.Archive):
_SCHEMA = """namespace n {
archive X
{
payload : raw_data;
}
}
"""
_PAYLOAD_SCHEMA = """namespace n {
archive X
{
payload : raw_data;
}
}
"""
_PAYLOAD_DOC = """"""
_NAME = "X"
_RESOURCES = {
"X.archive" : flatdata.archive.ResourceSignature(
container=flatdata.resources.RawData,
initializer=None,
schema=_SCHEMA,
is_optional=False,
doc="Archive signature"),
"payload": flatdata.archive.ResourceSignature(container=flatdata.resources.RawData,
initializer=None,
schema=_PAYLOAD_SCHEMA,
is_optional=False,
doc=_PAYLOAD_DOC),
}
def __init__(self, resource_storage):
flatdata.archive.Archive.__init__(self, resource_storage)
class n_XBuilder(flatdata.archive_builder.ArchiveBuilder):
_SCHEMA = """namespace n {
archive X
{
payload : raw_data;
}
}
"""
_PAYLOAD_SCHEMA = """namespace n {
archive X
{
payload : raw_data;
}
}
"""
_PAYLOAD_DOC = """"""
_NAME = "X"
_RESOURCES = {
"X.archive" : flatdata.archive_builder.ResourceSignature(
container=flatdata.resources.RawData,
initializer=None,
schema=_SCHEMA,
is_optional=False,
doc="Archive signature"),
"payload": flatdata.archive_builder.ResourceSignature(container=flatdata.resources.RawData,
initializer=None,
schema=_PAYLOAD_SCHEMA,
is_optional=False,
doc=_PAYLOAD_DOC),
}
def __init__(self, resource_storage):
flatdata.archive_builder.ArchiveBuilder.__init__(self, resource_storage)
class n_A(flatdata.archive.Archive):
_SCHEMA = """namespace n {
archive X
{
payload : raw_data;
}
}
namespace n {
archive A
{
data : archive .n.X;
@optional
optional_data : archive .n.X;
}
}
"""
_DATA_SCHEMA = """namespace n {
archive X
{
payload : raw_data;
}
}
namespace n {
archive A
{
data : archive .n.X;
}
}
"""
_DATA_DOC = """"""
_OPTIONAL_DATA_SCHEMA = """namespace n {
archive X
{
payload : raw_data;
}
}
namespace n {
archive A
{
@optional
optional_data : archive .n.X;
}
}
"""
_OPTIONAL_DATA_DOC = """"""
_NAME = "A"
_RESOURCES = {
"A.archive" : flatdata.archive.ResourceSignature(
container=flatdata.resources.RawData,
initializer=None,
schema=_SCHEMA,
is_optional=False,
doc="Archive signature"),
"data": flatdata.archive.ResourceSignature(container=flatdata.archive.Archive,
initializer=n_X,
schema=_DATA_SCHEMA,
is_optional=False,
doc=_DATA_DOC),
"optional_data": flatdata.archive.ResourceSignature(container=flatdata.archive.Archive,
initializer=n_X,
schema=_OPTIONAL_DATA_SCHEMA,
is_optional=True,
doc=_OPTIONAL_DATA_DOC),
}
def __init__(self, resource_storage):
flatdata.archive.Archive.__init__(self, resource_storage)
class n_ABuilder(flatdata.archive_builder.ArchiveBuilder):
_SCHEMA = """namespace n {
archive X
{
payload : raw_data;
}
}
namespace n {
archive A
{
data : archive .n.X;
@optional
optional_data : archive .n.X;
}
}
"""
_DATA_SCHEMA = """namespace n {
archive X
{
payload : raw_data;
}
}
namespace n {
archive A
{
data : archive .n.X;
}
}
"""
_DATA_DOC = """"""
_OPTIONAL_DATA_SCHEMA = """namespace n {
archive X
{
payload : raw_data;
}
}
namespace n {
archive A
{
@optional
optional_data : archive .n.X;
}
}
"""
_OPTIONAL_DATA_DOC = """"""
_NAME = "A"
_RESOURCES = {
"A.archive" : flatdata.archive_builder.ResourceSignature(
container=flatdata.resources.RawData,
initializer=None,
schema=_SCHEMA,
is_optional=False,
doc="Archive signature"),
"data": flatdata.archive_builder.ResourceSignature(container=flatdata.archive.Archive,
initializer=n_X,
schema=_DATA_SCHEMA,
is_optional=False,
doc=_DATA_DOC),
"optional_data": flatdata.archive_builder.ResourceSignature(container=flatdata.archive.Archive,
initializer=n_X,
schema=_OPTIONAL_DATA_SCHEMA,
is_optional=True,
doc=_OPTIONAL_DATA_DOC),
}
def __init__(self, resource_storage):
flatdata.archive_builder.ArchiveBuilder.__init__(self, resource_storage)
|
tensorflow_constrained_optimization/python/rates/helpers_test.py | RMKruse/tensorflow_constrained_optimization | 276 | 96449 |
# Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for helpers.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_constrained_optimization.python.rates import helpers
# These tests use some placeholder Tensors, so we want to make sure that they
# execute in graph mode.
tf.compat.v1.disable_eager_execution()
class HelpersTest(tf.test.TestCase):
"""Tests for helper functions in helpers.py."""
def test_convert_to_1d_tensor(self):
"""Tests the "convert_to_1d_tensor" function."""
self.assertFalse(tf.executing_eagerly())
# Trying to make a rank-1 Tensor from a 0d Tensor should succeed.
expected = [2.7]
actual = helpers.convert_to_1d_tensor(2.7)
with self.session() as session:
self.assertAllClose(expected, session.run(actual), rtol=0, atol=1e-6)
# Trying to make a rank-1 Tensor from a rank-1 Tensor should succeed.
expected = [-6.3, 1.0, 5.1]
actual = helpers.convert_to_1d_tensor(expected)
with self.session() as session:
self.assertAllClose(expected, session.run(actual), rtol=0, atol=1e-6)
# Trying to make a rank-1 Tensor from a shape-(1,2,1) Tensor should succeed
# (only one of the dimensions is nontrivial).
expected = [3.6, -1.7]
actual = helpers.convert_to_1d_tensor([[[3.6], [-1.7]]])
with self.session() as session:
self.assertAllClose(expected, session.run(actual), rtol=0, atol=1e-6)
# Trying to make a rank-1 Tensor from a shape-(1,None,1) Tensor should
# succeed (only one of the dimensions is nontrivial).
expected = [0.2, -2.4, 0.0]
placeholder = tf.compat.v1.placeholder(tf.float32, shape=(1, None, 1))
actual = helpers.convert_to_1d_tensor(placeholder)
with self.session() as session:
self.assertAllClose(
expected,
session.run(
actual, feed_dict={placeholder: [[[0.2], [-2.4], [0.0]]]}),
rtol=0,
atol=1e-6)
# Trying to make a rank-1 Tensor from a rank-2 Tensor should fail.
with self.assertRaises(ValueError):
_ = helpers.convert_to_1d_tensor([[1, 2], [3, 4]])
# Trying to make a rank-1 Tensor from a shape-(None,2) Tensor should fail.
placeholder = tf.compat.v1.placeholder(tf.float32, shape=(None, 2))
with self.assertRaises(ValueError):
_ = helpers.convert_to_1d_tensor(placeholder)
def test_get_num_columns_of_2d_tensor(self):
"""Tests the "get_num_columns_of_2d_tensor" function."""
self.assertFalse(tf.executing_eagerly())
# Trying to get the number of columns from a non-tensor should fail.
with self.assertRaises(TypeError):
_ = helpers.get_num_columns_of_2d_tensor([[1, 2], [3, 4]])
# Trying to get the number of columns from a rank-1 tensor should fail.
tensor = tf.convert_to_tensor([1, 2, 3, 4])
with self.assertRaises(ValueError):
_ = helpers.get_num_columns_of_2d_tensor(tensor)
# Make sure that we successfully get the number of columns.
tensor = tf.convert_to_tensor([[1, 2, 3]])
self.assertEqual(3, helpers.get_num_columns_of_2d_tensor(tensor))
def test_get_num_elements_of_tensor(self):
"""Tests the "get_num_elements_of_tensor" function."""
self.assertFalse(tf.executing_eagerly())
# Trying to get the number of elements of a non-tensor should fail.
with self.assertRaises(TypeError):
_ = helpers.get_num_elements_of_tensor([[1, 2], [3, 4]])
# Trying to get the number of elements of a tensor with unknown shape should
# fail.
tensor = tf.compat.v1.placeholder(tf.float32, shape=None)
with self.assertRaises(ValueError):
_ = helpers.get_num_elements_of_tensor(tensor)
# Trying to get the number of elements of a tensor with partially-unknown
# shape should fail.
tensor = tf.compat.v1.placeholder(tf.float32, shape=(1, None, 1))
with self.assertRaises(ValueError):
_ = helpers.get_num_elements_of_tensor(tensor)
# Make sure that we successfully get the number of elements.
tensor = tf.convert_to_tensor([[1, 2, 3], [4, 5, 6]])
self.assertEqual(6, helpers.get_num_elements_of_tensor(tensor))
class UniqueListTest(tf.test.TestCase):
"""Tests for `UniqueList` classes."""
def test_construct(self):
"""Tests the `UniqueList` constructor."""
element1 = 1
element2 = 2
element3 = element1
element4 = 4
element5 = element4
element6 = 6
unique_list = helpers.UniqueList(
[element1, element2, element3, element4, element5, element6])
self.assertEqual(4, len(unique_list))
self.assertEqual([element1, element2, element4, element6], unique_list.list)
def test_append_raises(self):
"""Tests that "append" raises when given the wrong type."""
unique_list = helpers.UniqueList(element_type=list)
self.assertEqual(0, len(unique_list))
self.assertEqual([], unique_list.list)
with self.assertRaises(TypeError):
# Since we passed element_type=list to the UniqueList constructor,
# attempting to add any non-list should raise.
unique_list.append(42)
def test_add(self):
"""Tests `UniqueList`'s "__add__" method."""
element1 = 1
element2 = 2
element3 = element1
element4 = 4
element5 = element4
element6 = 6
lhs = [element1, element2]
rhs = [element3, element4, element5, element6]
unique_list = helpers.UniqueList(lhs)
self.assertEqual(2, len(unique_list))
self.assertEqual([element1, element2], unique_list.list)
unique_list += rhs
self.assertEqual(4, len(unique_list))
self.assertEqual([element1, element2, element4, element6], unique_list.list)
def test_radd(self):
"""Tests `UniqueList`'s "__radd__" method."""
element1 = 1
element2 = 2
element3 = element1
element4 = 4
element5 = element4
element6 = 6
lhs = [element1, element2]
rhs = [element3, element4, element5, element6]
unique_list = helpers.UniqueList(rhs)
self.assertEqual(3, len(unique_list))
self.assertEqual([element1, element4, element6], unique_list.list)
unique_list = lhs + unique_list
self.assertEqual(4, len(unique_list))
self.assertEqual([element1, element2, element4, element6], unique_list.list)
if __name__ == "__main__":
tf.test.main()
|
configs/_dynamic_/rules/ar50to101_ft1x_rules.py | zengming16/GAIA-det | 149 | 96459 |
model_space_filename = 'path/to/metrics.json'
model_sampling_rules = dict(
type='sequential',
rules=[
# 1. select model with best performance, could replace with your own metrics
dict(
type='sample',
operation='top',
# replace with customized metric in your own tasks, e.g. `metric.finetune.bdd100k_bbox_mAP`
key='metric.finetune.coco_bbox_mAP',
value=1,
mode='number',
),
])
|
pyperformance/utils.py | cvautounix/pyperformance | 266 | 96484 |
import contextlib
import errno
import os
import sys
import tempfile
MS_WINDOWS = (sys.platform == 'win32')
@contextlib.contextmanager
def temporary_file():
tmp_filename = tempfile.mktemp()
try:
yield tmp_filename
finally:
try:
os.unlink(tmp_filename)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
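# Usage sketch (not part of the original module): temporary_file() yields a scratch
# path and removes the file on exit, tolerating the case where it was never created.
if __name__ == "__main__":
    with temporary_file() as tmp_name:
        with open(tmp_name, "w") as fp:
            fp.write("scratch data")
    assert not os.path.exists(tmp_name)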
|
sqlite3_db/push_request.py | spirit1431007/qiandao-1 | 763 | 96514 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Binux <<EMAIL>>
import config
from db.push_request import PRDB as _PRDB
from .basedb import BaseDB
class PRDB(_PRDB, BaseDB):
def __init__(self, path=config.sqlite3.path):
self.path = path
self._execute('''CREATE TABLE IF NOT EXISTS `%s` (
`id` INTEGER PRIMARY KEY,
`from_tplid` INT UNSIGNED NOT NULL,
`from_userid` INT UNSIGNED NOT NULL,
`to_tplid` INT UNSIGNED NULL,
`to_userid` INT UNSIGNED NULL,
`status` TINYINT NOT NULL DEFAULT 0,
`msg` VARCHAR(1024) NULL,
`ctime` INT UNSIGNED NOT NULL,
`mtime` INT UNSIGNED NOT NULL,
`atime` INT UNSIGNED NOT NULL
)''' % self.__tablename__)
for each in ('to_userid', 'status'):
self._execute('''CREATE INDEX IF NOT EXISTS `ix_%s_%s` ON %s (%s)''' % (
self.__tablename__, each, self.__tablename__, each))
|
matrix-python-project/cover_generator/typesetting/model/four.py | hokaso/hocassian-media-matrix | 141 | 96541 |
<filename>matrix-python-project/cover_generator/typesetting/model/four.py
import sys, os, time, json, random
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from cover_generator.typesetting.more import More
from cover_generator.typesetting.mark import Mark
from cover_generator.typesetting.build import Build
from utils.snow_id import SnowId
sys.path.append(os.getcwd())
class Four(object):
def __init__(self, folder_key):
self.image_list = None
self.rank_model = None
self.tb = None
with open("cover_generator/typesetting/style.json", 'r') as f0:
style_config = json.load(f0)
self.model = style_config["four"]
self.func_map = {
1: self.quadruple_vertical_build,
2: self.quadruple_horizontal_build,
3: self.chairs_build,
4: self.chairs_spin_build,
5: self.h2v2_build,
6: self.h2v2_spin_build,
7: self.windows_build,
8: self.windows_vertical_build,
9: self.windows_horizontal_build,
}
self._build = Build(folder_key, folder_key + "_temp")
def quadruple_vertical(self, image_list):
return More(image_list, self.model[0]["unit_detail"], "41").main()
def quadruple_horizontal(self, image_list):
return More(image_list, self.model[1]["unit_detail"], "42").main()
def chairs(self, image_list):
return More(image_list, self.model[2]["unit_detail"], "43").main()
def chairs_spin(self, image_list):
return More(image_list, self.model[3]["unit_detail"], "44").main()
def h2v2(self, image_list):
return More(image_list, self.model[4]["unit_detail"], "45").main()
def h2v2_spin(self, image_list):
return More(image_list, self.model[5]["unit_detail"], "46").main()
def windows(self, image_list):
return More(image_list, self.model[6]["unit_detail"], "47").main()
def windows_vertical(self, image_list):
return More(image_list, self.model[7]["unit_detail"], "48").main()
def windows_horizontal(self, image_list):
return More(image_list, self.model[8]["unit_detail"], "49").main()
def build(self, image_list, model):
self.tb = Image.open("cover_generator/background.jpg")
self.image_list = image_list
self.rank_model = model
self.func_map[int(model["model_id"][1])]()
def quadruple_vertical_build(self):
# Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[0]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[0]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[0]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# Paste the fourth image
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[0]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# Randomly shuffle the images that share the same width and height
pic_list = [pic_1, pic_2, pic_3]
random.shuffle(pic_list)
# The layout also needs to be shuffled
kind = random.randint(0, 1)
# Save
if kind == 0:
self.tb.paste(pic_list[0], (0, 0))
self.tb.paste(pic_list[1], (0, 480))
self.tb.paste(pic_list[2], (0, 960))
self.tb.paste(pic_4, (540, 0))
else:
self.tb.paste(pic_list[0], (540, 0))
self.tb.paste(pic_list[1], (540, 480))
self.tb.paste(pic_list[2], (540, 960))
self.tb.paste(pic_4, (0, 0))
self._build.save(self.tb)
def quadruple_horizontal_build(self):
# Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[1]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[1]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[1]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# Paste the fourth image
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[1]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# Randomly shuffle the images that share the same width and height
pic_list = [pic_1, pic_2, pic_3]
random.shuffle(pic_list)
# The layout also needs to be shuffled
kind = random.randint(0, 1)
# {
# "width": 360,
# "height": 720
# },
# {
# "width": 360,
# "height": 720
# },
# {
# "width": 360,
# "height": 720
# },
# {
# "width": 1080,
# "height": 720
# }
# Save
if kind == 0:
self.tb.paste(pic_list[0], (0, 0))
self.tb.paste(pic_list[1], (360, 0))
self.tb.paste(pic_list[2], (720, 0))
self.tb.paste(pic_4, (0, 720))
else:
self.tb.paste(pic_list[0], (0, 720))
self.tb.paste(pic_list[1], (360, 720))
self.tb.paste(pic_list[2], (720, 720))
self.tb.paste(pic_4, (0, 0))
self._build.save(self.tb)
def chairs_build(self):
# Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[2]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[2]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[2]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# Paste the fourth image
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[2]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# Randomly shuffle the images that share the same width and height
pic_list = [pic_2, pic_3]
random.shuffle(pic_list)
# The layout also needs to be shuffled
kind = random.randint(0, 3)
# {
# "width": 720,
# "height": 720
# },
# {
# "width": 360,
# "height": 720
# },
# {
# "width": 360,
# "height": 720
# },
# {
# "width": 360,
# "height": 1440
# }
# Save
if kind == 0:
self.tb.paste(pic_1, (0, 0))
self.tb.paste(pic_list[1], (0, 720))
self.tb.paste(pic_list[0], (360, 720))
self.tb.paste(pic_4, (720, 0))
elif kind == 1:
self.tb.paste(pic_1, (360, 0))
self.tb.paste(pic_list[1], (360, 720))
self.tb.paste(pic_list[0], (720, 720))
self.tb.paste(pic_4, (0, 0))
elif kind == 2:
self.tb.paste(pic_1, (0, 720))
self.tb.paste(pic_list[1], (0, 0))
self.tb.paste(pic_list[0], (360, 0))
self.tb.paste(pic_4, (720, 0))
else:
self.tb.paste(pic_1, (360, 720))
self.tb.paste(pic_list[1], (360, 0))
self.tb.paste(pic_list[0], (720, 0))
self.tb.paste(pic_4, (0, 0))
self._build.save(self.tb)
def chairs_spin_build(self):
# Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[3]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[3]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[3]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# Paste the fourth image
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[3]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# Randomly shuffle the images that share the same width and height
pic_list = [pic_3, pic_4]
random.shuffle(pic_list)
# The layout also needs to be shuffled
kind = random.randint(0, 3)
# Save
# {
# "width": 1080,
# "height": 480
# },
# {
# "width": 540,
# "height": 960
# },
# {
# "width": 540,
# "height": 480
# },
# {
# "width": 540,
# "height": 480
# }
if kind == 0:
self.tb.paste(pic_1, (0, 0))
self.tb.paste(pic_2, (0, 480))
self.tb.paste(pic_list[1], (540, 480))
self.tb.paste(pic_list[0], (540, 960))
elif kind == 1:
self.tb.paste(pic_1, (0, 0))
self.tb.paste(pic_2, (540, 480))
self.tb.paste(pic_list[1], (0, 480))
self.tb.paste(pic_list[0], (0, 960))
elif kind == 2:
self.tb.paste(pic_1, (0, 960))
self.tb.paste(pic_2, (0, 0))
self.tb.paste(pic_list[1], (540, 0))
self.tb.paste(pic_list[0], (540, 480))
else:
self.tb.paste(pic_1, (0, 960))
self.tb.paste(pic_2, (540, 0))
self.tb.paste(pic_list[1], (0, 480))
self.tb.paste(pic_list[0], (0, 0))
self._build.save(self.tb)
def h2v2_build(self):
# Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[4]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[4]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[4]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# Paste the fourth image
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[4]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# Randomly shuffle the images that share the same width and height
pic_list_1 = [pic_1, pic_2]
random.shuffle(pic_list_1)
pic_list_2 = [pic_3, pic_4]
random.shuffle(pic_list_2)
# The layout also needs to be shuffled; there are three layouts here
kind = random.randint(0, 2)
# Save
if kind == 0:
self.tb.paste(pic_list_1[0], (0, 0))
self.tb.paste(pic_list_1[1], (0, 720))
self.tb.paste(pic_list_2[0], (360, 0))
self.tb.paste(pic_list_2[1], (720, 0))
elif kind == 1:
self.tb.paste(pic_list_1[0], (720, 0))
self.tb.paste(pic_list_1[1], (720, 720))
self.tb.paste(pic_list_2[0], (0, 0))
self.tb.paste(pic_list_2[1], (360, 0))
else:
self.tb.paste(pic_list_1[0], (360, 0))
self.tb.paste(pic_list_1[1], (360, 720))
self.tb.paste(pic_list_2[0], (0, 0))
self.tb.paste(pic_list_2[1], (720, 0))
self._build.save(self.tb)
def h2v2_spin_build(self):
# Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[5]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[5]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[5]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# Paste the fourth image
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[5]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# Randomly shuffle the images that share the same width and height
pic_list_1 = [pic_1, pic_2]
random.shuffle(pic_list_1)
pic_list_2 = [pic_3, pic_4]
random.shuffle(pic_list_2)
# The layout also needs to be shuffled; there are three layouts here
kind = random.randint(0, 2)
# Save
# {
# "width": 1080,
# "height": 480
# },
# {
# "width": 1080,
# "height": 480
# },
# {
# "width": 540,
# "height": 480
# },
# {
# "width": 540,
# "height": 480
# }
if kind == 0:
self.tb.paste(pic_list_1[0], (0, 0))
self.tb.paste(pic_list_1[1], (0, 480))
self.tb.paste(pic_list_2[0], (0, 960))
self.tb.paste(pic_list_2[1], (540, 960))
elif kind == 1:
self.tb.paste(pic_list_1[0], (0, 480))
self.tb.paste(pic_list_1[1], (0, 960))
self.tb.paste(pic_list_2[0], (0, 0))
self.tb.paste(pic_list_2[1], (540, 0))
else:
self.tb.paste(pic_list_1[0], (0, 0))
self.tb.paste(pic_list_1[1], (0, 960))
self.tb.paste(pic_list_2[0], (0, 480))
self.tb.paste(pic_list_2[1], (540, 480))
self._build.save(self.tb)
def windows_build(self):
# Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[6]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[6]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[6]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# Paste the fourth image
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[6]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# Randomly shuffle the images that share the same width and height
pic_list = [pic_1, pic_2, pic_3, pic_4]
random.shuffle(pic_list)
self.tb.paste(pic_list[0], (0, 0))
self.tb.paste(pic_list[1], (540, 0))
self.tb.paste(pic_list[2], (0, 720))
self.tb.paste(pic_list[3], (540, 720))
self._build.save(self.tb)
def windows_vertical_build(self):
# Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[7]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[7]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[7]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# Paste the fourth image
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[7]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# Randomly shuffle the images that share the same width and height
pic_list_1 = [pic_1, pic_2]
random.shuffle(pic_list_1)
pic_list_2 = [pic_3, pic_4]
random.shuffle(pic_list_2)
# The layout also needs to be shuffled; there are two layouts here
kind = random.randint(0, 1)
# Save
if kind == 0:
self.tb.paste(pic_list_1[0], (0, 0))
self.tb.paste(pic_list_1[1], (360, 720))
self.tb.paste(pic_list_2[0], (720, 0))
self.tb.paste(pic_list_2[1], (0, 720))
else:
self.tb.paste(pic_list_1[0], (360, 0))
self.tb.paste(pic_list_1[1], (0, 720))
self.tb.paste(pic_list_2[0], (0, 0))
self.tb.paste(pic_list_2[1], (720, 720))
self._build.save(self.tb)
def windows_horizontal_build(self):
# Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[8]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[8]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[8]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# Paste the fourth image
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[8]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# Randomly shuffle the images that share the same width and height
pic_list_1 = [pic_1, pic_2]
random.shuffle(pic_list_1)
pic_list_2 = [pic_3, pic_4]
random.shuffle(pic_list_2)
# The layout also needs to be shuffled; there are two layouts here
kind = random.randint(0, 1)
# Save
if kind == 0:
self.tb.paste(pic_list_1[0], (0, 0))
self.tb.paste(pic_list_1[1], (540, 1080))
self.tb.paste(pic_list_2[0], (540, 0))
self.tb.paste(pic_list_2[1], (0, 360))
else:
self.tb.paste(pic_list_1[0], (540, 0))
self.tb.paste(pic_list_1[1], (0, 1080))
self.tb.paste(pic_list_2[0], (0, 0))
self.tb.paste(pic_list_2[1], (540, 360))
self._build.save(self.tb)
|
src/seabreeze/pyseabreeze/features/ethernetconfiguration.py | TeamPiccolo/python-seabreeze | 159 | 96561 |
<filename>src/seabreeze/pyseabreeze/features/ethernetconfiguration.py
from seabreeze.pyseabreeze.features._base import SeaBreezeFeature
# Definition
# ==========
#
# TODO: This feature needs to be implemented for pyseabreeze
#
class SeaBreezeEthernetConfigurationFeature(SeaBreezeFeature):
identifier = "ethernet_configuration"
def get_mac_address(self, interface_index):
raise NotImplementedError("implement in derived class")
def set_mac_address(self, interface_index, mac_address):
raise NotImplementedError("implement in derived class")
def get_gbe_enable_status(self, interface_index):
raise NotImplementedError("implement in derived class")
def set_gbe_enable_status(self, interface_index, enable_state):
raise NotImplementedError("implement in derived class")
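# Illustrative sketch only (not part of pyseabreeze): a backend would subclass the
# feature and override the methods above; the return value below is a placeholder,
# not a real device query.
class _ExampleEthernetConfigurationFeature(SeaBreezeEthernetConfigurationFeature):
    def get_mac_address(self, interface_index):
        # a real implementation would talk to the spectrometer's protocol layer here
        return "00:00:00:00:00:00"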
|
selfdrive/car/mock/values.py | Neptos/openpilot | 37,508 | 96574 |
<reponame>Neptos/openpilot
class CAR:
MOCK = 'mock'
|
python/example_code/dynamodb/batching/test/test_dynamo_batching.py | iconara/aws-doc-sdk-examples | 5,166 | 96608 |
<gh_stars>1000+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for Amazon DynamoDB batching code example.
"""
import time
import unittest.mock
from botocore.exceptions import ClientError
import pytest
import dynamo_batching
@pytest.mark.parametrize('error_code,stop_on_method', [
(None, None),
('TestException', 'stub_create_table')])
def test_create_table(
make_stubber, make_unique_name, stub_runner, error_code, stop_on_method):
dyn_stubber = make_stubber(dynamo_batching.dynamodb.meta.client)
table_name = make_unique_name('table-')
schema = [
{'name': 'hash_item', 'type': 'N', 'key_type': 'HASH'},
{'name': 'range_item', 'type': 'S', 'key_type': 'RANGE'}]
with stub_runner(error_code, stop_on_method) as runner:
runner.add(
dyn_stubber.stub_create_table, table_name, schema,
{'read': 10, 'write': 10})
runner.add(dyn_stubber.stub_describe_table, table_name)
if error_code is None:
got_table = dynamo_batching.create_table(table_name, schema)
assert got_table.name == table_name
else:
with pytest.raises(ClientError) as exc_info:
dynamo_batching.create_table(table_name, schema)
assert exc_info.value.response['Error']['Code'] == error_code
def test_do_batch_get(make_stubber, monkeypatch):
dyn_stubber = make_stubber(dynamo_batching.dynamodb.meta.client)
item_count = 5
request_keys = {
'test-table1': {
'Keys': [{'test': {'S': f'test-{index}'}} for index in range(item_count)]},
'test-table2': {
'Keys': [{'test': {'S': f'test-{index}'}} for index in range(item_count)]}
}
response_items = {
'test-table1':
[{'test': {'S': f'test-{index}' for index in range(item_count)}}],
'test-table2':
[{'test': {'S': f'test-{index}' for index in range(item_count)}}],
}
monkeypatch.setattr(time, 'sleep', lambda x: None)
dyn_stubber.stub_batch_get_item(request_keys, unprocessed_keys=request_keys)
dyn_stubber.stub_batch_get_item(request_keys, response_items=response_items)
got_data = dynamo_batching.do_batch_get(request_keys)
for key in request_keys:
assert got_data[key] == response_items[key]
@pytest.mark.parametrize(
'item_count,error_code',
[(0, None),
(10, None),
(25, None),
(100, None),
(13, 'TestException')])
def test_fill_table(make_stubber, item_count, error_code):
dyn_stubber = make_stubber(dynamo_batching.dynamodb.meta.client)
table = dynamo_batching.dynamodb.Table('test-table')
table_data = [{'test': f'test-{index}'} for index in range(item_count)]
max_batch_size = 25 # Amazon DynamoDB limit
data_index = 0
while data_index < item_count:
dyn_stubber.stub_batch_write_item({
table.name: [{
'PutRequest': {'Item': item}}
for item in table_data[data_index:data_index+max_batch_size]]
}, error_code=error_code)
data_index += max_batch_size
if error_code is None:
dynamo_batching.fill_table(table, table_data)
else:
with pytest.raises(ClientError) as exc_info:
dynamo_batching.fill_table(table, table_data)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize(
'item_count,error_code',
[(10, None),
(500, None),
(dynamo_batching.MAX_GET_SIZE, None),
(13, 'TestException')])
def test_get_batch_data(monkeypatch, item_count, error_code):
movie_table = unittest.mock.MagicMock()
movie_table.name = 'movie-test'
movie_list = [(index, f'title-{index}') for index in range(item_count)]
actor_table = unittest.mock.MagicMock()
actor_table.name = 'actor-test'
actor_list = [f'actor-{index}' for index in range(item_count)]
test_data = {movie_table.name: movie_list, actor_table.name: actor_list}
def mock_do_batch_get(batch_keys):
if error_code is not None:
raise ClientError({'Error': {'Code': error_code}}, 'test_op')
assert len(batch_keys[movie_table.name]['Keys']) == len(movie_list)
assert len(batch_keys[actor_table.name]['Keys']) == len(actor_list)
return test_data
monkeypatch.setattr(dynamo_batching, 'do_batch_get', mock_do_batch_get)
if error_code is None:
got_data = dynamo_batching.get_batch_data(
movie_table, movie_list, actor_table, actor_list)
assert got_data == test_data
else:
with pytest.raises(ClientError) as exc_info:
dynamo_batching.get_batch_data(
movie_table, movie_list, actor_table, actor_list)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('item_count,error_code,stop_on_method', [
(20, None, None),
(10, 'TestException', 'stub_create_table'),
(10, 'TestException', 'stub_batch_write_item'),
])
def test_archive_movies(
make_stubber, stub_runner, item_count, error_code, stop_on_method):
dyn_stubber = make_stubber(dynamo_batching.dynamodb.meta.client)
movie_table = dynamo_batching.dynamodb.Table('movie-test')
movie_list = [
{'year': index, 'title': f'title-{index}'} for index in range(item_count)]
table_schema = [
{'name': 'year', 'type': 'N', 'key_type': 'HASH'},
{'name': 'title', 'type': 'S', 'key_type': 'RANGE'}]
archive_table_name = f'{movie_table.name}-archive'
with stub_runner(error_code, stop_on_method) as runner:
runner.add(
dyn_stubber.stub_describe_table, movie_table.name, schema=table_schema,
provisioned_throughput={'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10})
runner.add(
dyn_stubber.stub_create_table, archive_table_name, table_schema,
{'read': 10, 'write': 10})
runner.add(dyn_stubber.stub_describe_table, archive_table_name)
runner.add(
dyn_stubber.stub_batch_write_item, {
archive_table_name: [{
'PutRequest': {'Item': item}} for item in movie_list]},
error_code='ValidationException')
runner.add(
dyn_stubber.stub_batch_write_item, {
archive_table_name: [{
'PutRequest': {'Item': item}} for item in movie_list]})
runner.add(
dyn_stubber.stub_batch_write_item, {
movie_table.name: [{
'DeleteRequest': {'Key': item}} for item in movie_list]})
if error_code is None:
got_table = dynamo_batching.archive_movies(movie_table, movie_list)
assert got_table.name == archive_table_name
else:
with pytest.raises(ClientError) as exc_info:
dynamo_batching.archive_movies(movie_table, movie_list)
assert exc_info.value.response['Error']['Code'] == error_code
|
uq360/algorithms/classification_calibration/__init__.py | Sclare87/UQ360 | 148 | 96615 |
<filename>uq360/algorithms/classification_calibration/__init__.py
from .classification_calibration import ClassificationCalibration
|
model/pose/loss/mse_loss.py | qrsforever/torchcv | 171 | 96621 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: <NAME>(<EMAIL>)
# Loss function for Pose Estimation.
import torch.nn as nn
class MseLoss(nn.Module):
def __init__(self, configer):
super(MseLoss, self).__init__()
self.configer = configer
self.reduction = self.configer.get('loss.params.mse_loss.reduction', default='mean')
self.mse_loss = nn.MSELoss(reduction=self.reduction)
def forward(self, pred, target):
loss = self.mse_loss(pred, target)
return loss / pred.size(0) if self.reduction == 'sum' else loss
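# Usage sketch (not part of torchcv): `configer` normally comes from the project's
# config system; a minimal stand-in with the same get(key, default=...) signature is
# used here just to exercise the loss.
if __name__ == '__main__':
    import torch

    class _StubConfiger(object):
        def get(self, key, default=None):
            return default

    criterion = MseLoss(_StubConfiger())
    pred = torch.randn(2, 17, 64, 64)    # e.g. predicted keypoint heatmaps
    target = torch.randn(2, 17, 64, 64)  # matching ground-truth heatmaps
    print(criterion(pred, target).item())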
|
doubles/targets/expectation_target.py | fakeNetflix/uber-repo-doubles | 150 | 96628 |
import inspect
from doubles.class_double import ClassDouble
from doubles.exceptions import ConstructorDoubleError
from doubles.lifecycle import current_space
def expect(target):
"""
Prepares a target object for a method call expectation (mock). The name of the method to expect
should be called as a method on the return value of this function::
expect(foo).bar
Accessing the ``bar`` attribute will return an ``Expectation`` which provides additional methods
to configure the mock.
:param object target: The object that will be mocked.
:return: An ``ExpectationTarget`` for the target object.
"""
return ExpectationTarget(target)
def expect_constructor(target):
"""
Set an expectation on a ``ClassDouble`` constructor
:param ClassDouble target: The ClassDouble to set the expectation on.
:return: an ``Expectation`` for the __new__ method.
:raise: ``ConstructorDoubleError`` if target is not a ClassDouble.
"""
if not isinstance(target, ClassDouble):
raise ConstructorDoubleError(
'Cannot allow_constructor of {} since it is not a ClassDouble.'.format(target),
)
return expect(target)._doubles__new__
class ExpectationTarget(object):
"""A wrapper around a target object that creates new expectations on attribute access."""
def __init__(self, target):
"""
:param object target: The object to wrap.
"""
self._proxy = current_space().proxy_for(target)
def __getattribute__(self, attr_name):
"""
Returns the value of existing attributes, and returns a new expectation for any attribute
that doesn't yet exist.
:param str attr_name: The name of the attribute to look up.
:return: The existing value or a new ``Expectation``.
:rtype: object, Expectation
"""
__dict__ = object.__getattribute__(self, '__dict__')
if __dict__ and attr_name in __dict__:
return __dict__[attr_name]
caller = inspect.getframeinfo(inspect.currentframe().f_back)
return self._proxy.add_expectation(attr_name, caller)
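# Usage sketch mirroring the docstring of expect() above (hypothetical `foo` object):
# accessing an attribute on the ExpectationTarget creates an Expectation, and the test
# fails unless the method is actually called, e.g.:
#
#     expect(foo).bar
#     foo.bar()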
|
examples/mcscf/02-cas_space_spin.py | pablomazo/pyscf.github.io | 501 | 96672 |
<filename>examples/mcscf/02-cas_space_spin.py
#!/usr/bin/env python
'''
spin settings in active space
The mol.spin attribute controls the Sz value of the molecule. CASCI/CASSCF
methods by default use this parameter to determine Sz of the correlated
wave-function in active space (i.e. the number of alpha and beta electrons).
The number of alpha and beta electrons can be set independently in the mcscf
methods.
'''
import pyscf
mol = pyscf.M(
atom = 'C 0 0 0; O 0 0 1.2',
basis = 'ccpvdz',
spin = 0)
myhf = mol.RHF().run()
# 6 orbitals, 6 electrons. 3 alpha electrons and 3 beta electrons will be
# assigned to the active space due to mol.spin = 0
# This setting tends to converge to the singlet state.
mycas = myhf.CASSCF(6, 6).run()
# 6 orbitals, 4 alpha electrons, 2 beta electrons.
# This setting tends to converge to the triplet state
mycas = myhf.CASSCF(6, (4, 2)).run()
# 6 orbitals, 3 alpha electrons, 3 beta electrons, but solving the quintet
# state. See also example 18-spatial_spin_symmetry.py
mycas = myhf.CASSCF(6, (3, 3))
mycas.fix_spin_(ss=6)
mycas.run()
|
supersuit/vector/utils/space_wrapper.py | PettingZoo-Team/SuperSu | 237 | 96687 |
import gym
import numpy as np
class SpaceWrapper:
def __init__(self, space):
if isinstance(space, gym.spaces.Discrete):
self.shape = ()
self.dtype = np.dtype(np.int64)
elif isinstance(space, gym.spaces.Box):
self.shape = space.shape
self.dtype = np.dtype(space.dtype)
else:
assert False, "ProcVectorEnv only supports Box and Discrete types"
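# Usage sketch (not part of SuperSuit): SpaceWrapper just records the shape and dtype
# that a vector environment needs when preallocating observation/action buffers.
if __name__ == "__main__":
    discrete = SpaceWrapper(gym.spaces.Discrete(4))
    box = SpaceWrapper(gym.spaces.Box(low=0.0, high=1.0, shape=(3, 2), dtype=np.float32))
    print(discrete.shape, discrete.dtype)  # -> () int64
    print(box.shape, box.dtype)            # -> (3, 2) float32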
|
utils.py | trannguyen1510/LearningToCountEverything | 129 | 96692 |
import numpy as np
import torch.nn.functional as F
import math
from torchvision import transforms
import torch
import cv2
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
matplotlib.use('agg')
MAPS = ['map3','map4']
Scales = [0.9, 1.1]
MIN_HW = 384
MAX_HW = 1584
IM_NORM_MEAN = [0.485, 0.456, 0.406]
IM_NORM_STD = [0.229, 0.224, 0.225]
def select_exemplar_rois(image):
all_rois = []
print("Press 'q' or Esc to quit. Press 'n' and then use mouse drag to draw a new examplar, 'space' to save.")
while True:
key = cv2.waitKey(1) & 0xFF
if key == 27 or key == ord('q'):
break
elif key == ord('n') or key == ord('\r'):  # cv2.waitKey returns an int, so compare with ord('\r')
rect = cv2.selectROI("image", image, False, False)
x1 = rect[0]
y1 = rect[1]
x2 = x1 + rect[2] - 1
y2 = y1 + rect[3] - 1
all_rois.append([y1, x1, y2, x2])
for rect in all_rois:
y1, x1, y2, x2 = rect
cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
print("Press q or Esc to quit. Press 'n' and then use mouse drag to draw a new examplar")
return all_rois
def matlab_style_gauss2D(shape=(3,3),sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
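# Illustrative check (not from the original repo): for shape (3, 3) and
# sigma 0.5 the kernel matches MATLAB's fspecial('gaussian', [3 3], 0.5),
# approximately [[0.0113, 0.0838, 0.0113],
#                [0.0838, 0.6193, 0.0838],
#                [0.0113, 0.0838, 0.0113]],
# and the normalised kernel always sums to 1.
def _gauss2d_example():
    k = matlab_style_gauss2D(shape=(3, 3), sigma=0.5)
    assert abs(k.sum() - 1.0) < 1e-6
    return k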
def PerturbationLoss(output,boxes,sigma=8, use_gpu=True):
Loss = 0.
if boxes.shape[1] > 1:
boxes = boxes.squeeze()
for tempBoxes in boxes.squeeze():
y1 = int(tempBoxes[1])
y2 = int(tempBoxes[3])
x1 = int(tempBoxes[2])
x2 = int(tempBoxes[4])
out = output[:,:,y1:y2,x1:x2]
GaussKernel = matlab_style_gauss2D(shape=(out.shape[2],out.shape[3]),sigma=sigma)
GaussKernel = torch.from_numpy(GaussKernel).float()
if use_gpu: GaussKernel = GaussKernel.cuda()
Loss += F.mse_loss(out.squeeze(),GaussKernel)
else:
boxes = boxes.squeeze()
y1 = int(boxes[1])
y2 = int(boxes[3])
x1 = int(boxes[2])
x2 = int(boxes[4])
out = output[:,:,y1:y2,x1:x2]
Gauss = matlab_style_gauss2D(shape=(out.shape[2],out.shape[3]),sigma=sigma)
GaussKernel = torch.from_numpy(Gauss).float()
if use_gpu: GaussKernel = GaussKernel.cuda()
Loss += F.mse_loss(out.squeeze(),GaussKernel)
return Loss
def MincountLoss(output,boxes, use_gpu=True):
ones = torch.ones(1)
if use_gpu: ones = ones.cuda()
Loss = 0.
if boxes.shape[1] > 1:
boxes = boxes.squeeze()
for tempBoxes in boxes.squeeze():
y1 = int(tempBoxes[1])
y2 = int(tempBoxes[3])
x1 = int(tempBoxes[2])
x2 = int(tempBoxes[4])
X = output[:,:,y1:y2,x1:x2].sum()
if X.item() <= 1:
Loss += F.mse_loss(X,ones)
else:
boxes = boxes.squeeze()
y1 = int(boxes[1])
y2 = int(boxes[3])
x1 = int(boxes[2])
x2 = int(boxes[4])
X = output[:,:,y1:y2,x1:x2].sum()
if X.item() <= 1:
Loss += F.mse_loss(X,ones)
return Loss
def pad_to_size(feat, desire_h, desire_w):
""" zero-padding a four dim feature matrix: N*C*H*W so that the new Height and Width are the desired ones
        desire_h and desire_w should be larger than the current height and width
"""
cur_h = feat.shape[-2]
cur_w = feat.shape[-1]
left_pad = (desire_w - cur_w + 1) // 2
right_pad = (desire_w - cur_w) - left_pad
top_pad = (desire_h - cur_h + 1) // 2
bottom_pad =(desire_h - cur_h) - top_pad
return F.pad(feat, (left_pad, right_pad, top_pad, bottom_pad))
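# Illustrative check (not from the original repo): a 1x1x3x3 feature map padded
# to height 5 and width 6 gets 1 zero row on top and bottom, and 2 zero columns
# on the left versus 1 on the right (the extra column goes to the left because
# left_pad rounds up).
def _pad_to_size_example():
    feat = torch.ones(1, 1, 3, 3)
    padded = pad_to_size(feat, 5, 6)
    assert padded.shape == (1, 1, 5, 6)
    return padded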
def extract_features(feature_model, image, boxes,feat_map_keys=['map3','map4'], exemplar_scales=[0.9, 1.1]):
N, M = image.shape[0], boxes.shape[2]
"""
Getting features for the image N * C * H * W
"""
Image_features = feature_model(image)
"""
Getting features for the examples (N*M) * C * h * w
"""
for ix in range(0,N):
# boxes = boxes.squeeze(0)
boxes = boxes[ix][0]
cnter = 0
Cnter1 = 0
for keys in feat_map_keys:
image_features = Image_features[keys][ix].unsqueeze(0)
if keys == 'map1' or keys == 'map2':
Scaling = 4.0
elif keys == 'map3':
Scaling = 8.0
elif keys == 'map4':
Scaling = 16.0
else:
Scaling = 32.0
boxes_scaled = boxes / Scaling
boxes_scaled[:, 1:3] = torch.floor(boxes_scaled[:, 1:3])
boxes_scaled[:, 3:5] = torch.ceil(boxes_scaled[:, 3:5])
boxes_scaled[:, 3:5] = boxes_scaled[:, 3:5] + 1 # make the end indices exclusive
feat_h, feat_w = image_features.shape[-2], image_features.shape[-1]
# make sure exemplars don't go out of bound
boxes_scaled[:, 1:3] = torch.clamp_min(boxes_scaled[:, 1:3], 0)
boxes_scaled[:, 3] = torch.clamp_max(boxes_scaled[:, 3], feat_h)
boxes_scaled[:, 4] = torch.clamp_max(boxes_scaled[:, 4], feat_w)
box_hs = boxes_scaled[:, 3] - boxes_scaled[:, 1]
box_ws = boxes_scaled[:, 4] - boxes_scaled[:, 2]
max_h = math.ceil(max(box_hs))
max_w = math.ceil(max(box_ws))
for j in range(0,M):
y1, x1 = int(boxes_scaled[j,1]), int(boxes_scaled[j,2])
y2, x2 = int(boxes_scaled[j,3]), int(boxes_scaled[j,4])
#print(y1,y2,x1,x2,max_h,max_w)
if j == 0:
examples_features = image_features[:,:,y1:y2, x1:x2]
if examples_features.shape[2] != max_h or examples_features.shape[3] != max_w:
#examples_features = pad_to_size(examples_features, max_h, max_w)
examples_features = F.interpolate(examples_features, size=(max_h,max_w),mode='bilinear')
else:
feat = image_features[:,:,y1:y2, x1:x2]
if feat.shape[2] != max_h or feat.shape[3] != max_w:
feat = F.interpolate(feat, size=(max_h,max_w),mode='bilinear')
#feat = pad_to_size(feat, max_h, max_w)
examples_features = torch.cat((examples_features,feat),dim=0)
"""
Convolving example features over image features
"""
h, w = examples_features.shape[2], examples_features.shape[3]
features = F.conv2d(
F.pad(image_features, ((int(w/2)), int((w-1)/2), int(h/2), int((h-1)/2))),
examples_features
)
combined = features.permute([1,0,2,3])
# computing features for scales 0.9 and 1.1
for scale in exemplar_scales:
h1 = math.ceil(h * scale)
w1 = math.ceil(w * scale)
if h1 < 1: # use original size if scaled size is too small
h1 = h
if w1 < 1:
w1 = w
examples_features_scaled = F.interpolate(examples_features, size=(h1,w1),mode='bilinear')
features_scaled = F.conv2d(F.pad(image_features, ((int(w1/2)), int((w1-1)/2), int(h1/2), int((h1-1)/2))),
examples_features_scaled)
features_scaled = features_scaled.permute([1,0,2,3])
combined = torch.cat((combined,features_scaled),dim=1)
if cnter == 0:
Combined = 1.0 * combined
else:
if Combined.shape[2] != combined.shape[2] or Combined.shape[3] != combined.shape[3]:
combined = F.interpolate(combined, size=(Combined.shape[2],Combined.shape[3]),mode='bilinear')
Combined = torch.cat((Combined,combined),dim=1)
cnter += 1
if ix == 0:
All_feat = 1.0 * Combined.unsqueeze(0)
else:
All_feat = torch.cat((All_feat,Combined.unsqueeze(0)),dim=0)
return All_feat
class resizeImage(object):
"""
    If either the width or height of an image exceeds a specified value, resize the image so that:
1. The maximum of the new height and new width does not exceed a specified value
2. The new height and new width are divisible by 8
3. The aspect ratio is preserved
No resizing is done if both height and width are smaller than the specified value
By: <NAME> (<EMAIL>)
"""
def __init__(self, MAX_HW=1504):
self.max_hw = MAX_HW
def __call__(self, sample):
image,lines_boxes = sample['image'], sample['lines_boxes']
W, H = image.size
if W > self.max_hw or H > self.max_hw:
scale_factor = float(self.max_hw)/ max(H, W)
new_H = 8*int(H*scale_factor/8)
new_W = 8*int(W*scale_factor/8)
resized_image = transforms.Resize((new_H, new_W))(image)
else:
scale_factor = 1
resized_image = image
boxes = list()
for box in lines_boxes:
box2 = [int(k*scale_factor) for k in box]
y1, x1, y2, x2 = box2[0], box2[1], box2[2], box2[3]
boxes.append([0, y1,x1,y2,x2])
boxes = torch.Tensor(boxes).unsqueeze(0)
resized_image = Normalize(resized_image)
sample = {'image':resized_image,'boxes':boxes}
return sample
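# Worked example (illustrative numbers): with the default MAX_HW of 1504, a
# 2000x1000 (W x H) image gives scale_factor = 1504 / 2000 = 0.752, so the
# resized size is 8 * int(2000 * 0.752 / 8) = 1504 wide and
# 8 * int(1000 * 0.752 / 8) = 752 high: both sides stay divisible by 8 and the
# aspect ratio is preserved.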
class resizeImageWithGT(object):
"""
    If either the width or height of an image exceeds a specified value, resize the image so that:
1. The maximum of the new height and new width does not exceed a specified value
2. The new height and new width are divisible by 8
3. The aspect ratio is preserved
No resizing is done if both height and width are smaller than the specified value
By: <NAME> (<EMAIL>)
Modified by: Viresh
"""
def __init__(self, MAX_HW=1504):
self.max_hw = MAX_HW
def __call__(self, sample):
image,lines_boxes,density = sample['image'], sample['lines_boxes'],sample['gt_density']
W, H = image.size
if W > self.max_hw or H > self.max_hw:
scale_factor = float(self.max_hw)/ max(H, W)
new_H = 8*int(H*scale_factor/8)
new_W = 8*int(W*scale_factor/8)
resized_image = transforms.Resize((new_H, new_W))(image)
resized_density = cv2.resize(density, (new_W, new_H))
orig_count = np.sum(density)
new_count = np.sum(resized_density)
if new_count > 0: resized_density = resized_density * (orig_count / new_count)
else:
scale_factor = 1
resized_image = image
resized_density = density
boxes = list()
for box in lines_boxes:
box2 = [int(k*scale_factor) for k in box]
y1, x1, y2, x2 = box2[0], box2[1], box2[2], box2[3]
boxes.append([0, y1,x1,y2,x2])
boxes = torch.Tensor(boxes).unsqueeze(0)
resized_image = Normalize(resized_image)
resized_density = torch.from_numpy(resized_density).unsqueeze(0).unsqueeze(0)
sample = {'image':resized_image,'boxes':boxes,'gt_density':resized_density}
return sample
Normalize = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=IM_NORM_MEAN, std=IM_NORM_STD)])
Transform = transforms.Compose([resizeImage( MAX_HW)])
TransformTrain = transforms.Compose([resizeImageWithGT(MAX_HW)])
def denormalize(tensor, means=IM_NORM_MEAN, stds=IM_NORM_STD):
"""Reverses the normalisation on a tensor.
Performs a reverse operation on a tensor, so the pixel value range is
between 0 and 1. Useful for when plotting a tensor into an image.
Normalisation: (image - mean) / std
Denormalisation: image * std + mean
Args:
tensor (torch.Tensor, dtype=torch.float32): Normalized image tensor
Shape:
Input: :math:`(N, C, H, W)`
Output: :math:`(N, C, H, W)` (same shape as input)
Return:
        torch.Tensor (torch.float32): Denormalised image tensor with pixel
values between [0, 1]
Note:
Symbols used to describe dimensions:
- N: number of images in a batch
- C: number of channels
- H: height of the image
- W: width of the image
"""
denormalized = tensor.clone()
for channel, mean, std in zip(denormalized, means, stds):
channel.mul_(std).add_(mean)
return denormalized
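# Worked example (illustrative numbers): for the red channel (mean 0.485,
# std 0.229), a raw value of 0.5 is normalised to (0.5 - 0.485) / 0.229 ≈ 0.066,
# and denormalize maps it back via 0.066 * 0.229 + 0.485 ≈ 0.5.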
def scale_and_clip(val, scale_factor, min_val, max_val):
"Helper function to scale a value and clip it within range"
new_val = int(round(val*scale_factor))
new_val = max(new_val, min_val)
new_val = min(new_val, max_val)
return new_val
def visualize_output_and_save(input_, output, boxes, save_path, figsize=(20, 12), dots=None):
"""
dots: Nx2 numpy array for the ground truth locations of the dot annotation
if dots is None, this information is not available
"""
# get the total count
pred_cnt = output.sum().item()
boxes = boxes.squeeze(0)
boxes2 = []
for i in range(0, boxes.shape[0]):
y1, x1, y2, x2 = int(boxes[i, 1].item()), int(boxes[i, 2].item()), int(boxes[i, 3].item()), int(
boxes[i, 4].item())
roi_cnt = output[0,0,y1:y2, x1:x2].sum().item()
boxes2.append([y1, x1, y2, x2, roi_cnt])
img1 = format_for_plotting(denormalize(input_))
output = format_for_plotting(output)
fig = plt.figure(figsize=figsize)
# display the input image
ax = fig.add_subplot(2, 2, 1)
ax.set_axis_off()
ax.imshow(img1)
for bbox in boxes2:
y1, x1, y2, x2 = bbox[0], bbox[1], bbox[2], bbox[3]
rect = patches.Rectangle((x1, y1), x2-x1, y2-y1, linewidth=3, edgecolor='y', facecolor='none')
rect2 = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=1, edgecolor='k', linestyle='--', facecolor='none')
ax.add_patch(rect)
ax.add_patch(rect2)
if dots is not None:
ax.scatter(dots[:, 0], dots[:, 1], c='red', edgecolors='blue')
# ax.scatter(dots[:,0], dots[:,1], c='black', marker='+')
ax.set_title("Input image, gt count: {}".format(dots.shape[0]))
else:
ax.set_title("Input image")
ax = fig.add_subplot(2, 2, 2)
ax.set_axis_off()
ax.set_title("Overlaid result, predicted count: {:.2f}".format(pred_cnt))
img2 = 0.2989*img1[:,:,0] + 0.5870*img1[:,:,1] + 0.1140*img1[:,:,2]
ax.imshow(img2, cmap='gray')
ax.imshow(output, cmap=plt.cm.viridis, alpha=0.5)
# display the density map
ax = fig.add_subplot(2, 2, 3)
ax.set_axis_off()
ax.set_title("Density map, predicted count: {:.2f}".format(pred_cnt))
ax.imshow(output)
# plt.colorbar()
ax = fig.add_subplot(2, 2, 4)
ax.set_axis_off()
ax.set_title("Density map, predicted count: {:.2f}".format(pred_cnt))
ret_fig = ax.imshow(output)
for bbox in boxes2:
y1, x1, y2, x2, roi_cnt = bbox[0], bbox[1], bbox[2], bbox[3], bbox[4]
rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=3, edgecolor='y', facecolor='none')
rect2 = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=1, edgecolor='k', linestyle='--',
facecolor='none')
ax.add_patch(rect)
ax.add_patch(rect2)
ax.text(x1, y1, '{:.2f}'.format(roi_cnt), backgroundcolor='y')
fig.colorbar(ret_fig, ax=ax)
fig.savefig(save_path, bbox_inches="tight")
plt.close()
def format_for_plotting(tensor):
"""Formats the shape of tensor for plotting.
Tensors typically have a shape of :math:`(N, C, H, W)` or :math:`(C, H, W)`
which is not suitable for plotting as images. This function formats an
input tensor :math:`(H, W, C)` for RGB and :math:`(H, W)` for mono-channel
data.
Args:
tensor (torch.Tensor, torch.float32): Image tensor
Shape:
Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
Output: :math:`(H, W, C)` or :math:`(H, W)`, respectively
Return:
torch.Tensor (torch.float32): Formatted image tensor (detached)
Note:
Symbols used to describe dimensions:
- N: number of images in a batch
- C: number of channels
- H: height of the image
- W: width of the image
"""
has_batch_dimension = len(tensor.shape) == 4
formatted = tensor.clone()
if has_batch_dimension:
formatted = tensor.squeeze(0)
if formatted.shape[0] == 1:
return formatted.squeeze(0).detach()
else:
return formatted.permute(1, 2, 0).detach()
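# Illustrative check (not from the original repo): a batched RGB tensor and a
# batched single-channel density map both come out in shapes matplotlib can
# plot directly.
def _format_for_plotting_example():
    rgb = torch.rand(1, 3, 4, 5)
    density = torch.rand(1, 1, 4, 5)
    assert format_for_plotting(rgb).shape == (4, 5, 3)
    assert format_for_plotting(density).shape == (4, 5)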
|
ssm/init_state_distns.py
|
adelaneh/ssm
| 208 |
96695
|
<filename>ssm/init_state_distns.py
from functools import partial
from warnings import warn
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.scipy.special import logsumexp
from autograd.misc.optimizers import sgd, adam
from autograd import grad
from ssm.util import ensure_args_are_lists
class InitialStateDistribution(object):
def __init__(self, K, D, M=0):
self.K, self.D, self.M = K, D, M
self.log_pi0 = -np.log(K) * np.ones(K)
@property
def params(self):
return (self.log_pi0,)
@params.setter
def params(self, value):
self.log_pi0 = value[0]
@property
def initial_state_distn(self):
return np.exp(self.log_pi0 - logsumexp(self.log_pi0))
@property
def log_initial_state_distn(self):
return self.log_pi0 - logsumexp(self.log_pi0)
@ensure_args_are_lists
def initialize(self, datas, inputs=None, masks=None, tags=None):
pass
def permute(self, perm):
"""
Permute the discrete latent states.
"""
self.log_pi0 = self.log_pi0[perm]
def log_prior(self):
return 0
def m_step(self, expectations, datas, inputs, masks, tags, **kwargs):
pi0 = sum([Ez[0] for Ez, _, _ in expectations]) + 1e-8
self.log_pi0 = np.log(pi0 / pi0.sum())
class FixedInitialStateDistribution(InitialStateDistribution):
def __init__(self, K, D, pi0=None, M=0):
super(FixedInitialStateDistribution, self).__init__(K, D, M=M)
if pi0 is not None:
# Handle the case where user passes a numpy array of (K, 1) instead of (K,)
pi0 = np.squeeze(np.array(pi0))
assert len(pi0) == K, "Array passed as pi0 is of the wrong length"
self.log_pi0 = np.log(pi0 + 1e-16)
def m_step(self, expectations, datas, inputs, masks, tags, **kwargs):
# Don't change the distribution
pass
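# Illustrative usage (a sketch, not part of the ssm package): the default
# initial state distribution is uniform over the K discrete states, while
# FixedInitialStateDistribution pins it to a user-supplied pi0 and keeps it
# fixed during M-steps.
def _initial_state_distn_example():
    init = InitialStateDistribution(K=3, D=2)
    assert np.allclose(init.initial_state_distn, np.ones(3) / 3)
    fixed = FixedInitialStateDistribution(K=3, D=2, pi0=[0.8, 0.1, 0.1])
    assert np.allclose(fixed.initial_state_distn, [0.8, 0.1, 0.1], atol=1e-6)
    return init, fixed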
|
data-structures/equal-stacks.py
|
gajubadge11/HackerRank-1
| 340 |
96702
|
#!/bin/python3
import sys
if __name__ == "__main__":
n1,n2,n3 = input().strip().split(' ')
n1,n2,n3 = [int(n1),int(n2),int(n3)]
h1 = [int(h1_temp) for h1_temp in input().strip().split(' ')]
h2 = [int(h2_temp) for h2_temp in input().strip().split(' ')]
h3 = [int(h3_temp) for h3_temp in input().strip().split(' ')]
stacks = [h1[::-1], h2[::-1], h3[::-1]]
stack_sums = [sum(stacks[i]) for i in range(3)]
while len({stack_sums[i] for i in range(3)}) > 1:
tallest_stack = max(stack_sums)
index = stack_sums.index(tallest_stack)
removed = stacks[index].pop()
stack_sums[index] -= removed
print(min([sum(stacks[i]) for i in range(3)]))
|
apps/base/views/plugin.py
|
youssriaboelseod/pyerp
| 115 |
96735
|
# Standard Library
import json
import subprocess
import sys
from collections import OrderedDict
from importlib import reload
from os import listdir, path
# Django Library
from django.apps import apps
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.management import call_command
from django.shortcuts import redirect, render
from django.urls import clear_url_caches, reverse
# Localfolder Library
from ...base.models import PyPlugin
from .web_father import FatherListView
OBJECT_LIST_FIELDS = [
{'string': 'Nombre', 'field': 'name'},
{'string': 'Author', 'field': 'author'},
{'string': 'Description', 'field': 'description'},
{'string': 'Installed', 'field': 'installed'},
]
def Apps(request):
return render(request, 'base/plugin.html')
class PluginListView(LoginRequiredMixin, FatherListView):
model = PyPlugin
template_name = 'base/plugin.html'
fields = OBJECT_LIST_FIELDS
context_object_name = 'plugin_list'
paginate_by = 12
# ========================================================================== #
def PluginUpdate(self):
"""Actualiza los plugins
"""
FILE_NAME = 'info.json'
folder_apps = '{}/apps'.format(settings.BASE_DIR)
plugin_list = tuple(
set(name['name'] for name in PyPlugin.objects.all().values('name'))
)
for folder in listdir(folder_apps):
file = folder_apps + "/" + folder + "/" + FILE_NAME
if path.isfile(file) and folder not in plugin_list:
print(file)
with open(file) as json_file:
data = json.load(json_file)
plugin = PyPlugin(
name=data['name'].lower(),
description=data['description'],
author=data['author'],
fa=data['fa'],
version=data['version'],
website=data['website'],
color=data['color']
)
plugin.save()
return redirect(reverse('PyPlugin:list'))
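# Illustrative example (all values are made up): a minimal apps/<folder>/info.json
# that PluginUpdate would pick up needs exactly the keys read above, e.g.
# {
#     "name": "sale",
#     "description": "Sales management",
#     "author": "Example Author",
#     "fa": "fa-shopping-cart",
#     "version": "1.0",
#     "website": "https://example.com",
#     "color": "#c93f35"
# }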
def PluginInstall(self, pk):
plugin = PyPlugin.objects.get(id=pk)
plugin.installed = True
with open('installed_apps.py', 'a+') as installed_apps_file:
if installed_apps_file.write('apps.{}\n'.format(plugin.name.lower())):
print('yes')
else:
print("no")
    # Since a single app cannot be loaded on its own, all apps registered as
    # plugins are re-read at runtime whenever any app is installed
with open('%s/installed_apps.py' % settings.BASE_DIR, 'r') as ins_apps_file:
for line in ins_apps_file.readlines():
settings.INSTALLED_APPS += (line.strip().rstrip('\n'), )
apps.app_configs = OrderedDict()
apps.apps_ready = apps.models_ready = apps.loading = apps.ready = False
apps.clear_cache()
    try:
        # Reload all applications; how to load just one is an open question
        apps.populate(settings.INSTALLED_APPS)
    except Exception:
        # plugin.installed = False
        print('The app population process failed')
    try:
        # Build the plugin's migrations
        call_command('makemigrations', plugin.name.lower(), interactive=False)
    except Exception:
        # plugin.installed = False
        print('No migrations found for the app')
    try:
        # Run the app's migrations
        call_command('migrate', plugin.name.lower(), interactive=False)
    except Exception:
        # plugin.installed = False
        print('The app was not migrated')
    try:
        # Load the app's fixture data
        call_command('loaddata', '{}.json'.format(plugin.name.lower()), interactive=False)
    except Exception:
        # plugin.installed = False
        print('No data was loaded for the app')
    plugin.save()
    # subprocess.run[PROJECT_RELOAD]
    # Reload the project's URL routes in memory
urlconf = settings.ROOT_URLCONF
if urlconf in sys.modules:
clear_url_caches()
reload(sys.modules[urlconf])
return redirect(reverse('PyPlugin:list'))
# ========================================================================== #
def PluginUninstall(self, pk):
app = PyPlugin.objects.get(id=pk)
app.installed = False
app.save()
app_lists = []
with open('installed_apps.py', 'r') as installed_apps_file:
app_lists = installed_apps_file.readlines()
with open('installed_apps.py', 'w+') as installed_apps_file:
for line in app_lists:
if 'apps.%s' % app.name.lower() == line.strip():
continue
installed_apps_file.write(line)
return redirect(reverse('PyPlugin:list'))
|
python/vineyard/deploy/tests/test_local.py
|
linlih/v6d
| 417 |
96802
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import vineyard
def test_local_cluster():
client1, client2, client3 = vineyard.init(num_instances=3)
assert client1 != client2
assert client1 != client3
assert client2 != client3
obj_id = client1.put([1024, 1024])
client1.persist(obj_id)
meta2 = client2.get_meta(obj_id)
meta3 = client3.get_meta(obj_id)
assert str(meta2) == str(meta3)
vineyard.shutdown()
def test_local_single():
client = vineyard.init()
obj_id = client.put(1024)
client1 = vineyard.connect()
assert client1.get(obj_id) == 1024
client2 = vineyard.get_current_client()
assert client == client2
vineyard.shutdown()
|
zeus/migrations/52a5d85ba249_backfill_cr_multi_author.py
|
conrad-kronos/zeus
| 221 |
96812
|
<reponame>conrad-kronos/zeus
"""backfill_cr_multi_author
Revision ID: 52a5d85ba249
Revises: <PASSWORD>
Create Date: 2020-03-04 15:23:10.842507
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "52a5d85ba249"
down_revision = "<PASSWORD>"
branch_labels = ()
depends_on = None
def upgrade():
connection = op.get_bind()
connection.execute(
"""
INSERT INTO change_request_author (change_request_id, author_id)
SELECT id, author_id FROM change_request
WHERE author_id IS NOT NULL
ON CONFLICT DO NOTHING
"""
)
def downgrade():
pass
|
python/src/main/python/pygw/query/statistics/statistic_query_builder.py
|
radiant-maxar/geowave
| 280 |
96817
|
<filename>python/src/main/python/pygw/query/statistics/statistic_query_builder.py<gh_stars>100-1000
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from pygw.base import GeoWaveObject
from pygw.base.type_conversions import StringArrayType
from pygw.config import geowave_pkg
from pygw.query.statistics.statistic_query import StatisticQuery
from pygw.statistics.bin_constraints import BinConstraints
from pygw.statistics.statistic_type import StatisticType, IndexStatisticType, DataTypeStatisticType
class StatisticQueryBuilder(GeoWaveObject):
"""
    A builder for creating statistics queries. This class should not be constructed directly; instead, use one of the
    static methods to create an appropriate builder.
"""
def __init__(self, java_ref, result_transformer):
self._result_transformer = result_transformer
super().__init__(java_ref)
def tag(self, tag):
"""
Sets the tag to query for.
Args:
tag (str): The tag to query for.
Returns:
This statistic query builder.
"""
self._java_ref.tag(tag)
return self
def internal(self):
"""
When set, only internal statistics will be queried.
Returns:
This statistic query builder.
"""
self._java_ref.internal()
return self
def add_authorization(self, authorization):
"""
Adds an authorization to the query.
Args:
authorization (str): The authorization to add.
Returns:
This statistic query builder.
"""
self._java_ref.addAuthorization(authorization)
return self
def authorizations(self, authorizations):
"""
Sets the set of authorizations to use for the query.
Args:
authorizations (array of str): The authorizations to use for the query.
Returns:
This statistic query builder.
"""
self._java_ref.authorizations(StringArrayType().to_java(authorizations))
return self
def bin_constraints(self, bin_constraints):
"""
Sets the constraints to use for the statistic query. Only bins that match the given constraints will be
returned.
Args:
bin_constraints (BinConstraints): The constraints to constrain the query by.
Returns:
This statistic query builder.
"""
if not isinstance(bin_constraints, BinConstraints):
raise AttributeError('Must be a BinConstraints instance.')
self._java_ref.binConstraints(bin_constraints.java_ref())
return self
def build(self):
"""
Build the statistic query.
Returns:
This constructed statistic query.
"""
return StatisticQuery(self._java_ref.build(), self._result_transformer)
@staticmethod
def new_builder(statistic_type):
"""
Create a statistic query builder for the given statistic type.
Args:
statistic_type (StatisticType): The statistic type for the query builder.
Returns:
A statistic query builder.
"""
if not isinstance(statistic_type, StatisticType):
raise AttributeError('Must be a StatisticType instance.')
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.newBuilder(statistic_type.java_ref())
if isinstance(statistic_type, IndexStatisticType):
return IndexStatisticQueryBuilder(statistic_type, j_builder)
if isinstance(statistic_type, DataTypeStatisticType):
return DataTypeStatisticQueryBuilder(statistic_type, j_builder)
return FieldStatisticQueryBuilder(statistic_type, j_builder)
@staticmethod
def differing_visibility_count():
"""
Create a statistic query builder for a differing visibility count statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.differingVisibilityCount()
return IndexStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def duplicate_entry_count():
"""
Create a statistic query builder for a duplicate entry count statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.duplicateEntryCount()
return IndexStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def field_visibility_count():
"""
Create a statistic query builder for a field visibility count statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.fieldVisibilityCount()
return IndexStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def index_meta_data_set():
"""
Create a statistic query builder for an index meta data set statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.indexMetaDataSet()
return IndexStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def max_duplicates():
"""
Create a statistic query builder for a max duplicates statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.maxDuplicates()
return IndexStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def partitions():
"""
Create a statistic query builder for a partitions statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.partitions()
return IndexStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def row_range_histogram():
"""
Create a statistic query builder for a row range histogram statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.rowRangeHistogram()
return IndexStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def count():
"""
Create a statistic query builder for a count statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.count()
return DataTypeStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def bbox():
"""
Create a statistic query builder for a bounding box statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.geotime.store.statistics.SpatialTemporalStatisticQueryBuilder.bbox()
return FieldStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def bloom_filter():
"""
Create a statistic query builder for a bloom filter statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.bloomFilter()
return FieldStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def time_range():
"""
Create a statistic query builder for a time range statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.geotime.store.statistics.SpatialTemporalStatisticQueryBuilder.timeRange()
return FieldStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def count_min_sketch():
"""
Create a statistic query builder for a count min sketch statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.countMinSketch()
return FieldStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def fixed_bin_numeric_histogram():
"""
Create a statistic query builder for a fixed bin numeric histogram statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.fixedBinNumericHistogram()
return FieldStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def hyper_log_log():
"""
Create a statistic query builder for a hyper log log statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.hyperLogLog()
return FieldStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def numeric_histogram():
"""
Create a statistic query builder for a numeric histogram statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.numericHistogram()
return FieldStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def numeric_mean():
"""
Create a statistic query builder for a numeric mean statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.numericMean()
return FieldStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def numeric_range():
"""
Create a statistic query builder for a numeric range statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.numericRange()
return FieldStatisticQueryBuilder(java_ref=j_builder)
@staticmethod
def numeric_stats():
"""
Create a statistic query builder for a numeric stats statistic.
Returns:
A statistic query builder.
"""
j_builder = geowave_pkg.core.store.api.StatisticQueryBuilder.numericStats()
return FieldStatisticQueryBuilder(java_ref=j_builder)
class IndexStatisticQueryBuilder(StatisticQueryBuilder):
"""
A builder for index statistic queries.
"""
def __init__(self, statistic_type=None, java_ref=None):
if java_ref is None:
j_qbuilder = geowave_pkg.core.statistics.query.IndexStatisticQueryBuilder(statistic_type.java_ref())
else:
j_qbuilder = java_ref
super().__init__(j_qbuilder, None)
def index_name(self, index_name):
"""
Set the index name to constrain the query by.
Args:
index_name (str): The index name to query.
Returns:
This statistic query builder.
"""
self._java_ref.indexName(index_name)
return self
class DataTypeStatisticQueryBuilder(StatisticQueryBuilder):
"""
A builder for data type statistic queries.
"""
def __init__(self, statistic_type=None, java_ref=None):
if java_ref is None:
j_qbuilder = geowave_pkg.core.statistics.query.DataTypeStatisticQueryBuilder(statistic_type.java_ref())
else:
j_qbuilder = java_ref
super().__init__(j_qbuilder, None)
def type_name(self, type_name):
"""
Set the type name to constrain the query by.
Args:
type_name (str): The type name to query.
Returns:
This statistic query builder.
"""
self._java_ref.typeName(type_name)
return self
class FieldStatisticQueryBuilder(StatisticQueryBuilder):
"""
A builder for field statistic queries.
"""
def __init__(self, statistic_type=None, java_ref=None):
if java_ref is None:
j_qbuilder = geowave_pkg.core.statistics.query.FieldStatisticQueryBuilder(statistic_type.java_ref())
else:
j_qbuilder = java_ref
super().__init__(j_qbuilder, None)
def type_name(self, type_name):
"""
Set the type name to constrain the query by.
Args:
type_name (str): The type name to query.
Returns:
This statistic query builder.
"""
self._java_ref.typeName(type_name)
return self
def field_name(self, field_name):
"""
Set the field name to constrain the query by.
Args:
field_name (str): The field name to query.
Returns:
This statistic query builder.
"""
self._java_ref.fieldName(field_name)
return self
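# Illustrative usage (a sketch, not part of the module): a count statistic
# query for a hypothetical type named "counties" is assembled from the static
# factory plus the fluent setters defined above; the built query would then be
# passed to a GeoWave data store's statistics query API.
def _example_count_query():
    builder = StatisticQueryBuilder.count()  # DataTypeStatisticQueryBuilder
    builder.type_name("counties")            # "counties" is a made-up type name
    return builder.build()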
|
agents/rainbow_agent.py
|
christopherhesse/retro-baselines
| 134 |
96818
|
<reponame>christopherhesse/retro-baselines
#!/usr/bin/env python
"""
Train an agent on Sonic using an open source Rainbow DQN
implementation.
"""
import tensorflow as tf
from anyrl.algos import DQN
from anyrl.envs import BatchedGymEnv
from anyrl.envs.wrappers import BatchedFrameStack
from anyrl.models import rainbow_models
from anyrl.rollouts import BatchedPlayer, PrioritizedReplayBuffer, NStepPlayer
from anyrl.spaces import gym_space_vectorizer
import gym_remote.exceptions as gre
from sonic_util import AllowBacktracking, make_env
def main():
"""Run DQN until the environment throws an exception."""
env = AllowBacktracking(make_env(stack=False, scale_rew=False))
env = BatchedFrameStack(BatchedGymEnv([[env]]), num_images=4, concat=False)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=E1101
with tf.Session(config=config) as sess:
dqn = DQN(*rainbow_models(sess,
env.action_space.n,
gym_space_vectorizer(env.observation_space),
min_val=-200,
max_val=200))
player = NStepPlayer(BatchedPlayer(env, dqn.online_net), 3)
optimize = dqn.optimize(learning_rate=1e-4)
sess.run(tf.global_variables_initializer())
dqn.train(num_steps=2000000, # Make sure an exception arrives before we stop.
player=player,
replay_buffer=PrioritizedReplayBuffer(500000, 0.5, 0.4, epsilon=0.1),
optimize_op=optimize,
train_interval=1,
target_interval=8192,
batch_size=32,
min_buffer_size=20000)
if __name__ == '__main__':
try:
main()
except gre.GymRemoteError as exc:
print('exception', exc)
|
tests/unit/v1/test_document.py
|
anna-hope/python-firestore
| 140 |
96851
|
<reponame>anna-hope/python-firestore
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
def _make_document_reference(*args, **kwargs):
from google.cloud.firestore_v1.document import DocumentReference
return DocumentReference(*args, **kwargs)
def test_constructor():
collection_id1 = "users"
document_id1 = "alovelace"
collection_id2 = "platform"
document_id2 = "*nix"
client = mock.MagicMock()
client.__hash__.return_value = 1234
document = _make_document_reference(
collection_id1, document_id1, collection_id2, document_id2, client=client
)
assert document._client is client
expected_path = "/".join(
(collection_id1, document_id1, collection_id2, document_id2)
)
assert document.path == expected_path
def _make_commit_repsonse(write_results=None):
from google.cloud.firestore_v1.types import firestore
response = mock.create_autospec(firestore.CommitResponse)
response.write_results = write_results or [mock.sentinel.write_result]
response.commit_time = mock.sentinel.commit_time
return response
def _write_pb_for_create(document_path, document_data):
from google.cloud.firestore_v1.types import common
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import write
from google.cloud.firestore_v1 import _helpers
return write.Write(
update=document.Document(
name=document_path, fields=_helpers.encode_dict(document_data)
),
current_document=common.Precondition(exists=False),
)
def _create_helper(retry=None, timeout=None):
from google.cloud.firestore_v1 import _helpers
# Create a minimal fake GAPIC with a dummy response.
firestore_api = mock.Mock()
firestore_api.commit.mock_add_spec(spec=["commit"])
firestore_api.commit.return_value = _make_commit_repsonse()
# Attach the fake GAPIC to a real client.
client = _make_client("dignity")
client._firestore_api_internal = firestore_api
# Actually make a document and call create().
document = _make_document_reference("foo", "twelve", client=client)
document_data = {"hello": "goodbye", "count": 99}
kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
write_result = document.create(document_data, **kwargs)
# Verify the response and the mocks.
assert write_result is mock.sentinel.write_result
write_pb = _write_pb_for_create(document._document_path, document_data)
firestore_api.commit.assert_called_once_with(
request={
"database": client._database_string,
"writes": [write_pb],
"transaction": None,
},
metadata=client._rpc_metadata,
**kwargs,
)
def test_documentreference_create():
_create_helper()
def test_documentreference_create_w_retry_timeout():
from google.api_core.retry import Retry
retry = Retry(predicate=object())
timeout = 123.0
_create_helper(retry=retry, timeout=timeout)
def test_documentreference_create_empty():
# Create a minimal fake GAPIC with a dummy response.
from google.cloud.firestore_v1.document import DocumentReference
from google.cloud.firestore_v1.document import DocumentSnapshot
firestore_api = mock.Mock(spec=["commit"])
document_reference = mock.create_autospec(DocumentReference)
snapshot = mock.create_autospec(DocumentSnapshot)
snapshot.exists = True
document_reference.get.return_value = snapshot
firestore_api.commit.return_value = _make_commit_repsonse(
write_results=[document_reference]
)
# Attach the fake GAPIC to a real client.
client = _make_client("dignity")
client._firestore_api_internal = firestore_api
client.get_all = mock.MagicMock()
client.get_all.exists.return_value = True
# Actually make a document and call create().
document = _make_document_reference("foo", "twelve", client=client)
document_data = {}
write_result = document.create(document_data)
assert write_result.get().exists
def _write_pb_for_set(document_path, document_data, merge):
from google.cloud.firestore_v1.types import common
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import write
from google.cloud.firestore_v1 import _helpers
write_pbs = write.Write(
update=document.Document(
name=document_path, fields=_helpers.encode_dict(document_data)
)
)
if merge:
field_paths = [
field_path
for field_path, value in _helpers.extract_fields(
document_data, _helpers.FieldPath()
)
]
field_paths = [field_path.to_api_repr() for field_path in sorted(field_paths)]
mask = common.DocumentMask(field_paths=sorted(field_paths))
write_pbs._pb.update_mask.CopyFrom(mask._pb)
return write_pbs
def _set_helper(merge=False, retry=None, timeout=None, **option_kwargs):
from google.cloud.firestore_v1 import _helpers
# Create a minimal fake GAPIC with a dummy response.
firestore_api = mock.Mock(spec=["commit"])
firestore_api.commit.return_value = _make_commit_repsonse()
# Attach the fake GAPIC to a real client.
client = _make_client("db-dee-bee")
client._firestore_api_internal = firestore_api
# Actually make a document and call create().
document = _make_document_reference("User", "Interface", client=client)
document_data = {"And": 500, "Now": b"\xba\xaa\xaa \xba\xaa\xaa"}
kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
write_result = document.set(document_data, merge, **kwargs)
# Verify the response and the mocks.
assert write_result is mock.sentinel.write_result
write_pb = _write_pb_for_set(document._document_path, document_data, merge)
firestore_api.commit.assert_called_once_with(
request={
"database": client._database_string,
"writes": [write_pb],
"transaction": None,
},
metadata=client._rpc_metadata,
**kwargs,
)
def test_documentreference_set():
_set_helper()
def test_documentreference_set_w_retry_timeout():
from google.api_core.retry import Retry
retry = Retry(predicate=object())
timeout = 123.0
_set_helper(retry=retry, timeout=timeout)
def test_documentreference_set_merge():
_set_helper(merge=True)
def _write_pb_for_update(document_path, update_values, field_paths):
from google.cloud.firestore_v1.types import common
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import write
from google.cloud.firestore_v1 import _helpers
return write.Write(
update=document.Document(
name=document_path, fields=_helpers.encode_dict(update_values)
),
update_mask=common.DocumentMask(field_paths=field_paths),
current_document=common.Precondition(exists=True),
)
def _update_helper(retry=None, timeout=None, **option_kwargs):
from collections import OrderedDict
from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1.transforms import DELETE_FIELD
# Create a minimal fake GAPIC with a dummy response.
firestore_api = mock.Mock(spec=["commit"])
firestore_api.commit.return_value = _make_commit_repsonse()
# Attach the fake GAPIC to a real client.
client = _make_client("potato-chip")
client._firestore_api_internal = firestore_api
# Actually make a document and call create().
document = _make_document_reference("baked", "Alaska", client=client)
# "Cheat" and use OrderedDict-s so that iteritems() is deterministic.
field_updates = OrderedDict(
(("hello", 1), ("then.do", False), ("goodbye", DELETE_FIELD))
)
kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
if option_kwargs:
option = client.write_option(**option_kwargs)
write_result = document.update(field_updates, option=option, **kwargs)
else:
option = None
write_result = document.update(field_updates, **kwargs)
# Verify the response and the mocks.
assert write_result is mock.sentinel.write_result
update_values = {
"hello": field_updates["hello"],
"then": {"do": field_updates["then.do"]},
}
field_paths = list(field_updates.keys())
write_pb = _write_pb_for_update(
document._document_path, update_values, sorted(field_paths)
)
if option is not None:
option.modify_write(write_pb)
firestore_api.commit.assert_called_once_with(
request={
"database": client._database_string,
"writes": [write_pb],
"transaction": None,
},
metadata=client._rpc_metadata,
**kwargs,
)
def test_documentreference_update_with_exists():
with pytest.raises(ValueError):
_update_helper(exists=True)
def test_documentreference_update():
_update_helper()
def test_documentreference_update_w_retry_timeout():
from google.api_core.retry import Retry
retry = Retry(predicate=object())
timeout = 123.0
_update_helper(retry=retry, timeout=timeout)
def test_documentreference_update_with_precondition():
from google.protobuf import timestamp_pb2
timestamp = timestamp_pb2.Timestamp(seconds=1058655101, nanos=100022244)
_update_helper(last_update_time=timestamp)
def test_documentreference_empty_update():
# Create a minimal fake GAPIC with a dummy response.
firestore_api = mock.Mock(spec=["commit"])
firestore_api.commit.return_value = _make_commit_repsonse()
# Attach the fake GAPIC to a real client.
client = _make_client("potato-chip")
client._firestore_api_internal = firestore_api
# Actually make a document and call create().
document = _make_document_reference("baked", "Alaska", client=client)
# "Cheat" and use OrderedDict-s so that iteritems() is deterministic.
field_updates = {}
with pytest.raises(ValueError):
document.update(field_updates)
def _delete_helper(retry=None, timeout=None, **option_kwargs):
from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1.types import write
# Create a minimal fake GAPIC with a dummy response.
firestore_api = mock.Mock(spec=["commit"])
firestore_api.commit.return_value = _make_commit_repsonse()
# Attach the fake GAPIC to a real client.
client = _make_client("donut-base")
client._firestore_api_internal = firestore_api
kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
# Actually make a document and call delete().
document = _make_document_reference("where", "we-are", client=client)
if option_kwargs:
option = client.write_option(**option_kwargs)
delete_time = document.delete(option=option, **kwargs)
else:
option = None
delete_time = document.delete(**kwargs)
# Verify the response and the mocks.
assert delete_time is mock.sentinel.commit_time
write_pb = write.Write(delete=document._document_path)
if option is not None:
option.modify_write(write_pb)
firestore_api.commit.assert_called_once_with(
request={
"database": client._database_string,
"writes": [write_pb],
"transaction": None,
},
metadata=client._rpc_metadata,
**kwargs,
)
def test_documentreference_delete():
_delete_helper()
def test_documentreference_delete_with_option():
from google.protobuf import timestamp_pb2
timestamp_pb = timestamp_pb2.Timestamp(seconds=1058655101, nanos=100022244)
_delete_helper(last_update_time=timestamp_pb)
def test_documentreference_delete_w_retry_timeout():
from google.api_core.retry import Retry
retry = Retry(predicate=object())
timeout = 123.0
_delete_helper(retry=retry, timeout=timeout)
def _get_helper(
field_paths=None,
use_transaction=False,
not_found=False,
# This should be an impossible case, but we test against it for
# completeness
return_empty=False,
retry=None,
timeout=None,
):
from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1.types import common
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import firestore
from google.cloud.firestore_v1.transaction import Transaction
# Create a minimal fake GAPIC with a dummy response.
create_time = 123
update_time = 234
read_time = 345
firestore_api = mock.Mock(spec=["batch_get_documents"])
response = mock.create_autospec(firestore.BatchGetDocumentsResponse)
response.read_time = read_time
response.found = mock.create_autospec(document.Document)
response.found.fields = {}
response.found.create_time = create_time
response.found.update_time = update_time
client = _make_client("donut-base")
client._firestore_api_internal = firestore_api
document_reference = _make_document_reference("where", "we-are", client=client)
response.found.name = None if not_found else document_reference._document_path
response.missing = document_reference._document_path if not_found else None
def WhichOneof(val):
return "missing" if not_found else "found"
response._pb = response
response._pb.WhichOneof = WhichOneof
firestore_api.batch_get_documents.return_value = iter(
[response] if not return_empty else []
)
if use_transaction:
transaction = Transaction(client)
transaction_id = transaction._id = b"asking-me-2"
else:
transaction = None
kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
snapshot = document_reference.get(
field_paths=field_paths, transaction=transaction, **kwargs
)
assert snapshot.reference is document_reference
if not_found or return_empty:
assert snapshot._data is None
assert not snapshot.exists
assert snapshot.read_time is not None
assert snapshot.create_time is None
assert snapshot.update_time is None
else:
assert snapshot.to_dict() == {}
assert snapshot.exists
assert snapshot.read_time is read_time
assert snapshot.create_time is create_time
assert snapshot.update_time is update_time
# Verify the request made to the API
if field_paths is not None:
mask = common.DocumentMask(field_paths=sorted(field_paths))
else:
mask = None
if use_transaction:
expected_transaction_id = transaction_id
else:
expected_transaction_id = None
firestore_api.batch_get_documents.assert_called_once_with(
request={
"database": client._database_string,
"documents": [document_reference._document_path],
"mask": mask,
"transaction": expected_transaction_id,
},
metadata=client._rpc_metadata,
**kwargs,
)
def test_documentreference_get_not_found():
_get_helper(not_found=True)
def test_documentreference_get_default():
_get_helper()
def test_documentreference_get_return_empty():
_get_helper(return_empty=True)
def test_documentreference_get_w_retry_timeout():
from google.api_core.retry import Retry
retry = Retry(predicate=object())
timeout = 123.0
_get_helper(retry=retry, timeout=timeout)
def test_documentreference_get_w_string_field_path():
with pytest.raises(ValueError):
_get_helper(field_paths="foo")
def test_documentreference_get_with_field_path():
_get_helper(field_paths=["foo"])
def test_documentreference_get_with_multiple_field_paths():
_get_helper(field_paths=["foo", "bar.baz"])
def test_documentreference_get_with_transaction():
_get_helper(use_transaction=True)
def _collections_helper(page_size=None, retry=None, timeout=None):
from google.cloud.firestore_v1.collection import CollectionReference
from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1.services.firestore.client import FirestoreClient
collection_ids = ["coll-1", "coll-2"]
class Pager(object):
def __iter__(self):
yield from collection_ids
api_client = mock.create_autospec(FirestoreClient)
api_client.list_collection_ids.return_value = Pager()
client = _make_client()
client._firestore_api_internal = api_client
kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
# Actually make a document and call delete().
document = _make_document_reference("where", "we-are", client=client)
if page_size is not None:
collections = list(document.collections(page_size=page_size, **kwargs))
else:
collections = list(document.collections(**kwargs))
# Verify the response and the mocks.
assert len(collections) == len(collection_ids)
for collection, collection_id in zip(collections, collection_ids):
assert isinstance(collection, CollectionReference)
assert collection.parent == document
assert collection.id == collection_id
api_client.list_collection_ids.assert_called_once_with(
request={"parent": document._document_path, "page_size": page_size},
metadata=client._rpc_metadata,
**kwargs,
)
def test_documentreference_collections_wo_page_size():
_collections_helper()
def test_documentreference_collections_w_page_size():
_collections_helper(page_size=10)
def test_documentreference_collections_w_retry_timeout():
from google.api_core.retry import Retry
retry = Retry(predicate=object())
timeout = 123.0
_collections_helper(retry=retry, timeout=timeout)
@mock.patch("google.cloud.firestore_v1.document.Watch", autospec=True)
def test_documentreference_on_snapshot(watch):
client = mock.Mock(_database_string="sprinklez", spec=["_database_string"])
document = _make_document_reference("yellow", "mellow", client=client)
document.on_snapshot(None)
watch.for_document.assert_called_once()
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_client(project="project-project"):
from google.cloud.firestore_v1.client import Client
credentials = _make_credentials()
return Client(project=project, credentials=credentials)
|
agents/instance.py
|
Andrea-MariaDB-2/LastOrder-Dota2
| 332 |
96876
|
import numpy as np
from gym_env.feature_processors.enums import ACTION_NAME_TO_INDEX, DOUBLE_ACTION_PARA_TYPE
class Instance:
    # reward is the TD(n) reward plus the target state value
def __init__(self,
dota_time=None,
state_gf=None,
state_ucf=None,
state_ucategory=None,
mask=None,
reward=0,
action=None,
action_params=None,
state_value=0,
dump_path=None,
instant_reward=0.,
gae_advantage=0,
action_prob=None,
sub_action_prob=1,
final_action_prob=None,
model_time=None,
units_mask=None,
lstm_state=None,
lstm_gradient_mask=None,
embedding_dict=None,
dota_map=None,
update_times=0):
self.dota_time = dota_time
self.state_gf = state_gf
self.state_ucf = state_ucf
self.state_ucategory = state_ucategory
self.mask = mask
self.state_value = state_value
self.action = action
self.action_params = action_params
self.q_reward = reward
self.instant_reward = instant_reward
self.model_time = model_time
self.action_prob = action_prob
self.sub_action_prob = sub_action_prob
self.gae_advantage = gae_advantage
self.units_mask = units_mask
self.lstm_state = lstm_state
self.lstm_gradient_mask = 1
self.embedding_dict = embedding_dict
self.dota_map = dota_map
self.update_times = update_times
def zeros_like(self, target_instance):
self.dota_time = 0
self.state_gf = np.zeros_like(target_instance.state_gf)
self.state_ucf = np.zeros_like(target_instance.state_ucf)
        # to ensure there is at least one enemy hero/tower
self.state_ucategory = target_instance.state_ucategory
self.mask = np.zeros_like(target_instance.mask)
self.state_value = 0
self.action = ACTION_NAME_TO_INDEX["STOP"]
self.action_params = {}
for atype in DOUBLE_ACTION_PARA_TYPE:
self.action_params[atype] = 0
self.q_reward = 0
self.instant_reward = 0
self.model_time = target_instance.model_time
self.action_prob = 1
self.sub_action_prob = 1
self.gae_advantage = 0
self.units_mask = np.zeros_like(target_instance.units_mask)
self.lstm_state = np.zeros_like(target_instance.lstm_state)
self.lstm_gradient_mask = 1
self.embedding_dict = target_instance.embedding_dict
self.dota_map = np.zeros_like(target_instance.dota_map)
self.update_times = 0
def padding_instance(reward_instance, latest_instance, total_length, exclude_last_instance):
padding_length = total_length - len(reward_instance)
if exclude_last_instance:
start_position = -len(reward_instance) - 1
else:
start_position = -len(reward_instance)
padding_instances = latest_instance[start_position - padding_length:start_position]
if len(padding_instances) < padding_length:
zero_instance = Instance()
zero_instance.zeros_like(reward_instance[0])
for i in range(padding_length - len(padding_instances)):
padding_instances.insert(0, zero_instance)
    # padding instances do not contribute to the gradient
for index, item in enumerate(padding_instances):
padding_instances[index].lstm_gradient_mask = 0
for index, item in enumerate(reward_instance):
reward_instance[index].lstm_gradient_mask = 1
padding_instances.extend(reward_instance)
return padding_instances
|
kivymd/uix/navigationdrawer/navigationdrawer.py
|
tct123/KivyMD
| 668 |
96892
|
<gh_stars>100-1000
"""
Components/NavigationDrawer
===========================
.. seealso::
`Material Design 2 spec, Navigation drawer <https://material.io/components/navigation-drawer>`_ and
`Material Design 3 spec, Navigation drawer <https://m3.material.io/components/navigation-drawer/overview>`_
.. rubric:: Navigation drawers provide access to destinations in your app.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer.png
:align: center
When using the :class:`~MDNavigationDrawer` class, the skeleton of your `KV`
markup should look like this:
Anatomy
-------
.. code-block:: kv
Root:
MDNavigationLayout:
ScreenManager:
Screen_1:
Screen_2:
MDNavigationDrawer:
            # This custom rule should implement what will appear in your
# MDNavigationDrawer.
ContentNavigationDrawer:
A simple example
----------------
.. code-block:: python
from kivy.lang import Builder
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.app import MDApp
KV = '''
#:import get_color_from_hex kivy.utils.get_color_from_hex
MDScreen:
MDNavigationLayout:
ScreenManager:
MDScreen:
MDToolbar:
title: "Navigation Drawer"
elevation: 10
pos_hint: {"top": 1}
md_bg_color: get_color_from_hex("#e7e4c0")
specific_text_color: get_color_from_hex("#4a4939")
left_action_items:
[['menu', lambda x: nav_drawer.set_state("open")]]
MDNavigationDrawer:
id: nav_drawer
md_bg_color: get_color_from_hex("#f7f4e7")
ContentNavigationDrawer:
'''
class ContentNavigationDrawer(MDBoxLayout):
pass
class TestNavigationDrawer(MDApp):
def build(self):
return Builder.load_string(KV)
TestNavigationDrawer().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer.gif
:align: center
.. Note:: :class:`~MDNavigationDrawer` is an empty
:class:`~kivymd.uix.card.MDCard` panel.
Custom content for navigation drawer
------------------------------------
Let's extend the ``ContentNavigationDrawer`` class from the above example and
create content for our :class:`~MDNavigationDrawer` panel:
.. code-block:: kv
# Menu item in the DrawerList list.
<ItemDrawer>
theme_text_color: "Custom"
on_release: self.parent.set_color_item(self)
IconLeftWidget:
id: icon
icon: root.icon
theme_text_color: "Custom"
text_color: root.text_color
.. code-block:: python
class ItemDrawer(OneLineIconListItem):
icon = StringProperty()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/drawer-item.png
:align: center
Top of ``ContentNavigationDrawer`` and ``DrawerList`` for menu items:
.. code-block:: kv
<ContentNavigationDrawer>
orientation: "vertical"
padding: "8dp"
spacing: "8dp"
AnchorLayout:
anchor_x: "left"
size_hint_y: None
height: avatar.height
Image:
id: avatar
size_hint: None, None
size: "56dp", "56dp"
source: "kivymd.png"
MDLabel:
text: "KivyMD library"
font_style: "Button"
size_hint_y: None
height: self.texture_size[1]
MDLabel:
text: "<EMAIL>"
font_style: "Caption"
size_hint_y: None
height: self.texture_size[1]
ScrollView:
DrawerList:
id: md_list
.. code-block:: python
class ContentNavigationDrawer(BoxLayout):
pass
class DrawerList(ThemableBehavior, MDList):
def set_color_item(self, instance_item):
'''Called when tap on a menu item.'''
# Set the color of the icon and text for the menu item.
for item in self.children:
if item.text_color == self.theme_cls.primary_color:
item.text_color = self.theme_cls.text_color
break
instance_item.text_color = self.theme_cls.primary_color
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/drawer-top.png
:align: center
Create a menu list for ``ContentNavigationDrawer``:
.. code-block:: python
def on_start(self):
icons_item = {
"folder": "My files",
"account-multiple": "Shared with me",
"star": "Starred",
"history": "Recent",
"checkbox-marked": "Shared with me",
"upload": "Upload",
}
for icon_name in icons_item.keys():
self.root.ids.content_drawer.ids.md_list.add_widget(
ItemDrawer(icon=icon_name, text=icons_item[icon_name])
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/drawer-work.gif
:align: center
Standard content for the navigation drawer
-------------------------------------------
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
KV = '''
#:import get_color_from_hex kivy.utils.get_color_from_hex
#:set text_color get_color_from_hex("#4a4939")
#:set focus_color get_color_from_hex("#e7e4c0")
#:set ripple_color get_color_from_hex("#c5bdd2")
#:set bg_color get_color_from_hex("#f7f4e7")
#:set selected_color get_color_from_hex("#0c6c4d")
<DrawerClickableItem@MDNavigationDrawerItem>
focus_color: focus_color
unfocus_color: bg_color
text_color: text_color
icon_color: text_color
ripple_color: ripple_color
selected_color: selected_color
<DrawerLabelItem@MDNavigationDrawerItem>
bg_color: bg_color
text_color: text_color
icon_color: text_color
_no_ripple_effect: True
MDScreen:
MDNavigationLayout:
ScreenManager:
MDScreen:
MDToolbar:
title: "Navigation Drawer"
elevation: 10
pos_hint: {"top": 1}
md_bg_color: focus_color
specific_text_color: text_color
left_action_items:
[ \
[ \
'menu', lambda x: \
nav_drawer.set_state("open") \
if nav_drawer.state == "close" else \
nav_drawer.set_state("close") \
] \
]
MDNavigationDrawer:
id: nav_drawer
radius: (0, 16, 16, 0) if self.anchor == "left" else (16, 0, 0, 16)
md_bg_color: bg_color
MDNavigationDrawerMenu:
MDNavigationDrawerHeader:
title: "Header title"
title_color: text_color
text: "Header text"
title_color: text_color
spacing: "4dp"
padding: "12dp", 0, 0, "56dp"
MDNavigationDrawerLabel:
text: "Mail"
DrawerClickableItem:
icon: "gmail"
right_text: "+99"
text_right_color: text_color
text: "Inbox"
DrawerClickableItem:
icon: "send"
text: "Outbox"
MDNavigationDrawerDivider:
MDNavigationDrawerLabel:
text: "Labels"
DrawerLabelItem:
icon: "information-outline"
text: "Label"
DrawerLabelItem:
icon: "information-outline"
text: "Label"
'''
class TestNavigationDrawer(MDApp):
def build(self):
self.theme_cls.primary_palette = "Indigo"
return Builder.load_string(KV)
TestNavigationDrawer().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer-standatd-content.gif
:align: center
Switching screens in the ``ScreenManager`` and using the common ``MDToolbar``
-----------------------------------------------------------------------------
.. code-block:: python
from kivy.lang import Builder
from kivy.properties import ObjectProperty
from kivymd.app import MDApp
from kivymd.uix.boxlayout import MDBoxLayout
KV = '''
<ContentNavigationDrawer>
ScrollView:
MDList:
OneLineListItem:
text: "Screen 1"
on_press:
root.nav_drawer.set_state("close")
root.screen_manager.current = "scr 1"
OneLineListItem:
text: "Screen 2"
on_press:
root.nav_drawer.set_state("close")
root.screen_manager.current = "scr 2"
MDScreen:
MDToolbar:
id: toolbar
pos_hint: {"top": 1}
elevation: 10
title: "MDNavigationDrawer"
left_action_items: [["menu", lambda x: nav_drawer.set_state("open")]]
MDNavigationLayout:
x: toolbar.height
ScreenManager:
id: screen_manager
MDScreen:
name: "scr 1"
MDLabel:
text: "Screen 1"
halign: "center"
MDScreen:
name: "scr 2"
MDLabel:
text: "Screen 2"
halign: "center"
MDNavigationDrawer:
id: nav_drawer
ContentNavigationDrawer:
screen_manager: screen_manager
nav_drawer: nav_drawer
'''
class ContentNavigationDrawer(MDBoxLayout):
screen_manager = ObjectProperty()
nav_drawer = ObjectProperty()
class TestNavigationDrawer(MDApp):
def build(self):
return Builder.load_string(KV)
TestNavigationDrawer().run()
"""
__all__ = (
"MDNavigationLayout",
"MDNavigationDrawer",
"MDNavigationDrawerItem",
"MDNavigationDrawerMenu",
"MDNavigationDrawerHeader",
"MDNavigationDrawerLabel",
"MDNavigationDrawerDivider",
)
import os
from typing import Union
from kivy.animation import Animation, AnimationTransition
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.graphics.context_instructions import Color
from kivy.graphics.vertex_instructions import Rectangle
from kivy.lang import Builder
from kivy.properties import (
AliasProperty,
BooleanProperty,
ColorProperty,
NumericProperty,
ObjectProperty,
OptionProperty,
StringProperty,
VariableListProperty,
)
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.screenmanager import ScreenManager
from kivy.uix.scrollview import ScrollView
from kivymd import uix_path
from kivymd.uix.behaviors import FakeRectangularElevationBehavior, FocusBehavior
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.card import MDCard
from kivymd.uix.list import MDList, OneLineAvatarIconListItem
from kivymd.uix.toolbar import MDToolbar
with open(
os.path.join(uix_path, "navigationdrawer", "navigationdrawer.kv"),
encoding="utf-8",
) as kv_file:
Builder.load_string(kv_file.read())
class NavigationDrawerContentError(Exception):
pass
class MDNavigationLayout(FloatLayout):
_scrim_color = ObjectProperty(None)
_scrim_rectangle = ObjectProperty(None)
_screen_manager = ObjectProperty(None)
_navigation_drawer = ObjectProperty(None)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.bind(width=self.update_pos)
def update_pos(self, instance_navigation_drawer, pos_x: float) -> None:
drawer = self._navigation_drawer
manager = self._screen_manager
if not drawer or not manager:
return
if drawer.type == "standard":
manager.size_hint_x = None
if drawer.anchor == "left":
manager.x = drawer.width + drawer.x
manager.width = self.width - manager.x
else:
manager.x = 0
manager.width = drawer.x
elif drawer.type == "modal":
manager.size_hint_x = None
manager.x = 0
if drawer.anchor == "left":
manager.width = self.width - manager.x
else:
manager.width = self.width
def add_scrim(self, instance_manager: ScreenManager) -> None:
with instance_manager.canvas.after:
self._scrim_color = Color(rgba=[0, 0, 0, 0])
self._scrim_rectangle = Rectangle(
pos=instance_manager.pos, size=instance_manager.size
)
instance_manager.bind(
pos=self.update_scrim_rectangle,
size=self.update_scrim_rectangle,
)
def update_scrim_rectangle(
self, instance_manager: ScreenManager, size: list
) -> None:
self._scrim_rectangle.pos = self.pos
self._scrim_rectangle.size = self.size
def add_widget(self, widget, index=0, canvas=None):
"""
Only the following widgets are allowed:
:class:`~kivy.uix.screenmanager.ScreenManager`,
:class:`~MDNavigationDrawer` and :class:`~kivymd.uix.toolbar.MDToolbar`.
"""
if not isinstance(
widget, (MDNavigationDrawer, ScreenManager, MDToolbar)
):
raise NavigationDrawerContentError(
"The MDNavigationLayout must contain "
"only `MDNavigationDrawer` and `ScreenManager`"
)
if isinstance(widget, ScreenManager):
self._screen_manager = widget
self.add_scrim(widget)
if isinstance(widget, MDNavigationDrawer):
self._navigation_drawer = widget
widget.bind(
x=self.update_pos, width=self.update_pos, anchor=self.update_pos
)
if len(self.children) > 3:
raise NavigationDrawerContentError(
"The MDNavigationLayout must contain "
"only `MDNavigationDrawer` and `ScreenManager`"
)
return super().add_widget(widget)
class MDNavigationDrawerLabel(MDBoxLayout):
"""
Implements a label for the :class:`~MDNavigationDrawer` menu.
.. versionadded:: 1.0.0
.. code-block:: kv
MDNavigationDrawer:
MDNavigationDrawerMenu:
MDNavigationDrawerLabel:
text: "Mail"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer-label.png
:align: center
"""
text = StringProperty()
"""
Text label.
:attr:`text` is a :class:`~kivy.properties.StringProperty`
and defaults to `''`.
"""
padding = VariableListProperty(["20dp", 0, 0, "8dp"])
"""
Padding between layout box and children: [padding_left, padding_top,
padding_right, padding_bottom].
Padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty`
and defaults to `['20dp', 0, 0, '8dp']`.
"""
class MDNavigationDrawerDivider(MDBoxLayout):
"""
Implements a divider for the :class:`~MDNavigationDrawer` menu.
.. versionadded:: 1.0.0
.. code-block:: kv
MDNavigationDrawer:
MDNavigationDrawerMenu:
MDNavigationDrawerLabel:
text: "Mail"
MDNavigationDrawerDivider:
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer-divider.png
:align: center
"""
padding = VariableListProperty(["20dp", "12dp", 0, "12dp"])
"""
Padding between layout box and children: [padding_left, padding_top,
padding_right, padding_bottom].
Padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty`
and defaults to `['20dp', '12dp', 0, '12dp']`.
"""
color = ColorProperty(None)
"""
Divider color in ``rgba`` format.
:attr:`color` is a :class:`~kivy.properties.ColorProperty`
and defaults to `None`.
"""
class MDNavigationDrawerHeader(MDBoxLayout):
"""
Implements a header for the :class:`~MDNavigationDrawer` menu.
.. versionadded:: 1.0.0
.. code-block:: kv
MDNavigationDrawer:
MDNavigationDrawerMenu:
MDNavigationDrawerHeader:
title: "Header title"
text: "Header text"
spacing: "4dp"
padding: "12dp", 0, 0, "56dp"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer-header.png
:align: center
"""
source = StringProperty()
"""
Image logo path.
.. code-block:: kv
MDNavigationDrawer:
MDNavigationDrawerMenu:
MDNavigationDrawerHeader:
title: "Header title"
text: "Header text"
source: "logo.png"
spacing: "4dp"
padding: "12dp", 0, 0, "56dp"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer-header-source.png
:align: center
:attr:`source` is a :class:`~kivy.properties.StringProperty`
and defaults to `''`.
"""
title = StringProperty()
"""
Title shown in the first line.
:attr:`title` is a :class:`~kivy.properties.StringProperty`
and defaults to `''`.
"""
title_halign = StringProperty("left")
"""
Horizontal alignment of the title (first line).
:attr:`title_halign` is a :class:`~kivy.properties.StringProperty`
and defaults to `'left'`.
"""
title_color = ColorProperty(None)
"""
Title text color.
:attr:`title_color` is a :class:`~kivy.properties.ColorProperty`
and defaults to `None`.
"""
title_font_style = StringProperty("H4")
"""
Font style of the title shown in the first line.
:attr:`title_font_style` is a :class:`~kivy.properties.StringProperty`
and defaults to `'H4'`.
"""
title_font_size = StringProperty("34sp")
"""
Font size of the title shown in the first line.
:attr:`title_font_size` is a :class:`~kivy.properties.StringProperty`
and defaults to `'34sp'`.
"""
text = StringProperty()
"""
Text shown in the second line.
:attr:`text` is a :class:`~kivy.properties.StringProperty`
and defaults to `''`.
"""
text_halign = StringProperty("left")
"""
Horizontal alignment of the text (second line).
:attr:`text_halign` is a :class:`~kivy.properties.StringProperty`
and defaults to `'left'`.
"""
text_color = ColorProperty(None)
"""
Text color of the second line.
:attr:`text_color` is a :class:`~kivy.properties.ColorProperty`
and defaults to `None`.
"""
text_font_style = StringProperty("H6")
"""
Font style of the text shown in the second line.
:attr:`text_font_style` is a :class:`~kivy.properties.StringProperty`
and defaults to `'H6'`.
"""
text_font_size = StringProperty("20sp")
"""
Font size of the text shown in the second line.
:attr:`text_font_size` is a :class:`~kivy.properties.StringProperty`
and defaults to `'20sp'`.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
Clock.schedule_once(self.check_content)
def check_content(self, interval: Union[int, float]) -> None:
"""Removes widgets that the user has not added to the container."""
if not self.title:
self.ids.label_box.remove_widget(self.ids.title)
if not self.text:
self.ids.label_box.remove_widget(self.ids.text)
if not self.source:
self.remove_widget(self.ids.logo)
class MDNavigationDrawerItem(OneLineAvatarIconListItem, FocusBehavior):
"""
Implements an item for the :class:`~MDNavigationDrawer` menu list.
.. versionadded:: 1.0.0
.. code-block:: kv
MDNavigationDrawer:
MDNavigationDrawerMenu:
MDNavigationDrawerHeader:
title: "Header title"
text: "Header text"
spacing: "4dp"
padding: "12dp", 0, 0, "56dp"
MDNavigationDrawerItem:
icon: "gmail"
right_text: "+99"
text: "Inbox"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer-item.png
:align: center
"""
selected = BooleanProperty(False)
"""
Is the item selected.
:attr:`selected` is a :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
icon = StringProperty()
"""
Icon item.
:attr:`icon` is a :class:`~kivy.properties.StringProperty`
and defaults to `''`.
"""
icon_color = ColorProperty(None)
"""
Icon color item.
:attr:`icon_color` is a :class:`~kivy.properties.ColorProperty`
and defaults to `None`.
"""
selected_color = ColorProperty([0, 0, 0, 1])
"""
The color of the icon and text of the selected item.
:attr:`selected_color` is a :class:`~kivy.properties.ColorProperty`
and defaults to `[0, 0, 0, 1]`.
"""
right_text = StringProperty()
"""
Right text item.
:attr:`right_text` is a :class:`~kivy.properties.StringProperty`
and defaults to `''`.
"""
text_right_color = ColorProperty(None)
"""
Right text color item.
:attr:`text_right_color` is a :class:`~kivy.properties.ColorProperty`
and defaults to `None`.
"""
_text_color = None
_text_right_color = None
# kivymd.uix.navigationdrawer.navigationdrawer.MDNavigationDrawerMenu
_drawer_menu = ObjectProperty()
class MDNavigationDrawerMenu(ScrollView):
"""
Implements a scrollable list for menu items of the
:class:`~MDNavigationDrawer` class.
.. versionadded:: 1.0.0
.. code-block:: kv
MDNavigationDrawer:
MDNavigationDrawerMenu:
# Your menu items.
...
"""
spacing = NumericProperty(0)
"""
Spacing between children, in pixels.
:attr:`spacing` is a :class:`~kivy.properties.NumericProperty`
and defaults to `0`.
"""
def add_widget(self, widget, *args, **kwargs):
if isinstance(widget, MDList):
return super().add_widget(widget, *args, **kwargs)
else:
if isinstance(widget, MDNavigationDrawerItem):
widget._drawer_menu = self
self.ids.menu.add_widget(widget)
def reset_active_color(self, item: MDNavigationDrawerItem) -> None:
for widget in self.ids.menu.children:
if issubclass(widget.__class__, MDNavigationDrawerItem):
if widget != item:
widget.selected = False
else:
widget.selected = True
if (
issubclass(widget.__class__, MDNavigationDrawerItem)
and widget != item
):
if widget._text_color:
widget.text_color = widget._text_color
class MDNavigationDrawer(MDCard, FakeRectangularElevationBehavior):
type = OptionProperty("modal", options=("standard", "modal"))
"""
Type of drawer. The modal type is displayed on top of the screen. The
standard type is placed at the left or right edge of the screen. The standard
type also automatically disables :attr:`close_on_click` and
:attr:`enable_swiping`, so the drawer is not closed by clicks or swipes.
Standard
--------
.. code-block:: kv
MDNavigationDrawer:
type: "standard"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer-standard.gif
:align: center
Modal
-----
.. code-block:: kv
MDNavigationDrawer:
type: "modal"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer-modal.gif
:align: center
:attr:`type` is a :class:`~kivy.properties.OptionProperty`
and defaults to `'modal'`.
"""
anchor = OptionProperty("left", options=("left", "right"))
"""
Anchoring screen edge for drawer. Set it to `'right'` for right-to-left
languages. Available options are: `'left'`, `'right'`.
Left
----
.. code-block:: kv
MDNavigationDrawer:
anchor: "left"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-type-left.png
:align: center
Right
-----
.. code-block:: kv
MDNavigationDrawer:
anchor: "right"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-type-right.png
:align: center
:attr:`anchor` is a :class:`~kivy.properties.OptionProperty`
and defaults to `'left'`.
"""
# FIXME: Doesn't work in Kivy v2.1.0.
scrim_color = ColorProperty([0, 0, 0, 0.5])
"""
Color of the scrim. The alpha channel is multiplied by :attr:`_scrim_alpha`.
Set the fourth channel to 0 to disable the scrim.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer-scrim-color.png
:align: center
.. code-block:: kv
MDNavigationDrawer:
scrim_color: 0, 0, 0, .8
# scrim_color: 0, 0, 0, .2
:attr:`scrim_color` is a :class:`~kivy.properties.ColorProperty`
and defaults to `[0, 0, 0, 0.5]`.
"""
padding = VariableListProperty([16, 16, 12, 16])
"""
Padding between layout box and children: [padding_left, padding_top,
padding_right, padding_bottom].
Padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
.. versionchanged:: 1.0.0
.. code-block:: kv
MDNavigationDrawer:
padding: 56, 56, 12, 16
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/navigation-drawer-padding.png
:align: center
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and
defaults to '[16, 16, 12, 16]'.
"""
close_on_click = BooleanProperty(True)
"""
Close the drawer when the scrim is clicked or the keyboard escape key is
pressed. Automatically set to `False` for the "standard" type.
:attr:`close_on_click` is a :class:`~kivy.properties.BooleanProperty`
and defaults to `True`.
"""
state = OptionProperty("close", options=("close", "open"))
"""
Indicates whether the panel is closed or opened. Set after :attr:`status` changes.
Available options are: `'close'`, `'open'`.
:attr:`state` is a :class:`~kivy.properties.OptionProperty`
and defaults to `'close'`.
"""
status = OptionProperty(
"closed",
options=(
"closed",
"opening_with_swipe",
"opening_with_animation",
"opened",
"closing_with_swipe",
"closing_with_animation",
),
)
"""
Detailed state. Set before :attr:`state`. Bind to :attr:`state` instead
of :attr:`status`. Available options are: `'closed'`,
`'opening_with_swipe'`, `'opening_with_animation'`, `'opened'`,
`'closing_with_swipe'`, `'closing_with_animation'`.
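A minimal sketch of reacting to the drawer being opened or closed, binding to
:attr:`state` (as recommended above) rather than :attr:`status`; the
``nav_drawer`` id is an assumption taken from the examples at the top of this
module:

.. code-block:: python

    # e.g. in your MDApp subclass
    def on_start(self):
        self.root.ids.nav_drawer.bind(
            state=lambda drawer, value: print("drawer is now", value)
        )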
:attr:`status` is a :class:`~kivy.properties.OptionProperty`
and defaults to `'closed'`.
"""
open_progress = NumericProperty(0.0)
"""
Fraction of the side panel that is visible, specified as a floating point
number in the range 0-1: 0.0 if the panel is closed and 1.0 if the panel is
opened.
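A hedged sketch of using the value, e.g. to fade another widget in step with
the drawer (the ``overlay`` widget is an assumption, not part of this module):

.. code-block:: python

    nav_drawer.bind(
        open_progress=lambda drawer, value: setattr(overlay, "opacity", value)
    )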
:attr:`open_progress` is a :class:`~kivy.properties.NumericProperty`
and defaults to `0.0`.
"""
enable_swiping = BooleanProperty(True)
"""
Allows the navigation drawer to be opened or closed with a swipe.
Automatically set to `False` for the "standard" type.
:attr:`enable_swiping` is a :class:`~kivy.properties.BooleanProperty`
and defaults to `True`.
"""
swipe_distance = NumericProperty(10)
"""
The swipe distance, in pixels, after which the navigation drawer starts to
move.
:attr:`swipe_distance` is a :class:`~kivy.properties.NumericProperty`
and defaults to `10`.
"""
swipe_edge_width = NumericProperty(20)
"""
The width, in pixels, of the edge area inside which a swipe must start in
order to drag the navigation drawer.
:attr:`swipe_edge_width` is a :class:`~kivy.properties.NumericProperty`
and defaults to `20`.
"""
def _get_scrim_alpha(self):
_scrim_alpha = 0
if self.type == "modal":
_scrim_alpha = self._scrim_alpha_transition(self.open_progress)
if (
isinstance(self.parent, MDNavigationLayout)
and self.parent._scrim_color
):
self.parent._scrim_color.rgba = self.scrim_color[:3] + [
self.scrim_color[3] * _scrim_alpha
]
return _scrim_alpha
_scrim_alpha = AliasProperty(
_get_scrim_alpha,
None,
bind=("_scrim_alpha_transition", "open_progress", "scrim_color"),
)
"""
Multiplier for alpha channel of :attr:`scrim_color`. For internal
usage only.
"""
scrim_alpha_transition = StringProperty("linear")
"""
The name of the animation transition type to use for changing
:attr:`scrim_alpha`.
:attr:`scrim_alpha_transition` is a :class:`~kivy.properties.StringProperty`
and defaults to `'linear'`.
"""
def _get_scrim_alpha_transition(self):
return getattr(AnimationTransition, self.scrim_alpha_transition)
_scrim_alpha_transition = AliasProperty(
_get_scrim_alpha_transition,
None,
bind=("scrim_alpha_transition",),
cache=True,
)
opening_transition = StringProperty("out_cubic")
"""
The name of the animation transition type to use when animating to
the :attr:`state` `'open'`.
:attr:`opening_transition` is a :class:`~kivy.properties.StringProperty`
and defaults to `'out_cubic'`.
"""
opening_time = NumericProperty(0.2)
"""
The time taken for the panel to slide to the :attr:`state` `'open'`.
:attr:`opening_time` is a :class:`~kivy.properties.NumericProperty`
and defaults to `0.2`.
"""
closing_transition = StringProperty("out_sine")
"""The name of the animation transition type to use when animating to
the :attr:`state` 'close'.
:attr:`closing_transition` is a :class:`~kivy.properties.StringProperty`
and defaults to `'out_sine'`.
"""
closing_time = NumericProperty(0.2)
"""
The time taken for the panel to slide to the :attr:`state` `'close'`.
:attr:`closing_time` is a :class:`~kivy.properties.NumericProperty`
and defaults to `0.2`.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.bind(
open_progress=self.update_status,
status=self.update_status,
state=self.update_status,
)
Window.bind(on_keyboard=self._handle_keyboard)
def set_state(self, new_state="toggle", animation=True) -> None:
"""
Change the state of the side panel.
``new_state`` can be one of `"toggle"`, `"open"` or `"close"`.
"""
if new_state == "toggle":
new_state = "close" if self.state == "open" else "open"
if new_state == "open":
Animation.cancel_all(self, "open_progress")
self.status = "opening_with_animation"
if animation:
Animation(
open_progress=1.0,
d=self.opening_time * (1 - self.open_progress),
t=self.opening_transition,
).start(self)
else:
self.open_progress = 1
else: # "close"
Animation.cancel_all(self, "open_progress")
self.status = "closing_with_animation"
if animation:
Animation(
open_progress=0.0,
d=self.closing_time * self.open_progress,
t=self.closing_transition,
).start(self)
else:
self.open_progress = 0
def update_status(self, *_) -> None:
status = self.status
if status == "closed":
self.state = "close"
elif status == "opened":
self.state = "open"
elif self.open_progress == 1 and status == "opening_with_animation":
self.status = "opened"
self.state = "open"
elif self.open_progress == 0 and status == "closing_with_animation":
self.status = "closed"
self.state = "close"
elif status in (
"opening_with_swipe",
"opening_with_animation",
"closing_with_swipe",
"closing_with_animation",
):
pass
if self.status == "closed":
self.opacity = 0
else:
self.opacity = 1
def get_dist_from_side(self, x: float) -> float:
if self.anchor == "left":
return 0 if x < 0 else x
return 0 if x > Window.width else Window.width - x
def on_touch_down(self, touch):
if self.status == "closed":
return False
elif self.status == "opened":
for child in self.children[:]:
if child.dispatch("on_touch_down", touch):
return True
if self.type == "standard" and not self.collide_point(
touch.ox, touch.oy
):
return False
return True
def on_touch_move(self, touch):
if self.enable_swiping:
if self.status == "closed":
if (
self.get_dist_from_side(touch.ox) <= self.swipe_edge_width
and abs(touch.x - touch.ox) > self.swipe_distance
):
self.status = "opening_with_swipe"
elif self.status == "opened":
if abs(touch.x - touch.ox) > self.swipe_distance:
self.status = "closing_with_swipe"
if self.status in ("opening_with_swipe", "closing_with_swipe"):
self.open_progress = max(
min(
self.open_progress
+ (touch.dx if self.anchor == "left" else -touch.dx)
/ self.width,
1,
),
0,
)
return True
return super().on_touch_move(touch)
def on_touch_up(self, touch):
if self.status == "opening_with_swipe":
if self.open_progress > 0.5:
self.set_state("open", animation=True)
else:
self.set_state("close", animation=True)
elif self.status == "closing_with_swipe":
if self.open_progress < 0.5:
self.set_state("close", animation=True)
else:
self.set_state("open", animation=True)
elif self.status == "opened":
if self.close_on_click and not self.collide_point(
touch.ox, touch.oy
):
self.set_state("close", animation=True)
elif self.type == "standard" and not self.collide_point(
touch.ox, touch.oy
):
return False
elif self.status == "closed":
return False
return True
def on_radius(self, instance_navigation_drawer, radius_value: list) -> None:
self._radius = radius_value
def on_type(self, instance_navigation_drawer, drawer_type: str) -> None:
if self.type == "standard":
self.enable_swiping = False
self.close_on_click = False
else:
self.enable_swiping = True
self.close_on_click = True
def _handle_keyboard(self, window, key, *largs):
if key == 27 and self.status == "opened" and self.close_on_click:
self.set_state("close")
return True
|
poline/core.py
|
riolet/poline
| 140 |
96918
|
<gh_stars>100-1000
from __future__ import print_function
import re
import os
import sys
import argparse
import collections
import subprocess
import json
from poline.utilfuncs import *
from poline.fields import Fields
from itertools import islice
from operator import itemgetter, attrgetter
if sys.version_info >= (3,0):
from urllib.parse import urlparse
else:
from urlparse import urlparse
from pprint import pprint, pformat
if sys.version_info >= (3,5):
_collections_Generator = collections.Generator
else:
from poline import _com_collections
_collections_Generator = _com_collections.Generator
T = True
F = False
def _len(value):
if isinstance(value, _collections_Generator):
return sum(1 for x in value)
else:
return len(value)
def _stdin(args):
for line in sys.stdin:
if args.separator is not None:
yield Fields(line.strip().split(args.separator))
elif args.split:
yield Fields(line.strip().split())
else:
yield line.strip()
sys.stdin.close()
# Hello old friends
_shell_commands= ['cp', 'df', 'docker', 'du', 'find', 'grep', 'git', 'history',
'ln', 'ls', 'lsof', 'mv', 'netstat', 'nmcli', 'ps', 'rm',
'stat', 'whois']
for _shell_command in _shell_commands:
exec ("""{funcname} = lambda *args, **kwargs: sh(['{funcname}']+list(args), **kwargs)""".format(funcname=_shell_command))
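# For illustration, each iteration of the loop above generates a wrapper of
# the form (shown here for 'ls'):
#     ls = lambda *args, **kwargs: sh(['ls'] + list(args), **kwargs)
# i.e. the command name is prepended to the argument list and everything is
# forwarded to sh() from poline.utilfuncs.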
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('expression', nargs='+', help="python expression")
parser.add_argument('-F', '--separator', default=None, help="split each line by SEPARATOR")
parser.add_argument('-s', '--split', const=True, default=False, action='store_const', help="split each line")
parser.add_argument('-q', '--quiet', const=True, default=False, action='store_const',
help="don't implicitly print results")
if argv is not None:
args = parser.parse_args(argv)
else:
args = parser.parse_args()
result = _stdin(args)
for expression in args.expression:
separator = None
new_result = []
if expression.startswith('|') or expression.startswith('%'):
if expression.startswith('%'):
expression = expression[1:]
exp_parts = expression.split('%')
separator = exp_parts[0]
expression = '%'.join(exp_parts[1:])
else:
expression = expression[1:]
for result_line in result:
if separator:
result_parts = Fields(result_line.split(separator))
else:
result_parts = Fields(result_line.split())
invars = {
'_': result,
'__': result_parts,
'__str': result_line,
'len': _len,
}
for result_pard_idx in range(len(result_parts)+10):
invars['_{}'.format(result_pard_idx)] = result_parts[result_pard_idx]
new_result += [eval('(%s)' % expression, globals(), invars)]
result = new_result
elif expression.startswith(':'):
invars = {
'_': result,
'len': _len,
}
expression = expression[1:]
exp_parts = expression.split(':')
tuples = exp_parts[0]
expression = '{} {}'.format(':'.join(exp_parts[1:]), 'for ({}) in _'.format(tuples))
result = eval('(%s)' % expression, globals(), invars)
else:
invars = {
'_': result,
'len': _len,
}
result = eval('(%s)' % expression, globals(), invars)
#argv is not None when we're calling this from a unit test
if argv is not None:
return result
if not args.quiet:
if isinstance(result, (list, _collections_Generator)):
for line in result:
if isinstance(line, (list, tuple)):
print(*line)
else:
print(line)
else:
print(result)
if __name__ == "__main__":
main()
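# A hedged usage sketch (assuming the package is importable, so this module can
# be run with `python -m poline.core`). With -s each stdin line is split into
# fields before the expressions are evaluated, e.g.:
#     printf 'a b\nc d\n' | python -m poline.core -s '[x[0] for x in _]'
# prints the first field of every line ('a' and 'c').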
|
playlist/playlist.py
|
CodeMasters688/pp
| 505 |
96923
|
<reponame>CodeMasters688/pp<gh_stars>100-1000
"""
playlist.py
Description: Playing with iTunes Playlists.
Author: <NAME>
Website: electronut.in
"""
import re, argparse
import sys
from matplotlib import pyplot
import plistlib
import numpy as np
def findCommonTracks(fileNames):
"""
Find common tracks in given playlist files, and save them
to common.txt.
"""
# a list of sets of track names
trackNameSets = []
for fileName in fileNames:
# create a new set
trackNames = set()
# read in playlist
plist = plistlib.readPlist(fileName)
# get the tracks
tracks = plist['Tracks']
# iterate through tracks
for trackId, track in tracks.items():
try:
# add name to set
trackNames.add(track['Name'])
except:
# ignore
pass
# add to list
trackNameSets.append(trackNames)
# get set of common tracks
commonTracks = set.intersection(*trackNameSets)
# write to file
if len(commonTracks) > 0:
f = open("common.txt", 'wb')
for val in commonTracks:
s = "%s\n" % val
f.write(s.encode("UTF-8"))
f.close()
print("%d common tracks found. "
"Track names written to common.txt." % len(commonTracks))
else:
print("No common tracks!")
def plotStats(fileName):
"""
Plot some statistics by reading track information from playlist.
"""
# read in playlist
plist = plistlib.readPlist(fileName)
# get the tracks
tracks = plist['Tracks']
# create lists of ratings and duration
ratings = []
durations = []
# iterate through tracks
for trackId, track in tracks.items():
try:
ratings.append(track['Album Rating'])
durations.append(track['Total Time'])
except:
# ignore
pass
# ensure valid data was collected
if ratings == [] or durations == []:
print("No valid Album Rating/Total Time data in %s." % fileName)
return
# cross plot
x = np.array(durations, np.int32)
# convert to minutes
x = x/60000.0
y = np.array(ratings, np.int32)
pyplot.subplot(2, 1, 1)
pyplot.plot(x, y, 'o')
pyplot.axis([0, 1.05*np.max(x), -1, 110])
pyplot.xlabel('Track duration')
pyplot.ylabel('Track rating')
# plot histogram
pyplot.subplot(2, 1, 2)
pyplot.hist(x, bins=20)
pyplot.xlabel('Track duration')
pyplot.ylabel('Count')
# show plot
pyplot.show()
def findDuplicates(fileName):
"""
Find duplicate tracks in given playlist.
"""
print('Finding duplicate tracks in %s...' % fileName)
# read in playlist
plist = plistlib.readPlist(fileName)
# get the tracks
tracks = plist['Tracks']
# create a track name dict
trackNames = {}
# iterate through tracks
for trackId, track in tracks.items():
try:
name = track['Name']
duration = track['Total Time']
# is there an entry already?
if name in trackNames:
# if name and duration matches, increment count
# duration rounded to nearest second
if duration//1000 == trackNames[name][0]//1000:
count = trackNames[name][1]
trackNames[name] = (duration, count+1)
else:
# add entry - duration and count
trackNames[name] = (duration, 1)
except:
# ignore
pass
# store duplicates as (name, count) tuples
dups = []
for k, v in trackNames.items():
if v[1] > 1:
dups.append((v[1], k))
# save dups to file
if len(dups) > 0:
print("Found %d duplicates. Track names saved to dup.txt" % len(dups))
else:
print("No duplicate tracks found!")
f = open("dups.txt", 'w')
for val in dups:
f.write("[%d] %s\n" % (val[0], val[1]))
f.close()
# Gather our code in a main() function
def main():
# create parser
descStr = """
This program analyzes playlist files (.xml) exported from iTunes.
"""
parser = argparse.ArgumentParser(description=descStr)
# add a mutually exclusive group of arguments
group = parser.add_mutually_exclusive_group()
# add expected arguments
group.add_argument('--common', nargs='*', dest='plFiles', required=False)
group.add_argument('--stats', dest='plFile', required=False)
group.add_argument('--dup', dest='plFileD', required=False)
# parse args
args = parser.parse_args()
if args.plFiles:
# find common tracks
findCommonTracks(args.plFiles)
elif args.plFile:
# plot stats
plotStats(args.plFile)
elif args.plFileD:
# find duplicate tracks
findDuplicates(args.plFileD)
else:
print("These are not the tracks you are looking for.")
# main method
if __name__ == '__main__':
main()
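# A hedged usage sketch (the playlist file names are placeholders only):
#     python playlist.py --common playlist1.xml playlist2.xml
#     python playlist.py --stats playlist1.xml
#     python playlist.py --dup playlist1.xml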
|
leetcode.com/python/428_Serialize_and_Deserialize_N-ary_Tree.py
|
vansh-tiwari/coding-interview-gym
| 713 |
96928
|
<filename>leetcode.com/python/428_Serialize_and_Deserialize_N-ary_Tree.py<gh_stars>100-1000
"""
# Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
"""
# Idea: preorder recursive traversal; add number of children after root val, in order to know when to terminate.
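# For illustration (a hypothetical tree): a root 1 with two leaf children 2
# and 3 serializes to "1,2,2,0,3,0" -- each value is followed by its child
# count, which is how deserialization knows when a subtree ends.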
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: Node
:rtype: str
"""
nodeList = []
self.serializeHelper(root, nodeList)
return ','.join(map(str, nodeList))
def serializeHelper(self, root, nodeList):
if root is None:
return
nodeList.append(root.val)
nodeList.append(len(root.children))
for child in root.children:
self.serializeHelper(child, nodeList)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: Node
"""
if len(data) <= 0:
return None
nodeList = data.split(",")
currentNodeIndexs = [0]
deserializedData = self.deserializeHelper(nodeList, currentNodeIndexs)
return deserializedData
def deserializeHelper(self, nodeList, currentNodeIndexs):
if currentNodeIndexs[0] == len(nodeList):
return None
root = Node(int(nodeList[currentNodeIndexs[0]]), [])
currentNodeIndexs[0] += 1
childrenSize = int(nodeList[currentNodeIndexs[0]])
currentNodeIndexs[0] += 1
for index in range(childrenSize):
root.children.append(self.deserializeHelper(nodeList, currentNodeIndexs))
return root
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
|
.modules/.metagoofil/hachoir_parser/misc/mstask.py
|
termux-one/EasY_HaCk
| 1,103 |
96936
|
<gh_stars>1000+
"""
ms task/job file parser
Author: <NAME>
Creation date: 2010-11
References:
http://msdn.microsoft.com/en-us/library/cc248286%28v=PROT.13%29.aspx
http://msdn.microsoft.com/en-us/library/cc248287%28v=PROT.13%29.aspx
http://technet.microsoft.com/en-us/library/bb490996.aspx
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, RootSeekableFieldSet,
CString, String, PascalString16,
UInt32, UInt16, UInt8,
Bit, Bits, PaddingBits,
TimestampWin64, DateTimeMSDOS32,
NullBytes, PaddingBytes, RawBits, RawBytes, Enum)
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_parser.common.win32 import PascalStringWin16, GUID
from hachoir_parser.common.msdos import MSDOSFileAttr16, MSDOSFileAttr32
from hachoir_core.text_handler import filesizeHandler
class TaskTrigger(FieldSet):
TRIGGER_TYPE = {
0x00000000: "ONCE",
0x00000001: "DAILY",
0x00000002: "WEEKLY",
0x00000003: "MONTHLYDATE",
0x00000004: "MONTHLYDOW",
0x00000005: "EVENT_ON_IDLE",
0x00000006: "EVENT_AT_SYSTEMSTART",
0x00000007: "EVENT_AT_LOGON"
}
def __init__(self, *args, **kwargs):
FieldSet.__init__(self, *args, **kwargs)
self._size = self["TriggerSize"].value * 8
def createFields(self):
yield UInt16(self, "TriggerSize")
yield UInt16(self, "Reserved[]")
yield UInt16(self, "BeginYear")
yield UInt16(self, "BeginMonth")
yield UInt16(self, "BeginDay")
yield UInt16(self, "EndYear")
yield UInt16(self, "EndMonth")
yield UInt16(self, "EndDay")
yield UInt16(self, "StartHour")
yield UInt16(self, "StartMinute")
yield UInt32(self, "MinutesDuration")
yield UInt32(self, "MinutesInterval","Time period between repeated trigger firings.")
yield Bit(self, "HasEndDate","Can task stop at some point in time?")
yield Bit(self, "KillAtDurationEnd","Can task be stopped at the end of the repetition period?")
yield Bit(self, "TriggerDisabled","Is this trigger disabled?")
yield RawBits(self, "Unused[]", 29)
yield Enum(UInt32(self, "TriggerType"),self.TRIGGER_TYPE)
yield UInt16(self, "TriggerSpecific0")
yield UInt16(self, "TriggerSpecific1")
yield UInt16(self, "TriggerSpecific2")
yield UInt16(self, "Padding")
yield UInt16(self, "Reserved[]")
yield UInt16(self, "Reserved[]")
class MSTaskFile(Parser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "mstask",
"category": "misc", # "archive", "audio", "container", ...
"file_ext": ("job",), # TODO: Example ("bmp",) to parse the file "image.bmp"
"min_size": 100, # TODO: Minimum file size (x bits, or x*8 in bytes)
"description": ".job 'at' file parser from ms windows", # TODO: Example: "A bitmap picture",
}
endian = LITTLE_ENDIAN
PRODUCT_VERSION = {
0x0400: "Windows NT 4.0",
0x0500: "Windows 2000",
0x0501: "Windows XP",
0x0600: "Windows Vista",
0x0601: "Windows 7"
}
TASK_STATUS = {
0x00041300: "Task Ready",
0x00041301: "Task running",
0x00041302: "Task disabled",
0x00041303: "Task has not run",
0x00041304: "Task has no more runs",
0x00041305: "Task not scheduled",
0x00041306: "Task terminated",
0x00041307: "Task has no valid triggers",
0x00041308: "Task contains only event triggers that do not have set run times",
0x00041309: "Task trigger not found",
0x0004130A: "One or more of the properties that are required to run this task have not been set.",
0x0004130B: "There is no running instance of the task",
0x0004130C: "Task Schedule Remoting Protocol service is not installed",
0x0004130D: "Task object cannot be opened",
0x0004130E: "Task object is invalid",
0x0004130F: "No Account information could be found in Task Scheduler Remoting Protocol security database for the task indicated."
}
def validate(self):
# The MAGIC for a task file is the windows version that created it
# http://msdn.microsoft.com/en-us/library/2d1fbbab-fe6c-4ae5-bdf5-41dc526b2439%28v=PROT.13%29#id11
if self['WindowsVersion'].value not in self.PRODUCT_VERSION:
return "Invalid Product Version Field"
return True
def createFields(self):
yield Enum(UInt16(self, "WindowsVersion"), self.PRODUCT_VERSION)
yield UInt16(self, "FileVersion")
yield GUID(self, "JobUUID")
yield UInt16(self, "AppNameOffset", "App Name Length Offset")
yield UInt16(self, "TriggerOffset", "Contains the offset in bytes within the .JOB file where the task triggers are located.")
yield UInt16(self, "ErrorRetryCount", "Contains the number of execute attempts that are attempted for the task if the task fails to start.")
yield UInt16(self, "ErrorRetryInterval", "Contains the interval, in minutes, between successive retries")
yield UInt16(self, "IdleDeadline", "Contains a maximum time in minutes to wait for the machine to become idle for Idle Wait minutes.")
yield UInt16(self, "IdleWait", "Contains a value in minutes. The machine remains idle for this many minutes before it runs the task")
yield UInt32(self, "Priority")
yield UInt32(self, "MaxRunTime", "Maximum run time in milliseconds")
yield UInt32(self, "ExitCode", "This contains the exit code of the executed task upon the completion of that task.")
yield Enum(UInt32(self, "Status"), self.TASK_STATUS)
yield Bit(self, "Interactive", "Can Task interact with user?")
yield Bit(self, "DeleteWhenDone", "Remove the task file when done?")
yield Bit(self, "Disabled", "Is Task disabled?")
yield Bit(self, "StartOnlyIfIdle", "Task begins only if computer is not in use at the scheduled time")
yield Bit(self, "KillOnIdleEnd", "Kill task if user input is detected, terminating idle state?")
yield Bit(self, "DontStartIfOnBatteries")
yield Bit(self, "KillIfGoingOnBatteries")
yield Bit(self, "RunOnlyIfDocked")
yield Bit(self, "HiddenTask")
yield Bit(self, "RunIfConnectedToInternet")
yield Bit(self, "RestartOnIdleResume")
yield Bit(self, "SystemRequired", "Can task cause system to resume or awaken if system is sleeping?")
yield Bit(self, "OnlyIfUserLoggedOn")
yield Bit(self, "ApplicationNameExists", "Does task have an application name defined?")
yield Bit(self, "Unused[]")
yield Bit(self, "Unused[]")
yield RawBytes(self, "flags", 2)
yield UInt16(self, "LastRunYear")
yield UInt16(self, "LastRunMonth")
yield UInt16(self, "LastRunWeekday", "Sunday=0,Saturday=6")
yield UInt16(self, "LastRunDay")
yield UInt16(self, "LastRunHour")
yield UInt16(self, "LastRunMinute")
yield UInt16(self, "LastRunSecond")
yield UInt16(self, "LastRunMillisecond")
yield UInt16(self, "RunningInstanceCount")
yield PascalStringWin16(self, "AppNameLength", strip='\0')
yield PascalStringWin16(self, "Parameters", strip='\0')
yield PascalStringWin16(self, "WorkingDirectory", strip='\0')
yield PascalStringWin16(self, "Author", strip='\0')
yield PascalStringWin16(self, "Comment", strip='\0')
yield UInt16(self, "UserDataSize")
#todo: read optional userdata
yield UInt16(self, "ReservedDataSize")
if self["ReservedDataSize"].value==8:
yield Enum(UInt32(self, "StartError", "contains the HRESULT error from the most recent attempt to start the task"), self.TASK_STATUS)
yield UInt32(self, "TaskFlags")
elif self["ReservedDataSize"].value:
yield RawBytes(self, "Reserved", self["ReservedDataSize"].value)
yield UInt16(self, "TriggerCount", "size of the array of triggers")
for i in xrange(self["TriggerCount"].value):
yield TaskTrigger(self, "Trigger[]")
|
dex-net/src/dexnet/grasping/quality.py
|
gachiemchiep/PointNetGPD
| 193 |
96975
|
<reponame>gachiemchiep/PointNetGPD
# -*- coding: utf-8 -*-
# """
# Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
# Permission to use, copy, modify, and distribute this software and its documentation for educational,
# research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
# hereby granted, provided that the above copyright notice, this paragraph and the following two
# paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
# Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
# 7201, <EMAIL>, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
#
# IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
# INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
# THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
# HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
# """
# """
# Quasi-static point-based grasp quality metrics.
# Author: <NAME> and <NAME>
# """
import logging
# logging.root.setLevel(level=logging.DEBUG)
import numpy as np
try:
import pyhull.convex_hull as cvh
except:
pass
# logging.warning('Failed to import pyhull')
try:
import cvxopt as cvx
except:
pass
# logging.warning('Failed to import cvx')
import os
import scipy.spatial as ss
import sys
import time
from dexnet.grasping import PointGrasp, GraspableObject3D, GraspQualityConfig
import meshpy.obj_file as obj_file
import meshpy.sdf_file as sdf_file
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
try:
import mayavi.mlab as mv
except:
pass
# logging.warning('Failed to import mayavi')
import IPython
# turn off output logging
cvx.solvers.options['show_progress'] = False
class PointGraspMetrics3D:
""" Class to wrap functions for quasistatic point grasp quality metrics.
"""
@staticmethod
def grasp_quality(grasp, obj, params, vis=False):
"""
Computes the quality of a two-finger point grasp on a given object using a quasi-static model.
Parameters
----------
grasp : :obj:`ParallelJawPtGrasp3D`
grasp to evaluate
obj : :obj:`GraspableObject3D`
object to evaluate quality on
params : :obj:`GraspQualityConfig`
parameters of grasp quality function
"""
start = time.time()
if not isinstance(grasp, PointGrasp):
raise ValueError('Must provide a point grasp object')
if not isinstance(obj, GraspableObject3D):
raise ValueError('Must provide a 3D graspable object')
if not isinstance(params, GraspQualityConfig):
raise ValueError('Must provide GraspQualityConfig')
# read in params
method = params.quality_method
friction_coef = params.friction_coef
num_cone_faces = params.num_cone_faces
soft_fingers = params.soft_fingers
check_approach = params.check_approach
if not hasattr(PointGraspMetrics3D, method):
raise ValueError('Illegal point grasp metric %s specified' % (method))
# get point grasp contacts
contacts_start = time.time()
contacts_found, contacts = grasp.close_fingers(obj, check_approach=check_approach, vis=vis)
if not contacts_found:
logging.debug('Contacts not found')
print('Contacts not found')
return 0
if method == 'force_closure':
# Use fast force closure test (Nguyen 1988) if possible.
if len(contacts) == 2:
c1, c2 = contacts
return PointGraspMetrics3D.force_closure(c1, c2, friction_coef)
# Default to QP force closure test.
method = 'force_closure_qp'
# add the forces, torques, etc at each contact point
forces_start = time.time()
num_contacts = len(contacts)
forces = np.zeros([3, 0])
torques = np.zeros([3, 0])
normals = np.zeros([3, 0])
for i in range(num_contacts):
contact = contacts[i]
if vis:
if i == 0:
contact.plot_friction_cone(color='y')
else:
contact.plot_friction_cone(color='c')
# get contact forces
force_success, contact_forces, contact_outward_normal = contact.friction_cone(num_cone_faces, friction_coef)
if not force_success:
print('Force computation failed')
logging.debug('Force computation failed')
if params.all_contacts_required:
return 0
# get contact torques
torque_success, contact_torques = contact.torques(contact_forces)
if not torque_success:
print('Torque computation failed')
logging.debug('Torque computation failed')
if params.all_contacts_required:
return 0
# get the magnitude of the normal force that the contacts could apply
n = contact.normal_force_magnitude()
forces = np.c_[forces, n * contact_forces]
torques = np.c_[torques, n * contact_torques]
normals = np.c_[normals, n * -contact_outward_normal] # store inward pointing normals
if normals.shape[1] == 0:
logging.debug('No normals')
print('No normals')
return 0
# normalize torques
if 'torque_scaling' not in list(params.keys()):
torque_scaling = 1.0
if method == 'ferrari_canny_L1':
mn, mx = obj.mesh.bounding_box()
torque_scaling = 1.0 / np.median(mx)
print("torque scaling", torque_scaling)
params.torque_scaling = torque_scaling
if vis:
ax = plt.gca()
ax.set_xlim3d(0, obj.sdf.dims_[0])
ax.set_ylim3d(0, obj.sdf.dims_[1])
ax.set_zlim3d(0, obj.sdf.dims_[2])
plt.show()
# evaluate the desired quality metric
quality_start = time.time()
Q_func = getattr(PointGraspMetrics3D, method)
quality = Q_func(forces, torques, normals,
soft_fingers=soft_fingers,
params=params)
end = time.time()
logging.debug('Contacts took %.3f sec' % (forces_start - contacts_start))
logging.debug('Forces took %.3f sec' % (quality_start - forces_start))
logging.debug('Quality eval took %.3f sec' % (end - quality_start))
logging.debug('Everything took %.3f sec' % (end - start))
return quality
@staticmethod
def grasp_matrix(forces, torques, normals, soft_fingers=False,
finger_radius=0.005, params=None):
""" Computes the grasp map between contact forces and wrenchs on the object in its reference frame.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
finger_radius : float
the radius of the fingers to use
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
G : 6xM :obj:`numpy.ndarray`
grasp map
"""
if params is not None and 'finger_radius' in list(params.keys()):
finger_radius = params.finger_radius
num_forces = forces.shape[1]
num_torques = torques.shape[1]
if num_forces != num_torques:
raise ValueError('Need same number of forces and torques')
num_cols = num_forces
if soft_fingers:
num_normals = 2
if normals.ndim > 1:
num_normals = 2 * normals.shape[1]
num_cols = num_cols + num_normals
G = np.zeros([6, num_cols])
for i in range(num_forces):
G[:3, i] = forces[:, i]
# print("liang", params.torque_scaling)
G[3:, i] = params.torque_scaling * torques[:, i]
if soft_fingers:
torsion = np.pi * finger_radius ** 2 * params.friction_coef * normals * params.torque_scaling
pos_normal_i = int(-num_normals)
neg_normal_i = int(-num_normals + num_normals / 2)
G[3:, pos_normal_i:neg_normal_i] = torsion
G[3:, neg_normal_i:] = -torsion
return G
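    # Note on the matrix built above: each column of G is a 6-D wrench
    # [force; torque_scaling * torque] expressed in the object reference frame;
    # with soft fingers, the extra +/- torsion columns approximate torque about
    # the contact normals.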
@staticmethod
def force_closure(c1, c2, friction_coef, use_abs_value=True):
"""" Checks force closure using the antipodality trick.
Parameters
----------
c1 : :obj:`Contact3D`
first contact point
c2 : :obj:`Contact3D`
second contact point
friction_coef : float
coefficient of friction at the contact point
use_abs_value : bool
whether or not to use directionality of the surface normal (useful when mesh is not oriented)
Returns
-------
int : 1 if in force closure, 0 otherwise
"""
if c1.point is None or c2.point is None or c1.normal is None or c2.normal is None:
return 0
p1, p2 = c1.point, c2.point
n1, n2 = -c1.normal, -c2.normal # inward facing normals
if (p1 == p2).all(): # same point
return 0
for normal, contact, other_contact in [(n1, p1, p2), (n2, p2, p1)]:
diff = other_contact - contact
normal_proj = normal.dot(diff) / np.linalg.norm(normal)
if use_abs_value:
normal_proj = abs(normal.dot(diff)) / np.linalg.norm(normal)
if normal_proj < 0:
return 0 # wrong side
alpha = np.arccos(normal_proj / np.linalg.norm(diff))
if alpha > np.arctan(friction_coef):
return 0 # outside of friction cone
return 1
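    # Geometric reading of the checks above: the segment joining the two
    # contact points must lie inside both friction cones, i.e. the angle
    # between each inward-facing normal and that segment must not exceed
    # arctan(friction_coef). Passing both checks is the classic two-finger
    # antipodal force-closure condition (Nguyen, 1988).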
@staticmethod
def force_closure_qp(forces, torques, normals, soft_fingers=False,
wrench_norm_thresh=1e-3, wrench_regularizer=1e-10,
params=None):
""" Checks force closure by solving a quadratic program (whether or not zero is in the convex hull)
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
int : 1 if in force closure, 0 otherwise
"""
if params is not None:
if 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers, params=params)
min_norm, _ = PointGraspMetrics3D.min_norm_vector_in_facet(G, wrench_regularizer=wrench_regularizer)
return 1 * (min_norm < wrench_norm_thresh) # if greater than wrench_norm_thresh, 0 is outside of hull
@staticmethod
def partial_closure(forces, torques, normals, soft_fingers=False,
wrench_norm_thresh=1e-3, wrench_regularizer=1e-10,
params=None):
""" Evalutes partial closure: whether or not the forces and torques can resist a specific wrench.
Estimates resistance by sollving a quadratic program (whether or not the target wrench is in the convex hull).
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
int : 1 if in partial closure, 0 otherwise
"""
force_limit = None
if params is None:
return 0
force_limit = params.force_limits
target_wrench = params.target_wrench
if 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
# reorganize the grasp matrix for easier constraint enforcement in optimization
num_fingers = normals.shape[1]
num_wrenches_per_finger = forces.shape[1] // num_fingers  # integer count, used below as slice bounds
G = np.zeros([6, 0])
for i in range(num_fingers):
start_i = num_wrenches_per_finger * i
end_i = num_wrenches_per_finger * (i + 1)
G_i = PointGraspMetrics3D.grasp_matrix(forces[:, start_i:end_i], torques[:, start_i:end_i],
normals[:, i:i + 1],
soft_fingers, params=params)
G = np.c_[G, G_i]
wrench_resisted, _ = PointGraspMetrics3D.wrench_in_positive_span(G, target_wrench, force_limit, num_fingers,
wrench_norm_thresh=wrench_norm_thresh,
wrench_regularizer=wrench_regularizer)
return 1 * wrench_resisted
@staticmethod
def wrench_resistance(forces, torques, normals, soft_fingers=False,
wrench_norm_thresh=1e-3, wrench_regularizer=1e-10,
finger_force_eps=1e-9, params=None):
""" Evalutes wrench resistance: the inverse norm of the contact forces required to resist a target wrench
Estimates resistance by sollving a quadratic program (min normal contact forces to produce a wrench).
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
finger_force_eps : float
small float to prevent numeric issues in wrench resistance metric
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of wrench resistance metric
"""
force_limit = None
if params is None:
return 0
force_limit = params.force_limits
target_wrench = params.target_wrench
if 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
if 'finger_force_eps' in list(params.keys()):
finger_force_eps = params.finger_force_eps
# reorganize the grasp matrix for easier constraint enforcement in optimization
num_fingers = normals.shape[1]
num_wrenches_per_finger = forces.shape[1] // num_fingers  # integer count, used below as slice bounds
G = np.zeros([6, 0])
for i in range(num_fingers):
start_i = num_wrenches_per_finger * i
end_i = num_wrenches_per_finger * (i + 1)
G_i = PointGraspMetrics3D.grasp_matrix(forces[:, start_i:end_i], torques[:, start_i:end_i],
normals[:, i:i + 1],
soft_fingers, params=params)
G = np.c_[G, G_i]
# compute metric from finger force norm
Q = 0
wrench_resisted, finger_force_norm = PointGraspMetrics3D.wrench_in_positive_span(G, target_wrench, force_limit,
num_fingers,
wrench_norm_thresh=wrench_norm_thresh,
wrench_regularizer=wrench_regularizer)
if wrench_resisted:
Q = 1.0 / (finger_force_norm + finger_force_eps) - 1.0 / (2 * force_limit)
return Q
@staticmethod
def min_singular(forces, torques, normals, soft_fingers=False, params=None):
""" Min singular value of grasp matrix - measure of wrench that grasp is "weakest" at resisting.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of smallest singular value
"""
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers)
_, S, _ = np.linalg.svd(G)
min_sig = S[5]
return min_sig
@staticmethod
def wrench_volume(forces, torques, normals, soft_fingers=False, params=None):
""" Volume of grasp matrix singular values - score of all wrenches that the grasp can resist.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of wrench volume
"""
k = 1
if params is not None and 'k' in list(params.keys()):
k = params.k
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers)
_, S, _ = np.linalg.svd(G)
sig = S
return k * np.sqrt(np.prod(sig))
@staticmethod
def grasp_isotropy(forces, torques, normals, soft_fingers=False, params=None):
""" Condition number of grasp matrix - ratio of "weakest" wrench that the grasp can exert to the "strongest" one.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of grasp isotropy metric
"""
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers)
_, S, _ = np.linalg.svd(G)
max_sig = S[0]
min_sig = S[5]
isotropy = min_sig / max_sig
if np.isnan(isotropy) or np.isinf(isotropy):
return 0
return isotropy
@staticmethod
def ferrari_canny_L1(forces, torques, normals, soft_fingers=False, params=None,
wrench_norm_thresh=1e-3,
wrench_regularizer=1e-10):
""" Ferrari & Canny's L1 metric. Also known as the epsilon metric.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
Returns
-------
float : value of metric
"""
if params is not None and 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if params is not None and 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
# create grasp matrix
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals,
soft_fingers, params=params)
s = time.time()
# center grasp matrix for better convex hull comp
hull = cvh.ConvexHull(G.T)
# TODO: suppress ridiculous amount of output for perfectly valid input to qhull
e = time.time()
logging.debug('CVH took %.3f sec' % (e - s))
debug = False
if debug:
fig = plt.figure()
torques = G[3:, :].T
ax = Axes3D(fig)
ax.scatter(torques[:, 0], torques[:, 1], torques[:, 2], c='b', s=50)
ax.scatter(0, 0, 0, c='k', s=80)
ax.set_xlim3d(-1.5, 1.5)
ax.set_ylim3d(-1.5, 1.5)
ax.set_zlim3d(-1.5, 1.5)
ax.set_xlabel('tx')
ax.set_ylabel('ty')
ax.set_zlabel('tz')
plt.show()
if len(hull.vertices) == 0:
logging.warning('Convex hull could not be computed')
return 0.0
# determine whether or not zero is in the convex hull
s = time.time()
min_norm_in_hull, v = PointGraspMetrics3D.min_norm_vector_in_facet(G, wrench_regularizer=wrench_regularizer)
e = time.time()
logging.debug('Min norm took %.3f sec' % (e - s))
# print("shunang",min_norm_in_hull)
# if norm is greater than 0 then forces are outside of hull
if min_norm_in_hull > wrench_norm_thresh:
logging.debug('Zero not in convex hull')
return 0.0
# if there are fewer nonzeros than D-1 (dim of space minus one)
# then zero is on the boundary and therefore we do not have
# force closure
if np.sum(v > 1e-4) <= G.shape[0] - 1:
logging.warning('Zero not in interior of convex hull')
return 0.0
# find minimum norm vector across all facets of convex hull
s = time.time()
min_dist = sys.float_info.max
closest_facet = None
# print("shunang",G)
for v in hull.vertices:
if np.max(np.array(v)) < G.shape[1]: # because of some occasional odd behavior from pyhull
facet = G[:, v]
# print("shunang1",facet)
dist, _ = PointGraspMetrics3D.min_norm_vector_in_facet(facet, wrench_regularizer=wrench_regularizer)
if dist < min_dist:
min_dist = dist
closest_facet = v
e = time.time()
logging.debug('Min dist took %.3f sec for %d vertices' % (e - s, len(hull.vertices)))
return min_dist
@staticmethod
def ferrari_canny_L1_force_only(forces, torques, normals, soft_fingers=False, params=None,
wrench_norm_thresh=1e-3,
wrench_regularizer=1e-10):
""" Ferrari & Canny's L1 metric with force only. Also known as the epsilon metric.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
Returns
-------
float : value of metric
"""
if params is not None and 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if params is not None and 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
# create grasp matrix
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals,
soft_fingers, params=params)
G = G[:3, :]
s = time.time()
# center grasp matrix for better convex hull comp
hull = cvh.ConvexHull(G.T)
# TODO: suppress ridiculous amount of output for perfectly valid input to qhull
e = time.time()
logging.debug('CVH took %.3f sec' % (e - s))
debug = False
if debug:
fig = plt.figure()
torques = G[3:, :].T
ax = Axes3D(fig)
ax.scatter(torques[:, 0], torques[:, 1], torques[:, 2], c='b', s=50)
ax.scatter(0, 0, 0, c='k', s=80)
ax.set_xlim3d(-1.5, 1.5)
ax.set_ylim3d(-1.5, 1.5)
ax.set_zlim3d(-1.5, 1.5)
ax.set_xlabel('tx')
ax.set_ylabel('ty')
ax.set_zlabel('tz')
plt.show()
if len(hull.vertices) == 0:
logging.warning('Convex hull could not be computed')
return 0.0
# determine whether or not zero is in the convex hull
s = time.time()
min_norm_in_hull, v = PointGraspMetrics3D.min_norm_vector_in_facet(G, wrench_regularizer=wrench_regularizer)
e = time.time()
logging.debug('Min norm took %.3f sec' % (e - s))
# print("shunang",min_norm_in_hull)
# if norm is greater than 0 then forces are outside of hull
if min_norm_in_hull > wrench_norm_thresh:
logging.debug('Zero not in convex hull')
return 0.0
# if there are fewer nonzeros than D-1 (dim of space minus one)
# then zero is on the boundary and therefore we do not have
# force closure
if np.sum(v > 1e-4) <= G.shape[0] - 1:
logging.warning('Zero not in interior of convex hull')
return 0.0
# find minimum norm vector across all facets of convex hull
s = time.time()
min_dist = sys.float_info.max
closest_facet = None
# print("shunang",G)
for v in hull.vertices:
if np.max(np.array(v)) < G.shape[1]: # because of some occasional odd behavior from pyhull
facet = G[:, v]
# print("shunang1",facet)
dist, _ = PointGraspMetrics3D.min_norm_vector_in_facet(facet, wrench_regularizer=wrench_regularizer)
if dist < min_dist:
min_dist = dist
closest_facet = v
e = time.time()
logging.debug('Min dist took %.3f sec for %d vertices' % (e - s, len(hull.vertices)))
return min_dist
@staticmethod
def wrench_in_positive_span(wrench_basis, target_wrench, force_limit, num_fingers=1,
wrench_norm_thresh=1e-4, wrench_regularizer=1e-10):
""" Check whether a target can be exerted by positive combinations of wrenches in a given basis with L1 norm fonger force limit limit.
Parameters
----------
wrench_basis : 6xN :obj:`numpy.ndarray`
basis for the wrench space
target_wrench : 6x1 :obj:`numpy.ndarray`
target wrench to resist
force_limit : float
L1 upper bound on the forces per finger (aka contact point)
num_fingers : int
number of contacts, used to enforce L1 finger constraint
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
Returns
-------
int
whether or not wrench can be resisted
float
minimum norm of the finger forces required to resist the wrench
"""
num_wrenches = wrench_basis.shape[1]
# quadratic and linear costs
P = wrench_basis.T.dot(wrench_basis) + wrench_regularizer * np.eye(num_wrenches)
q = -wrench_basis.T.dot(target_wrench)
# inequalities
lam_geq_zero = -1 * np.eye(num_wrenches)
num_wrenches_per_finger = num_wrenches // num_fingers  # integer division so slicing and np.ones get ints
force_constraint = np.zeros([num_fingers, num_wrenches])
for i in range(num_fingers):
start_i = num_wrenches_per_finger * i
end_i = num_wrenches_per_finger * (i + 1)
force_constraint[i, start_i:end_i] = np.ones(num_wrenches_per_finger)
G = np.r_[lam_geq_zero, force_constraint]
h = np.zeros(num_wrenches + num_fingers)
for i in range(num_fingers):
h[num_wrenches + i] = force_limit
# convert to cvx and solve
P = cvx.matrix(P)
q = cvx.matrix(q)
G = cvx.matrix(G)
h = cvx.matrix(h)
sol = cvx.solvers.qp(P, q, G, h)
v = np.array(sol['x'])
min_dist = np.linalg.norm(wrench_basis.dot(v).ravel() - target_wrench) ** 2
# add back in the target wrench
return min_dist < wrench_norm_thresh, np.linalg.norm(v)
@staticmethod
def min_norm_vector_in_facet(facet, wrench_regularizer=1e-10):
""" Finds the minimum norm point in the convex hull of a given facet (aka simplex) by solving a QP.
Parameters
----------
facet : 6xN :obj:`numpy.ndarray`
vectors forming the facet
wrench_regularizer : float
small float to make quadratic program positive semidefinite
Returns
-------
float
minimum norm of any point in the convex hull of the facet
Nx1 :obj:`numpy.ndarray`
vector of coefficients that achieves the minimum
"""
dim = facet.shape[1] # num vertices in facet
# create alpha weights for vertices of facet
G = facet.T.dot(facet)
grasp_matrix = G + wrench_regularizer * np.eye(G.shape[0])
# Solve QP to minimize .5 x'Px + q'x subject to Gx <= h, Ax = b
P = cvx.matrix(2 * grasp_matrix) # quadratic cost for Euclidean dist
q = cvx.matrix(np.zeros((dim, 1)))
G = cvx.matrix(-np.eye(dim)) # greater than zero constraint
h = cvx.matrix(np.zeros((dim, 1)))
A = cvx.matrix(np.ones((1, dim))) # sum constraint to enforce convex
b = cvx.matrix(np.ones(1)) # combinations of vertices
sol = cvx.solvers.qp(P, q, G, h, A, b)
v = np.array(sol['x'])
min_norm = np.sqrt(sol['primal objective'])
return abs(min_norm), v
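# --- Usage sketch (not part of the original module) ------------------------
# A minimal, hedged example of the QP helper above: find the minimum-norm
# point in the convex hull of a tiny synthetic "facet" of wrench vectors.
# It assumes the module-level imports this class already relies on
# (numpy as np, cvxopt as cvx) are available; the facet values are made up.
def _example_min_norm_vector_in_facet():
    # columns are three 6-D wrench vectors forming the facet
    facet = np.array([[1.0, 0.0, -1.0],
                      [0.0, 1.0, 0.0],
                      [0.0, 0.0, 0.0],
                      [0.0, 0.0, 0.0],
                      [0.0, 0.0, 0.0],
                      [0.0, 0.0, 0.0]])
    cvx.solvers.options['show_progress'] = False  # keep the QP solver quiet
    min_norm, coeffs = PointGraspMetrics3D.min_norm_vector_in_facet(facet)
    # coeffs are the convex-combination weights over the facet's vertices
    return min_norm, coeffs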
|
app/views/api.py
|
modoupi/git-webhook
| 1,617 |
97047
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
'''
Created on 2016-10-20
@author: hustcc
'''
from app import app
from app.utils import RequestUtil, HookDataParse, JsonUtil
from app.tasks import tasks
from flask.globals import request
import json
from app.database.model import WebHook, History
@app.route('/api/git-webhook/<key>', methods=['POST', 'GET'])
def api_for_webhook(key):
'''git hook data
'''
# try:
data = RequestUtil.get_parameter('hook', None)
if data is None:
data = request.data
# for test
# data = WebhookData.github
# data = WebhookData.gitlab
# data = WebhookData.gitosc
try:
data = json.loads(data)
webhook = WebHook.query.filter_by(key=key).first()
if webhook:
repo = webhook.repo
branch = webhook.branch
# the repo and branch match the config, so run the shell task
if (HookDataParse.get_repo_name(data) == repo and
HookDataParse.get_repo_branch(data) == branch):
# start to process, add history into database
# waiting to done
history = History(webhook_id=webhook.id,
data=JsonUtil.object_2_json(data))
history.updateStatus('1')
# status is waiting
webhook.updateStatus('1')
# do the async task
tasks.do_webhook_shell.delay(webhook.id, history.id, data)
return "Work put into Queue."
return "Not match the Repo and Branch."
else:
return "The webhook is not exist."
except Exception as e:
return "Request is not valid Git webhook: " + str(e)
|
bin/lib/config_safe_loader.py
|
jfalcou/infra
| 135 |
97049
|
<gh_stars>100-1000
import yaml
# With thanks to:
# https://stackoverflow.com/questions/34667108/ignore-dates-and-times-while-parsing-yaml
class ConfigSafeLoader(yaml.SafeLoader):
@classmethod
def remove_implicit_resolver(cls, tag_to_remove):
"""
Remove implicit resolvers for a particular tag
Takes care not to modify resolvers in super classes.
"""
if 'yaml_implicit_resolvers' not in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
for first_letter, mappings in cls.yaml_implicit_resolvers.items():
cls.yaml_implicit_resolvers[first_letter] = [(tag, regexp)
for tag, regexp in mappings
if tag != tag_to_remove]
ConfigSafeLoader.remove_implicit_resolver('tag:yaml.org,2002:timestamp')
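# --- Usage sketch (illustrative only) ---------------------------------------
# With the timestamp resolver removed, date-like scalars come back as plain
# strings instead of datetime objects. The YAML below is an example document,
# not anything shipped with this loader.
if __name__ == '__main__':
    example = "release: 2021-06-01\nname: demo"
    data = yaml.load(example, Loader=ConfigSafeLoader)
    # data['release'] is the string '2021-06-01', not a datetime.date
    print(data)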
|
mindinsight/debugger/stream_handler/metadata_handler.py
|
mindspore-ai/mindinsight
| 216 |
97050
|
<filename>mindinsight/debugger/stream_handler/metadata_handler.py
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define the metadata stream handler."""
from mindinsight.debugger.common.log import LOGGER as log
from mindinsight.debugger.common.utils import ServerStatus, DebuggerServerMode
from mindinsight.debugger.stream_handler.base_handler import StreamHandlerBase
class MetadataHandler(StreamHandlerBase):
"""Metadata Handler."""
def __init__(self):
self._state = ServerStatus.PENDING
self._device_name = ""
self.step = 0
self._client_ip = ""
self._cur_node_name = ""
self._cur_full_name = ""
self.backend = ""
self._enable_recheck = False
self._cur_graph_name = ""
# If recommendation_confirmed is true, it only means the user has answered yes or no to the question,
# it does not necessarily mean that the user will use the recommended watch points.
self._recommendation_confirmed = False
self._debugger_version = {}
# maximum step number among all devices
self._max_step_num = 0
self._debugger_type = DebuggerServerMode.ONLINE.value
@property
def debugger_type(self):
"""The property of debugger_type."""
return self._debugger_type
@debugger_type.setter
def debugger_type(self, debugger_type):
"""The property of debugger_type."""
self._debugger_type = debugger_type
@property
def device_name(self):
"""The property of device name."""
return self._device_name
@property
def node_name(self):
"""The property of current node name."""
return self._cur_node_name
@node_name.setter
def node_name(self, node_name):
"""The property of current node name."""
self._cur_node_name = node_name
@property
def graph_name(self):
"""The property of current node name."""
return self._cur_graph_name
@graph_name.setter
def graph_name(self, graph_name):
"""The property of current node name."""
self._cur_graph_name = graph_name if graph_name else ''
@property
def full_name(self):
"""The property of current node name."""
return self._cur_full_name
@property
def state(self):
"""The property of state."""
return self._state.value
@state.setter
def state(self, value):
"""
Set the property of state.
Args:
value (str): The new state.
"""
self._state = ServerStatus(value)
@property
def client_ip(self):
"""The property of client ip."""
return self._client_ip
@client_ip.setter
def client_ip(self, value):
"""
Set the property of client ip.
Args:
value (str): The new ip.
"""
self._client_ip = str(value)
@property
def enable_recheck(self):
"""The property of enable_recheck."""
return self._enable_recheck and self._state == ServerStatus.WAITING
@enable_recheck.setter
def enable_recheck(self, value):
"""
Set the property of enable_recheck.
Args:
value (bool): The new value of enable_recheck.
"""
self._enable_recheck = bool(value)
@property
def recommendation_confirmed(self):
"""The property of recommendation_confirmed."""
return self._recommendation_confirmed
@recommendation_confirmed.setter
def recommendation_confirmed(self, value):
"""
Set the property of recommendation_confirmed.
Args:
value (bool): Whether the user has confirmed the watch point recommendation.
"""
self._recommendation_confirmed = value
@property
def debugger_version(self):
"""The property of debugger_version."""
return self._debugger_version
@debugger_version.setter
def debugger_version(self, value):
"""
Set the property of debugger_version.
Args:
value (dict): The semantic versioning of mindinsight and mindspore,
format is {'ms': 'x.x.x', 'mi': 'x.x.x'}.
"""
self._debugger_version = value
@property
def max_step_num(self):
"""The property of max_step_num."""
return self._max_step_num
@max_step_num.setter
def max_step_num(self, max_step_num):
"""Set the property of max_step_num."""
self._max_step_num = max_step_num
def put(self, value):
"""
Put value into metadata cache. Called by grpc server.
Args:
value (MetadataProto): The Metadata proto message.
"""
self._device_name = value.device_name.split(':')[0]
self.step = value.cur_step
self._cur_full_name = value.cur_node
self.backend = value.backend if value.backend else "Ascend"
log.debug("Put metadata into cache at the %d-th step.", self.step)
def get(self, filter_condition=None):
"""
Get updated value. Called by main server.
Args:
filter_condition (Union[str, list[str]]): The filter property.
Returns:
dict, the metadata.
"""
metadata = {}
if filter_condition is None:
metadata = {
'state': self.state,
'step': self.step,
'device_name': self.device_name,
'pos': '0',
'ip': self.client_ip,
'node_name': self.node_name,
'backend': self.backend,
'enable_recheck': self.enable_recheck,
'graph_name': self.graph_name,
'recommendation_confirmed': self._recommendation_confirmed,
'debugger_version': self.debugger_version
}
if self.debugger_type == 'offline':
metadata['total_step_num'] = self.max_step_num
else:
if not isinstance(filter_condition, list):
filter_condition = [filter_condition]
for field in filter_condition:
metadata[field] = getattr(self, field) if \
hasattr(self, field) else None
return {'metadata': metadata}
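# --- Usage sketch (illustrative only, not part of MindInsight) --------------
# A hedged example of driving the handler directly: set a couple of
# properties, then read them back with and without a filter condition.
def _example_metadata_handler():
    handler = MetadataHandler()
    handler.client_ip = '127.0.0.1'
    handler.max_step_num = 100
    everything = handler.get()                           # full metadata dict
    only_ip = handler.get(filter_condition='client_ip')  # just one field
    return everything, only_ip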
|
hkust-gmission/gmission/controllers/geo_controller.py
|
gmission/gmission
| 251 |
97051
|
import math
__author__ = 'chenzhao'
from gmission.models import *
# 1km is about 0.01, 1m is 0.00001
def location_nearby_user_count(location_id, r=0.01):
location = Location.query.get(location_id)
P = UserLastPosition
in_rect = (P.longitude >= location.coordinate.longitude - r) & (P.longitude <= location.coordinate.longitude + r) \
& (P.latitude >= location.coordinate.latitude - r) & (P.latitude <= location.coordinate.latitude + r)
c = P.query.filter(in_rect).count()
return c
def get_nearest_n_users(longitude, latitude, n, r=0.00001):
P = UserLastPosition
in_rect = (P.longitude >= longitude - r) & (P.longitude <= longitude + r) \
& (P.latitude >= latitude - r) & (P.latitude <= latitude + r)
c = P.query.filter(in_rect).count()
print 'KNN', n, r, c
if c < n and r < 0.1:
return get_nearest_n_users(longitude, latitude, n, r * 2)
ps = sorted(P.query.filter(in_rect).all(), key=lambda p: geo_distance(p.longitude, p.latitude, longitude, latitude))
return [p.user for p in ps[:n]]
def get_nearby_users(longitude, latitude, r=0.05):
P = UserLastPosition
in_rect = (P.longitude >= longitude - r) & (P.longitude <= longitude + r) \
& (P.latitude >= latitude - r) & (P.latitude <= latitude + r)
c = P.query.filter(in_rect).count()
print ('user in %f bound: %d') % (r, c)
# ps = sorted(P.query.filter(in_rect).all(), key=lambda p: geo_distance(p.longitude, p.latitude, longitude, latitude))
return [p.user for p in P.query.filter(in_rect).all()]
def geo_angle(startPointLong, startPointLati, endPointLong, endPointLati):
angle = math.atan2(endPointLati - startPointLati, endPointLong - startPointLong)
return angle
def geo_distance(long1, lati1, long2, lati2):
return math.sqrt((long1 - long2) ** 2 + (lati1 - lati2) ** 2)
pass
def filter_location(data):
if data.get('location_id', None):
# print 'location_id provided, pop location'
data.pop('location', None)
return
# if 'location' in data:
# # print 'location provided'
# uc_keys = ['name', 'longitude','latitude']
# existing_location = Location.query.filter_by(**dict(zip(uc_keys, map(data['location'].get, uc_keys)))).first()
# # print 'existing location', existing_location
# if existing_location:
# data.pop('location', None)
# data['location_id'] = existing_location.id
if __name__ == '__main__':
pass
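# --- Usage sketch (illustrative only) ----------------------------------------
# geo_distance/geo_angle work directly on raw lon/lat degrees, so ~0.01 deg is
# roughly 1 km (see the comment near the top of this module). The coordinates
# below are made-up illustrative values.
def _example_geo_helpers():
    here = (114.2654, 22.3364)     # (longitude, latitude)
    there = (114.2754, 22.3364)    # ~0.01 deg to the east, i.e. about 1 km away
    d = geo_distance(here[0], here[1], there[0], there[1])   # ~0.01
    a = geo_angle(here[0], here[1], there[0], there[1])      # ~0.0 rad (due east)
    return d, a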
|
ch06/06_15.py
|
TeikyungKim/book-cryptocurrency
| 121 |
97054
|
import time
import datetime
now = datetime.datetime.now()
mid = datetime.datetime(now.year, now.month, now.day) + datetime.timedelta(1)
while True:
now = datetime.datetime.now()
if mid < now < mid + datetime.timedelta(seconds=10) :
print("정각입니다")
mid = datetime.datetime(now.year, now.month, now.day) + datetime.timedelta(1)
time.sleep(1)
|
tests/schedules/test_filters.py
|
concreted/prefect
| 8,633 |
97086
|
import pendulum
import pytest
import prefect.schedules.filters as filters
def test_on_datetime_0():
filter_fn = filters.on_datetime(pendulum.datetime(2019, 1, 2, 3, 4, 5))
assert filter_fn(pendulum.datetime(2019, 1, 2, 3, 4, 5))
def test_on_datetime_1():
filter_fn = filters.on_datetime(pendulum.datetime(2019, 1, 2))
assert filter_fn(pendulum.datetime(2019, 1, 2))
def test_on_datetime_2():
filter_fn = filters.on_datetime(pendulum.datetime(2019, 1, 2, 3, 4))
assert not filter_fn(pendulum.datetime(2019, 1, 2, 3, 4, 5))
def test_on_datetime_3():
filter_fn = filters.on_datetime(pendulum.datetime(2019, 1, 2, 3, 4, 5))
assert not filter_fn(pendulum.datetime(2019, 1, 2, 3, 4))
@pytest.mark.parametrize(
"test_datetimes",
[
(pendulum.datetime(2019, 1, 1), pendulum.datetime(2019, 1, 2), True),
(pendulum.datetime(2019, 1, 1), pendulum.datetime(2019, 1, 1), False),
(pendulum.datetime(2019, 1, 2), pendulum.datetime(2019, 1, 2), False),
(pendulum.datetime(2019, 1, 1, 6), pendulum.datetime(2019, 1, 1, 6), True),
(
pendulum.datetime(2019, 1, 1, 5, 59),
pendulum.datetime(2019, 1, 1, 6, 1),
True,
),
],
)
def test_between_datetimes(test_datetimes):
dt = pendulum.datetime(2019, 1, 1, 6)
filter_fn = filters.between_datetimes(test_datetimes[0], test_datetimes[1])
assert filter_fn(dt) is test_datetimes[2]
def test_on_date():
filter_fn = filters.on_date(3, 4)
assert filter_fn(pendulum.datetime(2019, 3, 4))
assert not filter_fn(pendulum.datetime(2019, 3, 5))
assert filter_fn(pendulum.datetime(2019, 3, 4, 5, 6))
assert filter_fn(pendulum.datetime(2034, 3, 4))
assert not filter_fn(pendulum.datetime(2034, 3, 5))
assert not filter_fn(pendulum.datetime(2034, 4, 4))
@pytest.mark.parametrize(
"test_dates",
[
((1, 1, 12, 31), True),
((6, 1, 6, 1), True),
((5, 31, 6, 2), True),
((6, 2, 5, 31), False),
((6, 2, 7, 1), False),
((11, 1, 7, 1), True),
],
)
def test_between_dates(test_dates):
dt = pendulum.datetime(2019, 6, 1)
filter_fn = filters.between_dates(*test_dates[0])
assert filter_fn(dt) is test_dates[1]
@pytest.mark.parametrize(
"test_times",
[
(pendulum.datetime(2019, 1, 2, 4, 30), False),
(pendulum.datetime(2019, 1, 2, 3, 30), True),
(pendulum.datetime(2020, 1, 2, 3, 30), True),
(pendulum.datetime(2019, 4, 5, 3, 30), True),
(pendulum.datetime(2019, 4, 5, 3, 30, 1), False),
],
)
def test_at_time(test_times):
test_dt, result = test_times
filter_fn = filters.at_time(pendulum.time(3, 30))
assert filter_fn(test_dt) is result
@pytest.mark.parametrize(
"test_times",
[
(pendulum.time(5), pendulum.time(7), True),
(pendulum.time(6), pendulum.time(6), True),
(pendulum.time(7), pendulum.time(5), False),
(pendulum.time(7), pendulum.time(6), True),
],
)
def test_between_times(test_times):
dt = pendulum.datetime(2019, 6, 1, 6)
filter_fn = filters.between_times(test_times[0], test_times[1])
assert filter_fn(dt) is test_times[2]
@pytest.mark.parametrize("dt", [pendulum.datetime(2019, 1, i) for i in range(1, 10)])
def test_is_weekday(dt):
assert filters.is_weekday(dt) == (dt.weekday() < 5)
@pytest.mark.parametrize("dt", [pendulum.datetime(2019, 1, i) for i in range(1, 10)])
def test_is_weekend(dt):
assert filters.is_weekend(dt) == (dt.weekday() > 4)
@pytest.mark.parametrize(
"dates",
[
(pendulum.datetime(2019, 1, 20), False),
(pendulum.datetime(2019, 1, 31), True),
(pendulum.datetime(2019, 2, 27), False),
(pendulum.datetime(2019, 2, 28), True),
(pendulum.datetime(2020, 2, 28), False),
(pendulum.datetime(2020, 2, 29), True),
],
)
def test_is_month_end(dates):
assert filters.is_month_end(dates[0]) is dates[1]
@pytest.mark.parametrize(
"year",
[
1971, # Before start of UTC
1972, # Start of UTC
1992, # Near past
2020, # Relative present
2525, # Distant future
],
)
@pytest.mark.parametrize("month", list(range(1, 12)))
def test_is_month_start(year: int, month: int):
filter_fn = filters.is_month_start
assert filter_fn(dt=pendulum.datetime(year=year, month=month, day=1))
assert not filter_fn(dt=pendulum.datetime(year=year, month=month, day=2))
assert not filter_fn(dt=pendulum.datetime(year=year, month=month, day=15))
assert not filter_fn(dt=pendulum.datetime(year=year, month=month, day=28))
def test_is_day_of_week():
years = {
1971: {"month": 2, "day": 22}, # Before start of UTC
1972: {"month": 6, "day": 12}, # Start of UTC
1992: {"month": 6, "day": 8}, # Near past
2020: {"month": 9, "day": 14}, # Relative present
2525: {"month": 12, "day": 3}, # Distant future
}
months = range(1, 12)
days_week = range(0, 6)
def test_day_of_week(day_of_week: int):
filter_fn = filters.is_day_of_week(day_of_week=day_of_week)
for year in years:
month = years[year]["month"]
day = (
years[year]["day"] + day_of_week
) # day of the week also acts as an offset for each day, which starts at Sunday (0)
next_day = day + 1
assert filter_fn(dt=pendulum.datetime(year=year, month=month, day=day))
assert not filter_fn(
dt=pendulum.datetime(year=year, month=month, day=next_day)
)
for day in days_week:
test_day_of_week(day)
|
vnpy/app/rpc_service/ui/widget.py
|
dennislwm/pyalgotrader
| 102 |
97125
|
from vnpy.event import EventEngine, Event
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtWidgets, QtCore
from ..engine import APP_NAME, EVENT_RPC_LOG
class RpcManager(QtWidgets.QWidget):
""""""
signal_log = QtCore.pyqtSignal(Event)
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.rpc_engine = main_engine.get_engine(APP_NAME)
self.init_ui()
self.register_event()
def init_ui(self):
""""""
self.setWindowTitle("RPC service ")
self.setFixedWidth(900)
self.setFixedHeight(500)
self.start_button = QtWidgets.QPushButton("Start")
self.start_button.clicked.connect(self.start_server)
self.stop_button = QtWidgets.QPushButton("Stop")
self.stop_button.clicked.connect(self.stop_server)
self.stop_button.setEnabled(False)
for button in [self.start_button, self.stop_button]:
hint = button.sizeHint()
button.setFixedHeight(hint.height() * 2)
button.setFixedWidth(hint.width() * 4)
self.rep_line = QtWidgets.QLineEdit(self.rpc_engine.rep_address)
self.rep_line.setFixedWidth(300)
self.pub_line = QtWidgets.QLineEdit(self.rpc_engine.pub_address)
self.pub_line.setFixedWidth(300)
self.log_monitor = QtWidgets.QTextEdit()
self.log_monitor.setReadOnly(True)
form = QtWidgets.QFormLayout()
form.addRow(" address request response ", self.rep_line)
form.addRow(" event broadcast address ", self.pub_line)
hbox = QtWidgets.QHBoxLayout()
hbox.addLayout(form)
hbox.addWidget(self.start_button)
hbox.addWidget(self.stop_button)
hbox.addStretch()
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(hbox)
vbox.addWidget(self.log_monitor)
self.setLayout(vbox)
def register_event(self):
""""""
self.signal_log.connect(self.process_log_event)
self.event_engine.register(EVENT_RPC_LOG, self.signal_log.emit)
def process_log_event(self, event: Event):
""""""
log = event.data
msg = f"{log.time}\t{log.msg}"
self.log_monitor.append(msg)
def start_server(self):
""""""
rep_address = self.rep_line.text()
pub_address = self.pub_line.text()
result = self.rpc_engine.start(rep_address, pub_address)
if result:
self.start_button.setEnabled(False)
self.stop_button.setEnabled(True)
def stop_server(self):
""""""
result = self.rpc_engine.stop()
if result:
self.start_button.setEnabled(True)
self.stop_button.setEnabled(False)
|
corehq/apps/hqadmin/views/utils.py
|
dimagilg/commcare-hq
| 471 |
97126
|
<reponame>dimagilg/commcare-hq<filename>corehq/apps/hqadmin/views/utils.py
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy
from corehq.apps.domain.decorators import require_superuser
from corehq.apps.hqwebapp.views import BaseSectionPageView
from corehq.util import reverse
@require_superuser
def default(request):
from ..reports import UserListReport
return HttpResponseRedirect(UserListReport.get_url())
def get_hqadmin_base_context(request):
return {
"domain": None,
}
class BaseAdminSectionView(BaseSectionPageView):
section_name = ugettext_lazy("Admin")
@property
def section_url(self):
return reverse('default_admin_report')
@property
def page_url(self):
return reverse(self.urlname)
|
data/labeled_faces_wild/fetch_data.py
|
DEVESHTARASIA/big-data-tutorial
| 107 |
97136
|
<filename>data/labeled_faces_wild/fetch_data.py
"""Simple script to fetch a numpy version of the LFW data
Original dataset and credits available at:
http://vis-www.cs.umass.edu/lfw/
"""
import os
import urllib2
URL = "https://downloads.sourceforge.net/project/scikit-learn/data/lfw_preprocessed.tar.gz"
ARCHIVE_NAME = "lfw_preprocessed.tar.gz"
FOLDER_NAME = "lfw_preprocessed"
if not os.path.exists(FOLDER_NAME):
if not os.path.exists(ARCHIVE_NAME):
print "Downloading data, please Wait (58.8MB)..."
print URL
opener = urllib2.urlopen(URL)
open(ARCHIVE_NAME, 'wb').write(opener.read())
print
import tarfile
print "Decompressiong the archive: " + ARCHIVE_NAME
tarfile.open(ARCHIVE_NAME, "r:gz").extractall()
os.remove(ARCHIVE_NAME)
|
mmdeploy/mmcv/ops/roi_align.py
|
xizi/mmdeploy
| 746 |
97139
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch
from torch import Tensor
from mmdeploy.core import SYMBOLIC_REWRITER
from mmdeploy.utils import Backend, get_backend, get_ir_config
# Here using mmcv.ops.roi_align.__self__ to find
# mmcv.ops.roi_align.RoIAlignFunction, because RoIAlignFunction is not
# visible in mmcv.
@SYMBOLIC_REWRITER.register_symbolic(
'mmcv.ops.roi_align.__self__', backend='default')
def roi_align_default(ctx, g, input: Tensor, rois: Tensor,
output_size: List[int], spatial_scale: float,
sampling_ratio: int, pool_mode: str, aligned: bool):
"""Rewrite symbolic function for default backend.
Replace onnx::RoiAlign with mmcv::MMCVRoiAlign for PPLNN. For ONNXRuntime,
the align operation is done outside the inference engine for opset versions
lower than 16. By default, onnx::RoiAlign is replaced with
mmdeploy::MMCVRoiAlign.
Args:
ctx (ContextCaller): The context with additional information.
g (Graph): The traced onnx graph.
input (Tensor): Input tensor, 4-D feature map of shape (N, C, H, W).
rois (Tensor): Bx5 boxes. First column is the index into N. The other
4 columns are xyxy.
output_size(List[int]): Output size of height and width.
spatial_scale (float): Scale factor mapping box coordinates from the
input image to the feature map (typically 1 / feature stride).
sampling_ratio (int): Number of input samples to take for each
output sample. 0 to take samples densely for current models.
pool_mode (str): Pooling mode in each bin, could be 'avg' or 'max'.
aligned (bool): With `aligned=True`, we first appropriately scale
the ROI and then shift it by -0.5 prior to calling roi_align.
This produces the correct neighbors;
Returns:
MMCVRoiAlign op for onnx.
"""
backend = get_backend(ctx.cfg)
if backend == Backend.PPLNN:
domain = 'mmcv'
elif backend == Backend.ONNXRUNTIME:
from torch.onnx.symbolic_opset9 import _cast_Long
from torch.onnx.symbolic_opset11 import add, select, squeeze
batch_indices = _cast_Long(
g,
squeeze(
g,
select(
g, rois, 1,
g.op(
'Constant',
value_t=torch.tensor([0], dtype=torch.long))), 1),
False)
rois = select(
g, rois, 1,
g.op(
'Constant',
value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
ir_cfg = get_ir_config(ctx.cfg)
opset_version = ir_cfg.get('opset_version', 11)
if opset_version < 16:
# preprocess rois to stay compatible with opset < 16;
# for opset 16+, `aligned` is implemented inside onnxruntime.
if aligned is True:
rois = add(
g, rois,
g.op(
'Constant',
value_t=torch.tensor([-0.5 / spatial_scale],
dtype=torch.float)))
return g.op(
'RoiAlign',
input,
rois,
batch_indices,
output_height_i=output_size[0],
output_width_i=output_size[1],
spatial_scale_f=spatial_scale,
sampling_ratio_i=sampling_ratio,
mode_s=pool_mode)
else:
return g.op(
'RoiAlign',
input,
rois,
batch_indices,
output_height_i=output_size[0],
output_width_i=output_size[1],
spatial_scale_f=spatial_scale,
sampling_ratio_i=sampling_ratio,
mode_s=pool_mode,
aligned_i=aligned)
else:
domain = 'mmdeploy'
return g.op(
f'{domain}::MMCVRoiAlign',
input,
rois,
output_height_i=output_size[0],
output_width_i=output_size[1],
spatial_scale_f=spatial_scale,
sampling_ratio_i=sampling_ratio,
mode_s=pool_mode,
aligned_i=aligned)
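# --- Worked example of the opset<16 `aligned` workaround (comment only) ------
# With spatial_scale = 0.25 (a stride-4 feature map), the rewrite above adds
# -0.5 / 0.25 = -2.0 to every box coordinate in *image* pixels. Since
# onnx::RoiAlign then multiplies the boxes by spatial_scale, this becomes the
# usual -0.5 pixel shift on the feature map, i.e. the same alignment that
# `aligned=True` performs natively in mmcv and in opset 16+.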
|
insights/parsers/tests/test_neutron_ovs_agent_log.py
|
mglantz/insights-core
| 121 |
97154
|
from insights.parsers.neutron_ovs_agent_log import NeutronOVSAgentLog
from insights.tests import context_wrap
from datetime import datetime
LOG = """
2016-11-09 14:39:25.348 3153 WARNING oslo_config.cfg [-] Option "rabbit_password" from group "oslo_messaging_rabbit" is deprecated for removal. Its value may be silently ignored in the future.
2016-11-09 14:39:25.348 3153 WARNING oslo_config.cfg [-] Option "rabbit_userid" from group "oslo_messaging_rabbit" is deprecated for removal. Its value may be silently ignored in the future.
2016-11-09 14:39:25.352 3153 INFO ryu.base.app_manager [-] loading app neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native.ovs_ryuapp
2016-11-09 14:39:27.171 3153 INFO ryu.base.app_manager [-] loading app ryu.app.ofctl.service
2016-11-09 14:39:27.190 3153 INFO ryu.base.app_manager [-] loading app ryu.controller.ofp_handler
"""
def test_neutron_ovs_agent_log():
log = NeutronOVSAgentLog(context_wrap(LOG))
assert len(log.get("WARNING")) == 2
assert len(list(log.get_after(datetime(2016, 11, 9, 14, 39, 26)))) == 2
|
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/sync_full_schema_table_column_py3.py
|
Mannan2812/azure-cli-extensions
| 207 |
97158
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SyncFullSchemaTableColumn(Model):
"""Properties of the column in the table of database full schema.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar data_size: Data size of the column.
:vartype data_size: str
:ivar data_type: Data type of the column.
:vartype data_type: str
:ivar error_id: Error id of the column.
:vartype error_id: str
:ivar has_error: If there is error in the table.
:vartype has_error: bool
:ivar is_primary_key: If it is the primary key of the table.
:vartype is_primary_key: bool
:ivar name: Name of the column.
:vartype name: str
:ivar quoted_name: Quoted name of the column.
:vartype quoted_name: str
"""
_validation = {
'data_size': {'readonly': True},
'data_type': {'readonly': True},
'error_id': {'readonly': True},
'has_error': {'readonly': True},
'is_primary_key': {'readonly': True},
'name': {'readonly': True},
'quoted_name': {'readonly': True},
}
_attribute_map = {
'data_size': {'key': 'dataSize', 'type': 'str'},
'data_type': {'key': 'dataType', 'type': 'str'},
'error_id': {'key': 'errorId', 'type': 'str'},
'has_error': {'key': 'hasError', 'type': 'bool'},
'is_primary_key': {'key': 'isPrimaryKey', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'quoted_name': {'key': 'quotedName', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(SyncFullSchemaTableColumn, self).__init__(**kwargs)
self.data_size = None
self.data_type = None
self.error_id = None
self.has_error = None
self.is_primary_key = None
self.name = None
self.quoted_name = None
|
RecoHI/HiEvtPlaneAlgos/python/RecoHiEvtPlane_EventContent_cff.py
|
ckamtsikis/cmssw
| 852 |
97167
|
<filename>RecoHI/HiEvtPlaneAlgos/python/RecoHiEvtPlane_EventContent_cff.py
import FWCore.ParameterSet.Config as cms
# AOD content
RecoHiEvtPlaneAOD = cms.PSet(
outputCommands = cms.untracked.vstring(
'keep recoEvtPlanes_hiEvtPlane_*_*',
'keep ZDCRecHitsSorted_zdcreco_*_*',
'keep ZDCDataFramesSorted_hcalDigis_*_*',
'keep HFRecHitsSorted_hfreco_*_*')
)
# RECO content
RecoHiEvtPlaneRECO = cms.PSet(
outputCommands = cms.untracked.vstring()
)
RecoHiEvtPlaneRECO.outputCommands.extend(RecoHiEvtPlaneAOD.outputCommands)
# FEVT content
RecoHiEvtPlaneFEVT = cms.PSet(
outputCommands = cms.untracked.vstring()
)
RecoHiEvtPlaneFEVT.outputCommands.extend(RecoHiEvtPlaneRECO.outputCommands)
|
ipynb/fs/finder.py
|
jayvdb/ipynb
| 208 |
97176
|
"""
Contains the finder for use with filesystems.
"""
import sys
import os
from importlib.abc import MetaPathFinder
from importlib.machinery import ModuleSpec
class FSFinder(MetaPathFinder):
"""
Finder for ipynb/py files from the filesystem.
Only tries to load modules that are under ipynb.fs.
Tries to treat .ipynb and .py files exactly the same as much as possible.
The loader_class passed in to the constructor is used to do actual loading
"""
def __init__(self, package_prefix, loader_class):
self.loader_class = loader_class
self.package_prefix = package_prefix
def _get_paths(self, fullname):
"""
Generate ordered list of paths we should look for fullname module in
"""
real_path = os.path.join(*fullname[len(self.package_prefix):].split('.'))
for base_path in sys.path:
if base_path == '':
# Empty string means process's cwd
base_path = os.getcwd()
path = os.path.join(base_path, real_path)
yield path + '.ipynb'
yield path + '.py'
yield os.path.join(path, '__init__.ipynb')
yield os.path.join(path, '__init__.py')
def find_spec(self, fullname, path, target=None):
"""
Claims modules that are under ipynb.fs
"""
if fullname.startswith(self.package_prefix):
for path in self._get_paths(fullname):
if os.path.exists(path):
return ModuleSpec(
name=fullname,
loader=self.loader_class(fullname, path),
origin=path,
is_package=(path.endswith('__init__.ipynb') or path.endswith('__init__.py')),
)
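# --- Registration sketch (illustrative only) ---------------------------------
# FSFinder is normally installed by the ipynb package itself; this hedged
# example only shows how a MetaPathFinder like this plugs into the import
# machinery. `NotebookLoader` and the 'ipynb.fs.full.' prefix are stand-in
# assumptions, not names defined in this file.
#
# import sys
# sys.meta_path.append(FSFinder('ipynb.fs.full.', NotebookLoader))
# import ipynb.fs.full.my_notebook   # resolves my_notebook.ipynb / .py on sys.path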
|
cscs-checks/prgenv/cuda/cuda_memtest_check.py
|
CLIP-HPC/reframe
| 167 |
97253
|
<filename>cscs-checks/prgenv/cuda/cuda_memtest_check.py
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.simple_test
class cuda_memtest_check(rfm.RegressionTest):
valid_systems = ['daint:gpu', 'dom:gpu', 'ault:amdv100',
'ault:intelv100']
valid_prog_environs = ['PrgEnv-cray']
descr = 'Flexible CUDA Memtest'
maintainers = ['TM', 'SK']
num_tasks_per_node = 1
num_tasks = 0
num_gpus_per_node = 1
modules = ['cudatoolkit']
src_url = ('https://downloads.sourceforge.net/project/cudagpumemtest/'
'cuda_memtest-1.2.3.tar.gz')
prebuild_cmds = [
'wget %s' % src_url,
'tar -xzf cuda_memtest-1.2.3.tar.gz',
'cd cuda_memtest-1.2.3',
'patch -p1 < ../cuda_memtest-1.2.3.patch'
]
build_system = 'Make'
executable = './cuda_memtest-1.2.3/cuda_memtest'
executable_opts = ['--disable_test', '6', '--num_passes', '1']
tags = {'diagnostic', 'ops', 'craype', 'health'}
@run_before('sanity')
def set_sanity_patterns(self):
valid_test_ids = {i for i in range(11) if i not in {6, 9}}
assert_finished_tests = [
sn.assert_eq(
sn.count(sn.findall('Test%s finished' % test_id, self.stdout)),
self.job.num_tasks
)
for test_id in valid_test_ids
]
self.sanity_patterns = sn.all([
*assert_finished_tests,
sn.assert_not_found('(?i)ERROR', self.stdout),
sn.assert_not_found('(?i)ERROR', self.stderr)])
|
object-detection/yolov2/download_darknet_yolo.py
|
AaratiAkkapeddi/nnabla-examples
| 228 |
97258
|
# Copyright 2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def download(url):
from nnabla.utils.data_source_loader import download as dl
dl(url, url.split('/')[-1], False)
def main():
categories = 'https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names'
weights = 'https://pjreddie.com/media/files/yolov2.weights'
example_image = 'https://raw.githubusercontent.com/pjreddie/darknet/master/data/dog.jpg'
print('Downloading MS COCO category names ...')
download(categories)
print('Downloading Darknet YOLO weights ...')
download(weights)
print('Downloading an example image ...')
download(example_image)
if __name__ == '__main__':
main()
|
segmentron/models/espnetv2.py
|
cocolord/SegmenTron
| 654 |
97278
|
"ESPNetv2: A Light-weight, Power Efficient, and General Purpose for Semantic Segmentation"
import torch
import torch.nn as nn
import torch.nn.functional as F
from .segbase import SegBaseModel
from .model_zoo import MODEL_REGISTRY
from ..modules import _ConvBNPReLU, EESP, _BNPReLU, _FCNHead
from ..config import cfg
@MODEL_REGISTRY.register()
class ESPNetV2(SegBaseModel):
r"""ESPNetV2
Reference:
<NAME>, et al. "ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network."
arXiv preprint arXiv:1811.11431 (2018).
"""
def __init__(self, **kwargs):
super(ESPNetV2, self).__init__()
self.proj_L4_C = _ConvBNPReLU(256, 128, 1, **kwargs)
self.pspMod = nn.Sequential(
EESP(256, 128, stride=1, k=4, r_lim=7, **kwargs),
_PSPModule(128, 128, **kwargs))
self.project_l3 = nn.Sequential(
nn.Dropout2d(0.1),
nn.Conv2d(128, self.nclass, 1, bias=False))
self.act_l3 = _BNPReLU(self.nclass, **kwargs)
self.project_l2 = _ConvBNPReLU(64 + self.nclass, self.nclass, 1, **kwargs)
self.project_l1 = nn.Sequential(
nn.Dropout2d(0.1),
nn.Conv2d(32 + self.nclass, self.nclass, 1, bias=False))
self.__setattr__('exclusive', ['proj_L4_C', 'pspMod', 'project_l3', 'act_l3', 'project_l2', 'project_l1'])
def forward(self, x):
size = x.size()[2:]
out_l1, out_l2, out_l3, out_l4 = self.encoder(x, seg=True)
out_l4_proj = self.proj_L4_C(out_l4)
up_l4_to_l3 = F.interpolate(out_l4_proj, scale_factor=2, mode='bilinear', align_corners=True)
merged_l3_upl4 = self.pspMod(torch.cat([out_l3, up_l4_to_l3], 1))
proj_merge_l3_bef_act = self.project_l3(merged_l3_upl4)
proj_merge_l3 = self.act_l3(proj_merge_l3_bef_act)
out_up_l3 = F.interpolate(proj_merge_l3, scale_factor=2, mode='bilinear', align_corners=True)
merge_l2 = self.project_l2(torch.cat([out_l2, out_up_l3], 1))
out_up_l2 = F.interpolate(merge_l2, scale_factor=2, mode='bilinear', align_corners=True)
merge_l1 = self.project_l1(torch.cat([out_l1, out_up_l2], 1))
outputs = list()
merge1_l1 = F.interpolate(merge_l1, scale_factor=2, mode='bilinear', align_corners=True)
outputs.append(merge1_l1)
if self.aux:
# different from paper
auxout = F.interpolate(proj_merge_l3_bef_act, size, mode='bilinear', align_corners=True)
outputs.append(auxout)
return tuple(outputs)
# different from PSPNet
class _PSPModule(nn.Module):
def __init__(self, in_channels, out_channels=1024, sizes=(1, 2, 4, 8), **kwargs):
super(_PSPModule, self).__init__()
self.stages = nn.ModuleList(
[nn.Conv2d(in_channels, in_channels, 3, 1, 1, groups=in_channels, bias=False) for _ in sizes])
self.project = _ConvBNPReLU(in_channels * (len(sizes) + 1), out_channels, 1, 1, **kwargs)
def forward(self, x):
size = x.size()[2:]
feats = [x]
for stage in self.stages:
x = F.avg_pool2d(x, kernel_size=3, stride=2, padding=1)
upsampled = F.interpolate(stage(x), size, mode='bilinear', align_corners=True)
feats.append(upsampled)
return self.project(torch.cat(feats, dim=1))
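# --- Shape-check sketch (illustrative only) ----------------------------------
# A hedged sanity check of the pyramid pooling block above: each stage halves
# the spatial size with avg_pool2d, applies a depthwise 3x3 conv, and
# upsamples back, so the 1x1 projection over the concatenated features keeps
# the input resolution.
def _example_psp_module():
    psp = _PSPModule(in_channels=128, out_channels=128)
    x = torch.randn(1, 128, 32, 32)
    out = psp(x)
    return out.shape  # expected: torch.Size([1, 128, 32, 32])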
|
pytradfri/smart_task.py
|
ggravlingen/ikeatradfri
| 726 |
97292
|
"""Smart tasks set timers to turn on/off lights in various ways.
> Currently supporting wake up
SmartTask # return top level info
TaskControl # Change top level values
StartAction # Get top level info on start action
StartActionItem # Get info on specific device in task
StartActionItemController # change values for task
"""
from __future__ import annotations
from datetime import datetime as dt, time, timedelta
from typing import TYPE_CHECKING, Any, List, Optional
from pydantic import BaseModel, Field
from .command import Command
from .const import (
ATTR_DEVICE_STATE,
ATTR_GATEWAY_INFO,
ATTR_ID,
ATTR_LIGHT_DIMMER,
ATTR_REPEAT_DAYS,
ATTR_SMART_TASK_LIGHTS_OFF,
ATTR_SMART_TASK_NOT_AT_HOME,
ATTR_SMART_TASK_TRIGGER_TIME_INTERVAL,
ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR,
ATTR_SMART_TASK_TRIGGER_TIME_START_MIN,
ATTR_SMART_TASK_TYPE,
ATTR_SMART_TASK_WAKE_UP,
ATTR_START_ACTION,
ATTR_TIME_START_TIME_MINUTE,
ATTR_TRANSITION_TIME,
ROOT_GATEWAY,
ROOT_SMART_TASKS,
ROOT_START_ACTION,
)
from .resource import ApiResource, ApiResourceResponse, BaseResponse, TypeRaw
from .util import BitChoices
if TYPE_CHECKING:
from .gateway import Gateway, GatewayInfo
WEEKDAYS: BitChoices = BitChoices(
(
("mon", "Monday"),
("tue", "Tuesday"),
("wed", "Wednesday"),
("thu", "Thursday"),
("fri", "Friday"),
("sat", "Saturday"),
("sun", "Sunday"),
)
)
class SmartTaskMixin(BaseModel):
"""Represent common task attributes."""
state: int = Field(alias=ATTR_DEVICE_STATE)
class StartActionResponse(BaseResponse):
"""Represent a start action response."""
transition_time: Optional[int] = Field(alias=ATTR_TRANSITION_TIME)
dimmer: int = Field(alias=ATTR_LIGHT_DIMMER)
class TimeIntervalResponse(BaseModel):
"""Represent a time interval response."""
hour_start: int = Field(alias=ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR)
minute_start: int = Field(alias=ATTR_TIME_START_TIME_MINUTE)
class RootStartActionResponse(SmartTaskMixin, BaseModel):
"""Represent a smart action response."""
root_start_action: List[StartActionResponse] = Field(alias=ROOT_START_ACTION)
class SmartTaskResponse(SmartTaskMixin, ApiResourceResponse):
"""Represent a smart task response."""
smart_task_type: int = Field(alias=ATTR_SMART_TASK_TYPE)
repeat_days: int = Field(alias=ATTR_REPEAT_DAYS)
start_action: RootStartActionResponse = Field(alias=ATTR_START_ACTION)
time_interval: List[TimeIntervalResponse] = Field(
alias=ATTR_SMART_TASK_TRIGGER_TIME_INTERVAL
)
class SmartTask(ApiResource):
"""Represent a smart task."""
_model_class: type[SmartTaskResponse] = SmartTaskResponse
raw: SmartTaskResponse
def __init__(self, gateway: Gateway, raw: TypeRaw) -> None:
"""Initialize the class."""
super().__init__(raw)
self._gateway = gateway
self.delta_time_gateway_local = timedelta(0)
@property
def path(self) -> list[str]:
"""Return gateway path."""
return [ROOT_SMART_TASKS, str(self.id)]
@property
def state(self) -> bool:
"""Boolean representing the light state of the transition."""
return self.raw.state == 1
@property
def task_type_id(self) -> int:
"""Return type of task."""
return self.raw.smart_task_type
@property
def task_type_name(self) -> str | None:
"""Return the task type in plain text.
(Own interpretation of names.)
"""
if self.is_wake_up:
return "Wake Up"
if self.is_not_at_home:
return "Not At Home"
if self.is_lights_off:
return "Lights Off"
return None
@property
def is_wake_up(self) -> bool:
"""Boolean representing if this is a wake up task."""
return self.raw.smart_task_type == ATTR_SMART_TASK_WAKE_UP
@property
def is_not_at_home(self) -> bool:
"""Boolean representing if this is a not home task."""
return self.raw.smart_task_type == ATTR_SMART_TASK_NOT_AT_HOME
@property
def is_lights_off(self) -> bool:
"""Boolean representing if this is a lights off task."""
return self.raw.smart_task_type == ATTR_SMART_TASK_LIGHTS_OFF
@property
def repeat_days(self) -> int:
"""Return int (bit) for enabled weekdays."""
return self.raw.repeat_days
@property
def repeat_days_list(self) -> list[str]:
"""Binary representation of weekdays the event takes place."""
return WEEKDAYS.get_selected_values(self.repeat_days)
@property
def task_start_parameters(self) -> TimeIntervalResponse:
"""Return hour and minute that task starts."""
return self.raw.time_interval[0]
@property
def task_start_time(self) -> time:
"""Return the time the task starts.
Time is set according to iso8601.
"""
return time(
self.task_start_parameters.hour_start,
self.task_start_parameters.minute_start,
)
@property
def task_control(self) -> TaskControl:
"""Control a task."""
return TaskControl(self, self.state, self.path, self._gateway)
@property
def start_action(self) -> StartAction:
"""Return start action object."""
return StartAction(self, self.path)
def __repr__(self) -> str:
"""Return a readable name for smart task."""
state = "on" if self.state else "off"
return f"<Task {self.id} - {self.task_type_name} - {state}>"
class TaskControl:
"""Class to control the tasks."""
def __init__(
self, task: SmartTask, state: bool, path: list[str], gateway: Gateway
) -> None:
"""Initialize TaskControl."""
self._task = task
self.state = state
self.path = path
self._gateway = gateway
@property
def tasks(self) -> list[StartActionItem]:
"""Return task objects of the task control."""
return [
StartActionItem(self._task, idx, self.state, self.path, self.raw)
for idx in range(len(self.raw.root_start_action))
]
def calibrate_time(self) -> Command[None]:
"""Calibrate difference between local time and gateway time."""
def process_result(result: TypeRaw) -> None:
gateway_info: GatewayInfo = GatewayInfo(result)
if not gateway_info.current_time:
return
d_now = gateway_info.current_time
d_utcnow = dt.utcnow()
diff = d_now - d_utcnow
self._task.delta_time_gateway_local = diff
return Command(
"get", [ROOT_GATEWAY, ATTR_GATEWAY_INFO], process_result=process_result
)
def set_dimmer_start_time(self, hour: int, minute: int) -> Command[None]:
"""Set start time for task (hh:mm) in iso8601.
NB: dimmer starts 30 mins before time in app
"""
new_time: dt = (
dt(100, 1, 1, hour, minute, 00) - self._task.delta_time_gateway_local
)
command: dict[str, list[dict[str, int]]] = {
ATTR_SMART_TASK_TRIGGER_TIME_INTERVAL: [
{
ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR: new_time.hour,
ATTR_SMART_TASK_TRIGGER_TIME_START_MIN: new_time.minute,
}
]
}
return self._task.set_values(command)
@property
def raw(self) -> RootStartActionResponse:
"""Return raw data that it represents."""
return self._task.raw.start_action
class StartAction:
"""Class to control the start action-node."""
def __init__(self, smart_task: SmartTask, path: list[str]) -> None:
"""Initialize StartAction class."""
self.smart_task = smart_task
self.path = path
@property
def state(self) -> bool:
"""Return state of start action task."""
return self.raw.state == 1
@property
def raw(self) -> RootStartActionResponse:
"""Return raw data that it represents."""
return self.smart_task.raw.start_action
class StartActionItem:
"""Class to show settings for a task."""
def __init__(
self,
task: SmartTask,
index: int,
state: bool,
path: list[str],
raw: RootStartActionResponse,
):
"""Initialize TaskInfo."""
self.task = task
self.index = index
self.state = state
self.path = path
self._raw = raw
@property
def devices_list(self) -> list[dict[str, int]]:
"""Store task data for all tasks but the one we want to update."""
output_list: list[dict[str, int]] = []
current_data_list: list[StartActionResponse] = self._raw.root_start_action
for idx, record in enumerate(current_data_list):
if idx != self.index:
list_record: dict[str, int] = {}
list_record[ATTR_ID] = record.id
list_record[ATTR_LIGHT_DIMMER] = record.dimmer
if record.transition_time is not None:
list_record[ATTR_TRANSITION_TIME] = record.transition_time
output_list.append(list_record)
return output_list
@property
def id(self) -> int:
"""Return ID (device id) of task."""
return self.raw.id
@property
def item_controller(self) -> StartActionItemController:
"""Control a task."""
return StartActionItemController(
self, self.raw, self.state, self.path, self.devices_list
)
@property
def transition_time(self) -> int | None:
"""Transition runs for this long from the time in task_start.
Value is stored as seconds x 10; this property returns minutes, or None if the transition time is missing.
"""
if self.raw.transition_time is not None:
return round(self.raw.transition_time / 60 / 10)
return None
@property
def dimmer(self) -> int:
"""Return dimmer level."""
return self.raw.dimmer
@property
def raw(self) -> StartActionResponse:
"""Return raw data that it represents."""
return self._raw.root_start_action[self.index]
def __repr__(self) -> str:
"""Return a readable name for this class."""
return f"<StartActionItem (Device: {self.id} - Dimmer: {self.dimmer} - Time: {self.transition_time})>"
class StartActionItemController:
"""Class to edit settings for a task."""
def __init__(
self,
item: StartActionItem,
raw: StartActionResponse,
state: bool,
path: list[str],
devices_list: list[dict[str, int]],
):
"""Initialize StartActionItemController."""
self._item = item
self.raw = raw
self.state = state
self.path = path
self.devices_list = devices_list
def set_dimmer(self, dimmer: int) -> Command[None]:
"""Set final dimmer value for task."""
root_start_action_list: list[dict[str, int]] = [
{
ATTR_ID: self.raw.id,
ATTR_LIGHT_DIMMER: dimmer,
}
]
if self.raw.transition_time is not None:
root_start_action_list[0][ATTR_TRANSITION_TIME] = self.raw.transition_time
root_start_action_list.extend(self.devices_list)
command: dict[str, dict[str, Any]] = {
ATTR_START_ACTION: {
ATTR_DEVICE_STATE: int(self.state),
ROOT_START_ACTION: root_start_action_list,
}
}
return self.set_values(command)
def set_transition_time(self, transition_time: int) -> Command[None]:
"""Set time (mins) for light transition."""
root_start_action_list: list[dict[str, int]] = [
{
ATTR_ID: self.raw.id,
ATTR_LIGHT_DIMMER: self.raw.dimmer,
ATTR_TRANSITION_TIME: transition_time * 10 * 60,
}
]
root_start_action_list.extend(self.devices_list)
command: dict[str, dict[str, Any]] = {
ATTR_START_ACTION: {
ATTR_DEVICE_STATE: int(self.state),
ROOT_START_ACTION: root_start_action_list,
}
}
return self.set_values(command)
def set_values(self, values: dict[str, dict[str, Any]]) -> Command[None]:
"""
Set values on task control.
Returns a Command.
"""
return Command("put", self._item.path, values)
|
rbtools/utils/tests/test_aliases.py
|
torcolvin/rbtools
| 113 |
97319
|
<reponame>torcolvin/rbtools
"""Unit tests for rbtools.utils.aliases."""
from __future__ import unicode_literals
from rbtools.utils.aliases import replace_arguments
from rbtools.utils.testbase import RBTestBase
class AliasTests(RBTestBase):
"""Tests for rbtools.utils.aliases."""
def test_replace_arguments_basic(self):
"""Testing replace_arguments with variables and arguments"""
self.assertEqual(replace_arguments('$1', ['HEAD'], posix=True),
['HEAD'])
def test_replace_arguments_multiple(self):
"""Testing replace_arguments with multiple variables and arguments"""
self.assertEqual(replace_arguments('$1..$2', ['a', 'b'], posix=True),
['a..b'])
def test_replace_arguments_blank(self):
"""Testing replace_arguments with variables and a missing argument"""
self.assertEqual(replace_arguments('rbt post $1', [], posix=True),
['rbt', 'post'])
def test_replace_arguments_append(self):
"""Testing replace_arguments with no variables or arguments."""
self.assertEqual(
replace_arguments('echo', ['a', 'b', 'c'], posix=True),
['echo', 'a', 'b', 'c'])
def test_replace_arguments_unrecognized_variables(self):
"""Testing replace_arguments with an unrecognized variable name"""
self.assertEqual(replace_arguments('$1 $test', ['f'], posix=True),
['f', '$test'])
def test_replace_arguments_star(self):
"""Testing replace_arguments with the special $* variable"""
self.assertEqual(replace_arguments('$*', ['a', 'b', 'c'], posix=True),
['a', 'b', 'c'])
def test_replace_arguments_star_whitespace(self):
"""Testing replace_arguments with the special $* variable with
whitespace-containing arguments
"""
self.assertEqual(
replace_arguments('$*', ['a', 'b', 'c d e'], posix=True),
['a', 'b', 'c d e'])
def test_replace_arguments_unescaped_non_posix(self):
"""Testing replace_arguments in non-POSIX mode does not evaluate escape
sequences
"""
self.assertEqual(replace_arguments(r'"$1 \\"', ['a'], posix=False),
[r'"a \\"'])
def test_replace_arguments_invalid_quote(self):
"""Testing replace_arguments with invalid quotes in POSIX and non-POSIX
mode raises an error
"""
self.assertRaises(
ValueError,
lambda: replace_arguments('"foo', [], posix=True))
self.assertRaises(
ValueError,
lambda: replace_arguments('"foo', [], posix=False))
def test_replace_arguments_invalid_quote_posix(self):
"""Testing replace_arguments with escaped ending quote in non-POSIX
mode does not escape the quote
"""
self.assertEqual(replace_arguments('"\\"', [], posix=False),
['"\\"'])
def test_replace_arguments_invalid_quote_non_posix(self):
"""Testing replace_arguments with escaped ending quote in POSIX mode
raises an error
"""
self.assertRaises(
ValueError,
lambda: replace_arguments('"\\"', [], posix=True))
def test_replace_arguments_quoted_non_posix(self):
"""Testing replace_arguments in non-POSIX mode with a quoted sequence
in the command
"""
self.assertEqual(
replace_arguments("find . -iname '*.pyc' -delete", [],
posix=False),
['find', '.', '-iname', "'*.pyc'", '-delete'])
def test_replace_arguments_escaped_posix(self):
"""Testing replace_arguments in POSIX mode evaluates escape sequences
"""
self.assertEqual(
replace_arguments(r'$1 \\ "\\" "\""', ['a'], posix=True),
['a', '\\', '\\', '"'])
|
src/tools/audio.py
|
syfengcuhk/FactorizedHierarchicalVAE
| 155 |
97386
|
import os
import time
import scipy
import numpy as np
import soundfile as sf
def mel_scale(freq):
return 1127.0 * np.log(1.0 + float(freq)/700)
def inv_mel_scale(mel_freq):
return 700 * (np.exp(float(mel_freq)/1127) - 1)
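# Quick sanity check of the two mappings above (a hedged example; values rounded):
# mel_scale(700) = 1127 * ln(2) ≈ 781.2, and the two functions are exact inverses, so
#
#     assert abs(inv_mel_scale(mel_scale(1000.0)) - 1000.0) < 1e-6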
class MelBank(object):
def __init__(self,
low_freq=20,
high_freq=8000,
num_bins=80,
sample_freq=16000,
frame_size=32):
self.low_freq = low_freq
self.high_freq = high_freq
self.num_bins = num_bins
self.sample_freq = sample_freq
self.frame_size = frame_size
# frame_size in millisecond
        self.window_size = int(self.sample_freq * 0.001 * self.frame_size)
        self.fft_freqs = np.linspace(
            0, self.sample_freq / 2, self.window_size // 2 + 1)[:-1]
self.mel_low_freq = mel_scale(self.low_freq)
self.mel_high_freq = mel_scale(self.high_freq)
mel_freqs = np.linspace(
self.mel_low_freq, self.mel_high_freq, self.num_bins+2)
        self.mel_windows = [mel_freqs[i:i+3] for i in range(self.num_bins)]
def _weight(mel_window, mel_freq):
mel_low, mel_center, mel_high = mel_window
if mel_freq > mel_low and mel_freq < mel_high:
if mel_freq <= mel_center:
return (mel_freq - mel_low) / (mel_center - mel_low)
else:
return (mel_high - mel_freq) / (mel_high - mel_center)
else:
return 0
self.mel_banks = [[_weight(window, mel_scale(freq)) \
for freq in self.fft_freqs] for window in self.mel_windows]
self.center_freqs = [inv_mel_scale(mel_freq) \
for mel_freq in mel_freqs[1:-1]]
def hann(n):
"""
n : length of the window
"""
w=np.zeros(n)
    for x in range(n):
w[x] = 0.5*(1 - np.cos(2*np.pi*x/n))
return w
def stft_index(wave, frame_size_n, frame_starts_n, fft_size=None, win=None):
"""
wave : 1-d float array
frame_size_n : number of samples in each frame
frame_starts_n : a list of int denoting starting sample index of each frame
fft_size : number of frequency bins
win : windowing function on amplitude; len(win) == frame_size_n
"""
wave = np.asarray(wave)
frame_starts_n = np.int32(frame_starts_n)
if fft_size is None:
fft_size = frame_size_n
if win is None:
win = np.sqrt(hann(frame_size_n))
# sanity check
if not wave.ndim == 1:
raise ValueError('wave is not mono')
elif not frame_starts_n.ndim == 1:
raise ValueError('frame_starts_n is not 1-d')
elif not len(win) == frame_size_n:
        raise ValueError('win does not match frame_size_n (%s != %s)' % (len(win), frame_size_n))
elif fft_size % 2 == 1:
raise ValueError('odd ffts not yet implemented')
elif np.min(frame_starts_n) < 0 or np.max(frame_starts_n) > wave.shape[0]-frame_size_n:
raise ValueError('Your starting indices contain values outside the allowed range')
    # numpy FFT (scipy.fft is a module, not a function, in modern SciPy)
    spec = np.asarray([np.fft.fft(wave[n:n+frame_size_n]*win, n=fft_size)[:fft_size//2+1]
                       for n in frame_starts_n])
return spec
def istft_index(spec, frame_size_n, frame_starts_n, fft_size=None, win=None, awin=None):
"""
spec : 1-d complex array
frame_size_n : number of samples in each frame
frame_starts_n : a list of int denoting starting sample index of each frame
fft_size : number of frequency bins
win : windowing function on spectrogram; len(win) == frame_size_n
awin : original windowing function on amplitude; len(win) == frame_size_n
"""
frame_starts_n = np.int32(frame_starts_n)
if fft_size is None:
fft_size = frame_size_n
if win is None:
win=np.sqrt(hann(frame_size_n))
if awin is None:
awin=np.sqrt(hann(frame_size_n))
pro_win = win * awin
# sanity check
if not frame_starts_n.ndim == 1:
raise ValueError('frame_starts_n is not 1-d')
elif not len(win) == frame_size_n:
        raise ValueError('win does not match frame_size_n (%s != %s)' % (len(win), frame_size_n))
elif not len(awin) == frame_size_n:
        raise ValueError('awin does not match frame_size_n (%s != %s)' % (len(awin), frame_size_n))
elif spec.shape[0] < frame_starts_n.shape[0]:
raise ValueError('Number of frames in the spectrogram cannot be \
less than the size of frame starts')
N = frame_starts_n[-1] + frame_size_n
signal = np.zeros(N)
normalizer = np.zeros(N, dtype=np.float32)
n_range = np.arange(frame_size_n)
for i, n_offset in enumerate(frame_starts_n):
        frames = np.real(np.fft.ifft(np.concatenate((spec[i], spec[i][-2:0:-1].conjugate())),
                                     n=fft_size))[:frame_size_n]
signal[n_offset+n_range] += frames * win
normalizer[n_offset+n_range] += pro_win
nonzero = np.where(normalizer>0)
rest = np.where(normalizer<=0)
signal[nonzero] = signal[nonzero]/normalizer[nonzero]
signal[rest] = 0
return signal
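# A short analysis/resynthesis sketch, assuming 16 kHz audio with 25 ms frames and a
# 10 ms shift (the same 400/160-sample defaults used by complex_spec_to_audio below);
# `wave` is any mono float array:
#
#     frame_starts = np.arange(0, len(wave) - 400, 160)
#     spec = stft_index(wave, 400, frame_starts)      # (n_frames, 201) complex
#     recon = istft_index(spec, 400, frame_starts)    # overlap-add resynthesis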
def comp_spec_image(wave, decom, frame_size_n, shift_size_n, fft_size, awin, log_floor):
"""
RETURN:
float matrix of shape (2, T, F)
"""
frame_starts_n = np.arange(0, wave.shape[0]-frame_size_n, step=shift_size_n)
spec = stft_index(wave, frame_size_n, frame_starts_n, fft_size, awin)
if decom == "mp":
phase = np.angle(spec)
dbmag = np.log10(np.absolute(spec))
# print("max amplitude %s, max magnitude %s, max phase %s" % (
# np.max(wave), np.max(np.absolute(spec)), np.max(phase)))
dbmag[dbmag < log_floor] = log_floor
dbmag = 20 * dbmag
spec_image = np.concatenate([dbmag[None,...], phase[None,...]], axis=0)
elif decom == "ri":
real = np.real(spec)
imag = np.imag(spec)
# print("max amplitude %s, max real %s, max imag %s" % (
# np.max(wave), np.max(np.absolute(real)), np.max(np.absolute(imag))))
spec_image = np.concatenate([real[None,...], imag[None,...]], axis=0)
else:
raise ValueError("decomposition type %s not supported" % decom)
return spec_image
def est_phase_from_mag_spec(
mag_spec, frame_size_n, shift_size_n, fft_size,
awin=None, k=1000, min_avg_diff=1e-9, debug=False):
"""
for quality min_avg_diff 1e-9 is recommended
mag_spec - magnitude spectrogram (in linear) of shape (n_time, n_frequency)
"""
start_time = time.time()
debug_x = []
frame_starts_n = np.arange(len(mag_spec)) * shift_size_n
# initialize with white noise
# wave_len = frame_starts_n[-1] + frame_size_n + 1
# x = np.random.normal(0, 1, size=(wave_len))
X_phase = None
X = mag_spec * np.exp(1j * np.random.uniform(-np.pi, np.pi, mag_spec.shape))
x = istft_index(X, frame_size_n, frame_starts_n, fft_size, awin, awin)
    for i in range(k):
X_phase = np.angle(stft_index(x, frame_size_n, frame_starts_n, fft_size, awin))
X = mag_spec * np.exp(1j * X_phase)
new_x = istft_index(X, frame_size_n, frame_starts_n, fft_size, awin, awin)
avg_diff = np.mean((x - new_x)**2)
x = new_x
if avg_diff < min_avg_diff:
break
if debug and i % 100 == 0:
print "done %s iterations, avg_diff is %s" % (i, avg_diff)
debug_x.append(x)
if debug:
print "time elapsed = %.2f" % (time.time() - start_time)
return X_phase, debug_x
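# The loop above is essentially Griffin-Lim: alternately impose the target magnitude
# and re-estimate a consistent phase. A hedged usage sketch (parameter values are
# only an example):
#
#     frame_starts = np.arange(0, len(wave) - 400, 160)
#     X_mag = np.abs(stft_index(wave, 400, frame_starts, 400))
#     phase, _ = est_phase_from_mag_spec(X_mag, 400, 160, 400, k=200)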
def convert_to_complex_spec(
X, X_phase, decom, phase_type, add_dc=False, est_phase_opts=None):
"""
X/X_phase - matrix of shape (..., n_channel, n_time, n_frequency)
decom - `mp`: magnitude (in dB) / phase (in rad) decomposition
`ri`: real / imaginary decomposition
phase_type - `true`: X's n_channel = 2
`oracle`: use oracle phase X_phase
`zero`: use zero matrix as the phase matrix for X
`rand`: use random matrix as the phase matrix for X
`est`: estimate the phase from magnitude spectrogram
est_phase_opts - arguments for est_phase_from_mag_spec
complex_X is [..., t, f]
"""
X, X_phase = np.asarray(X), np.asarray(X_phase)
if X.shape[-3] != 1 and X.shape[-3] != 2:
raise ValueError("X's n_channel must be 1 or 2 (%s)" % X.shape[-3])
if np.any(np.iscomplex(X)):
raise ValueError("X should not be complex")
if np.any(np.iscomplex(X_phase)):
raise ValueError("X_phase should not be complex")
if add_dc:
X_dc = np.zeros(X.shape[:-1] + (1,))
X = np.concatenate([X_dc, X], axis=-1)
if X_phase:
X_phase_dc = np.zeros(X_phase.shape[:-1] + (1,))
X_phase = np.concatenate([X_phase_dc, X_phase], axis=-1)
if decom == "mp":
X_lin_mag = 10 ** (X[..., 0, :, :] / 20)
if phase_type == "true" and X.shape[-3] != 2:
raise ValueError("X should have 2 channels for phase_type %s" % (
phase_type,) + " (X shape is %s)" % (X.shape,))
X_phase = X[..., 1, :, :]
else:
if X.shape[-3] != 1:
print("WARNING: ignoring X's second channel (phase)")
if phase_type == "oracle":
if X_phase is None:
raise ValueError("X_phase shape %s invalid for phase_type %s" % (
X_phase.shape, phase_type))
elif phase_type == "zero":
X_phase = np.zeros_like(X_lin_mag)
elif phase_type == "rand":
X_phase = np.random.uniform(-np.pi, np.pi, X_lin_mag.shape)
elif phase_type == "est":
X_phase, _ = est_phase_from_mag_spec(X_lin_mag, debug=True, **est_phase_opts)
print("X_lin_mag shape %s" % (X_lin_mag.shape,))
print("X_phase shape %s" % (X_phase.shape,))
else:
raise ValueError("invalid phase type (%s)" % phase_type)
complex_X = X_lin_mag * np.exp(1j * X_phase)
elif decom == "ri":
if phase_type != "true":
raise ValueError("invalid phase type %s. only `true` is valid" % phase_type)
complex_X = X[..., 0, :, :] + 1j * X[..., 1, :, :]
else:
raise ValueError("invalid decomposition %s (mp|ri)" % decom)
return complex_X
def complex_spec_to_audio(
complex_spec, name=None, trim=0, fs=16000,
frame_size_n=400, shift_size_n=160, fft_size=400, win=None):
assert(np.asarray(complex_spec).ndim == 2)
frame_starts_n = np.arange(len(complex_spec)) * shift_size_n
signal = istft_index(complex_spec, frame_size_n, frame_starts_n, fft_size, win, win)
if trim > 0:
signal = signal[trim:-trim]
if name is not None:
if os.path.splitext(name)[1] != ".wav":
name = name + ".wav"
sf.write(file=name, data=signal, samplerate=fs)
return signal
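# End-to-end sketch tying the helpers together (hedged; the window/shift values are
# simply the defaults of complex_spec_to_audio):
#
#     image = comp_spec_image(wave, "mp", 400, 160, 400, np.sqrt(hann(400)), -5)  # (2, T, F): dB magnitude + phase
#     complex_spec = convert_to_complex_spec(image, None, "mp", "true")
#     audio = complex_spec_to_audio(complex_spec, name="recon.wav")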
|
mmflow/datasets/sintel.py
|
hologerry/mmflow
| 481 |
97387
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import Optional, Sequence, Union
from .base_dataset import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class Sintel(BaseDataset):
"""Sintel optical flow dataset.
Args:
pass_style (str): Pass style for Sintel dataset, and it has 2 options
['clean', 'final']. Default: 'clean'.
scene (str, list, optional): Scene in Sintel dataset, if scene is None,
it means collecting data in all of scene of Sintel dataset.
Default: None.
"""
def __init__(self,
*args,
pass_style: str = 'clean',
scene: Optional[Union[str, Sequence[str]]] = None,
**kwargs) -> None:
all_pass = ['clean', 'final']
assert pass_style in all_pass
self.pass_style = pass_style
self.scene = scene
super().__init__(*args, **kwargs)
self.dataset_name += f' {self.pass_style} subset'
def load_data_info(self) -> None:
"""Load data information, including file path of image1, image2 and
optical flow."""
self._get_data_dir()
img1_filenames = []
img2_filenames = []
flow_filenames = []
occ_filenames = []
invalid_filenames = []
def get_filenames(data_dir, data_suffix, img_idx=None):
data_filenames = []
for data_dir_ in data_dir:
data_filenames_ = self.get_data_filename(
data_dir_, data_suffix)
data_filenames_.sort()
if img_idx == 1:
data_filenames += data_filenames_[:-1]
elif img_idx == 2:
data_filenames += data_filenames_[1:]
else:
data_filenames += data_filenames_
return data_filenames
img1_filenames = get_filenames(self.img1_dir, self.img1_suffix, 1)
img2_filenames = get_filenames(self.img2_dir, self.img2_suffix, 2)
flow_filenames = get_filenames(self.flow_dir, self.flow_suffix)
occ_filenames = get_filenames(self.occ_dir, self.occ_suffix)
invalid_filenames = get_filenames(self.invalid_dir,
self.invalid_suffix, 1)
self.load_img_info(self.data_infos, img1_filenames, img2_filenames)
self.load_ann_info(self.data_infos, flow_filenames, 'filename_flow')
self.load_ann_info(self.data_infos, occ_filenames, 'filename_occ')
self.load_ann_info(self.data_infos, invalid_filenames,
'filename_invalid')
def _get_data_dir(self) -> None:
"""Get the paths for images and optical flow."""
self.img1_suffix = '.png'
self.img2_suffix = '.png'
self.flow_suffix = '.flo'
self.occ_suffix = '.png'
self.invalid_suffix = '.png'
self.subset_dir = 'training' if self.test_mode else 'training'
self.data_root = osp.join(self.data_root, self.subset_dir)
img_root = osp.join(self.data_root, self.pass_style)
flow_root = osp.join(self.data_root, 'flow')
occ_root = osp.join(self.data_root, 'occlusions')
invalid_root = osp.join(self.data_root, 'invalid')
all_scene = os.listdir(img_root)
self.scene = all_scene if self.scene is None else self.scene
self.scene = self.scene if isinstance(self.scene,
(list, tuple)) else [self.scene]
assert set(self.scene).issubset(set(all_scene))
self.img1_dir = [osp.join(img_root, s) for s in self.scene]
self.img2_dir = [osp.join(img_root, s) for s in self.scene]
self.flow_dir = [osp.join(flow_root, s) for s in self.scene]
self.occ_dir = [osp.join(occ_root, s) for s in self.scene]
self.invalid_dir = [osp.join(invalid_root, s) for s in self.scene]
def pre_pipeline(self, results: Sequence[dict]) -> None:
"""Prepare results dict for pipeline.
For Sintel, there is an additional annotation, invalid.
"""
super().pre_pipeline(results)
results['filename_invalid'] = results['ann_info']['filename_invalid']
|
macadam/tc/t00_trainer.py
|
yongzhuo/Macadam
| 290 |
97399
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/5/6 21:51
# @author : Mo
# @function: trainer of text-classification
# adapt for Linux
import sys
import os
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path.append(path_root)
## CPU/GPU and tf.keras
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ["TF_KERAS"] = "1"
# paths, tf.keras
from macadam.conf.path_config import path_embed_bert, path_embed_word2vec_word, path_embed_word2vec_char
from macadam.conf.path_config import path_root, path_tc_baidu_qa_2019, path_tc_thucnews
from macadam.base.preprocess import ListPrerocessXY, FilePrerocessXY
from macadam.base.utils import txt_write, txt_read
from macadam.base.embedding import embedding_map
from macadam.conf.logger_config import logger
from macadam.tc.t00_map import graph_map
from macadam import keras, K, L, M, O
# timing
import time
def trainer(path_model_dir, path_embed, path_train, path_dev,
path_checkpoint, path_config, path_vocab,
network_type="FastText", embed_type="BERT", token_type="CHAR", task="TC",
is_length_max=False, use_onehot=True, use_file=False,
layer_idx=[-1], length_max=128, embed_size=768,
learning_rate=5e-5, batch_size=32, epochs=20, early_stop=3,
decay_rate=0.999, decay_step=1000, rate=1.0,
):
"""
    train model of text-classification
Args:
path_model_dir: str, directory of model save, eg. "/home/model/text_cnn"
path_embed: str, directory of pre-train embedding, eg. "/home/embedding/bert"
path_train: str, path of file(json) of train data, eg. "/home/data/text_classification/THUCNews/train.json"
path_dev: str, path of file(json) of dev data, eg. "/home/data/text_classification/THUCNews/dev.json"
path_checkpoint: str, path of checkpoint file of pre-train embedding
path_config: str, path of config file of pre-train embedding
path_vocab: str, path of vocab file of pre-train embedding
network_type: str, network of text-classification, eg."FastText","TextCNN", "BiRNN", "RCNN", "CRNN", "SelfAttention"
        embed_type: str, type of pre-train embedding, eg. "Bert", "Albert", "Roberta", "Electra"
task: str, task of model, eg. "sl"(sequence-labeling), "tc"(text-classification), "re"(relation-extraction)
        is_length_max: bool, whether to use the given length_max instead of inferring it from the corpus, eg.False
layer_idx: List[int], layers which you select of bert-like model, eg.[-2]
use_onehot: bool, whether use onehot of y(label), eg.False
use_file: bool, use ListPrerocessXY or FilePrerocessXY
length_max: int, max length of sequence, eg.128
embed_size: int, dim of bert-like model, eg.768
learning_rate: float, lr of training, eg.1e-3, 5e-5
batch_size: int, samples each step when training, eg.32
epochs: int, max epoch of training, eg.20
        early_stop: int, stop training when the metric stops increasing, eg.3
decay_rate: float, decay rate of lr, eg.0.999
decay_step: decay step of training, eg.1000
Returns:
        time_collection: str, the elapsed training time in seconds
"""
    # get the embedding and graph classes
Embedding = embedding_map[embed_type.upper()]
Graph = graph_map[network_type.upper()]
print(os.environ["CUDA_VISIBLE_DEVICES"])
    # remove any previously existing model / fine-tuned embedding model, etc.
    # initialize the bert-embedding (or other embedding)
params = {"embed": {"path_embed": path_embed,
"layer_idx": layer_idx,
},
"sharing": {"length_max": length_max,
"embed_size": embed_size,
"token_type": token_type.upper(),
},
"graph": {"loss": "categorical_crossentropy" if use_onehot
else "sparse_categorical_crossentropy", # 损失函数
"use_onehot": use_onehot, # label标签是否使用独热编码
"use_crf": False # 是否使用CRF, 是否存储trans(状态转移矩阵时用)
},
"train": {"learning_rate": learning_rate, # 学习率, 必调参数, 对训练影响较大, word2vec一般设置1e-3, bert设置5e-5或2e-5
"decay_rate": decay_rate, # 学习率衰减系数, 即乘法, lr = lr * rate
"decay_step": decay_step, # 学习率每step步衰减, 每N个step衰减一次
"batch_size": batch_size, # 批处理尺寸, 设置过小会造成收敛困难、陷入局部最小值或震荡, 设置过大会造成泛化能力降低
"early_stop": early_stop, # 早停, N个轮次(epcoh)评估指标(metrics)不增长就停止训练
"epochs": epochs, # 训练最大轮次, 即最多训练N轮
},
"save": {"path_model_dir": path_model_dir, # 模型目录, loss降低则保存的依据, save_best_only=True, save_weights_only=True
"path_model_info": os.path.join(path_model_dir, "model_info.json"), # 超参数文件地址
},
"data": {"train_data": path_train, # 训练数据
"val_data": path_dev # 验证数据
},
}
embed = Embedding(params)
embed.build_embedding(path_checkpoint=path_checkpoint,
path_config=path_config,
path_vocab=path_vocab)
print(os.environ["CUDA_VISIBLE_DEVICES"])
    # initialize the model graph
graph = Graph(params)
logger.info("训练/验证语料读取完成")
# 数据预处理类初始化, 1. is_length_max: 是否指定最大序列长度, 如果不指定则根据语料智能选择length_max.
# 2. use_file: 输入List迭代或是输入path_file迭代.
if use_file:
train_data = path_train
dev_data = path_dev
pxy = FilePrerocessXY(embedding=embed, path=train_data, path_dir=path_model_dir,
length_max=length_max if is_length_max else None,
use_onehot=use_onehot, embed_type=embed_type, task=task)
from macadam.base.preprocess import FileGenerator as generator_xy
logger.info("强制使用序列最大长度为{0}, 即文本最大截断或padding长度".format(length_max))
else:
        # Read train/dev data, one json per line, example: {"x":{"text":"你是谁", "texts2":["你是谁呀", "是不是"]}, "y":"YES"}
train_data = txt_read(path_train)
dev_data = txt_read(path_dev)
        # Only ListPrerocessXY supports rate (the fraction of the data used for training)
len_train_rate = int(len(train_data) * rate)
len_dev_rate = int(len(dev_data) * rate)
train_data = train_data[:len_train_rate]
dev_data = dev_data[:len_dev_rate]
pxy = ListPrerocessXY(embedding=embed, data=train_data, path_dir=path_model_dir,
length_max=length_max if is_length_max else None,
use_onehot=use_onehot, embed_type=embed_type, task=task)
from macadam.base.preprocess import ListGenerator as generator_xy
logger.info("强制使用序列最大长度为{0}, 即文本最大截断或padding长度".format(length_max))
print(os.environ["CUDA_VISIBLE_DEVICES"])
logger.info("预处理类初始化完成")
if not pxy.length_max:
print(pxy.length_max)
pxy.length_max = 33
    # update the maximum sequence length and the number of classes
graph.length_max = pxy.length_max
graph.label = len(pxy.l2i)
graph.hyper_parameters["sharing"]["length_max"] = graph.length_max
graph.hyper_parameters["train"]["label"] = graph.label
    # update length_max and the embedding used by ListPrerocessXY
if length_max != graph.length_max and not is_length_max:
logger.info("根据bert-embedding等的最大长度不大于512, 根据语料自动确定序列最大长度为{0}".format(graph.length_max))
params["sharing"]["length_max"] = graph.length_max
embed = Embedding(params)
embed.build_embedding(path_checkpoint=path_checkpoint,
path_config=path_config,
path_vocab=path_vocab)
pxy.embedding = embed
print(os.environ["CUDA_VISIBLE_DEVICES"])
    # update the embedding dimension
graph.embed_size = embed.embed_size
graph.hyper_parameters["sharing"]["embed_size"] = graph.embed_size
logger.info("预训练模型加载完成")
# graph更新
graph.build_model(inputs=embed.model.input, outputs=embed.model.output)
graph.create_compile()
logger.info("网络(network or graph)初始化完成")
logger.info("开始训练: ")
# 训练
time_start = time.time()
print(os.environ["CUDA_VISIBLE_DEVICES"])
graph.fit(pxy, generator_xy, train_data, dev_data=dev_data, rate=rate)
time_collection = str(time.time()-time_start)
logger.info("训练完成, 耗时:" + str(time.time()-time_start))
return time_collection
if __name__=="__main__":
    # path of the bert embedding (required)
path_embed = path_embed_bert # path_embed_bert, path_embed_word2vec_word, path_embed_word2vec_char
path_checkpoint = os.path.join(path_embed + "bert_model.ckpt")
path_config = os.path.join(path_embed + "bert_config.json")
path_vocab = os.path.join(path_embed + "vocab.txt")
    # paths of the train/dev data
# path_train = os.path.join(path_tc_thucnews, "train.json")
# path_dev = os.path.join(path_tc_thucnews, "dev.json")
path_train = os.path.join(path_tc_baidu_qa_2019, "train.json")
path_dev = os.path.join(path_tc_baidu_qa_2019, "dev.json")
# print(path_train)
    # network architecture
# "Finetune", "FastText", "TextCNN", "CharCNN", "BiRNN", "RCNN", "DCNN", "CRNN",
# "DeepMoji", "SelfAttention", "HAN", "Capsule"
network_type = "FASTTEXT"
    # embedding type: "ROBERTA", "ELECTRA", "RANDOM", "ALBERT", "XLNET", "NEZHA", "GPT2", "WORD", "BERT"
    # note: with the random (n-gram) embedding use embed_size=64; the n-gram here is a bigram
embed_type = "RANDOM"
    # token level, usually "char" or "ngram"; "word" only exists for the random and word embeddings; case-insensitive
token_type = "NGRAM"
# 任务, "TC", "SL", "RE"
task = "TC"
    # learning rate
lr = 1e-5 if embed_type in ["ROBERTA", "ELECTRA", "ALBERT", "XLNET", "NEZHA", "GPT2", "BERT"] else 1e-3
    # model save directory, created if it does not exist
path_model_dir = os.path.join(path_root, "data", "model", f"{network_type}_2020")
# if not os.path.exists(path_model_dir):
# os.mkdir(path_model_dir)
    # start training
trainer(path_model_dir, path_embed, path_train, path_dev, path_checkpoint, path_config, path_vocab,
network_type=network_type, embed_type=embed_type, token_type=token_type, task=task,
is_length_max=False, use_onehot=False, use_file=False, layer_idx=[-1], embed_size=64,
learning_rate=lr, batch_size=128,
epochs=3, early_stop=3, rate=1)
mm = 0
|
tests/test_models_head.py
|
kevinmtian/pytorchvideo
| 2,391 |
97475
|
<reponame>kevinmtian/pytorchvideo
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import unittest
import numpy as np
import torch
from pytorchvideo.models.head import (
ResNetBasicHead,
ResNetRoIHead,
SequencePool,
create_res_basic_head,
create_res_roi_pooling_head,
create_vit_basic_head,
)
from torch import nn
from torchvision.ops import RoIAlign
class TestHeadHelper(unittest.TestCase):
def setUp(self):
super().setUp()
torch.set_rng_state(torch.manual_seed(42).get_state())
def test_build_simple_head(self):
"""
Test simple ResNetBasicHead (without dropout and activation layers).
"""
for input_dim, output_dim in itertools.product((4, 8), (4, 8, 16)):
model = ResNetBasicHead(
proj=nn.Linear(input_dim, output_dim),
pool=nn.AdaptiveAvgPool3d(1),
output_pool=nn.AdaptiveAvgPool3d(1),
)
# Test forwarding.
for input_tensor in TestHeadHelper._get_inputs(input_dim=input_dim):
if input_tensor.shape[1] != input_dim:
with self.assertRaises(RuntimeError):
output_tensor = model(input_tensor)
continue
else:
output_tensor = model(input_tensor)
input_shape = input_tensor.shape
output_shape = output_tensor.shape
output_shape_gt = (input_shape[0], output_dim)
self.assertEqual(
output_shape,
output_shape_gt,
"Output shape {} is different from expected shape {}".format(
output_shape, output_shape_gt
),
)
def test_build_complex_head(self):
"""
Test complex ResNetBasicHead.
"""
for input_dim, output_dim in itertools.product((4, 8), (4, 8, 16)):
model = ResNetBasicHead(
proj=nn.Linear(input_dim, output_dim),
activation=nn.Softmax(),
pool=nn.AdaptiveAvgPool3d(1),
dropout=nn.Dropout(0.5),
output_pool=nn.AdaptiveAvgPool3d(1),
)
# Test forwarding.
for input_tensor in TestHeadHelper._get_inputs(input_dim=input_dim):
if input_tensor.shape[1] != input_dim:
with self.assertRaises(Exception):
output_tensor = model(input_tensor)
continue
output_tensor = model(input_tensor)
input_shape = input_tensor.shape
output_shape = output_tensor.shape
output_shape_gt = (input_shape[0], output_dim)
self.assertEqual(
output_shape,
output_shape_gt,
"Output shape {} is different from expected shape {}".format(
output_shape, output_shape_gt
),
)
def test_build_head_with_callable(self):
"""
Test builder `create_res_basic_head`.
"""
for (pool, activation) in itertools.product(
(nn.AvgPool3d, nn.MaxPool3d, nn.AdaptiveAvgPool3d, None),
(nn.ReLU, nn.Softmax, nn.Sigmoid, None),
):
if activation is None:
activation_model = None
elif activation == nn.Softmax:
activation_model = activation(dim=1)
else:
activation_model = activation()
if pool is None:
pool_model = None
elif pool == nn.AdaptiveAvgPool3d:
pool_model = pool(1)
else:
pool_model = pool(kernel_size=[5, 7, 7], stride=[1, 1, 1])
model = create_res_basic_head(
in_features=16,
out_features=32,
pool=pool,
pool_kernel_size=(5, 7, 7),
output_size=(1, 1, 1),
dropout_rate=0.0,
activation=activation,
output_with_global_average=True,
)
model_gt = ResNetBasicHead(
proj=nn.Linear(16, 32),
activation=activation_model,
pool=pool_model,
dropout=None,
output_pool=nn.AdaptiveAvgPool3d(1),
)
model.load_state_dict(
model_gt.state_dict(), strict=True
) # explicitly use strict mode.
# Test forwarding.
for input_tensor in TestHeadHelper._get_inputs(input_dim=16):
with torch.no_grad():
if input_tensor.shape[1] != 16:
with self.assertRaises(RuntimeError):
output_tensor = model(input_tensor)
continue
else:
output_tensor = model(input_tensor)
output_tensor_gt = model_gt(input_tensor)
self.assertEqual(
output_tensor.shape,
output_tensor_gt.shape,
"Output shape {} is different from expected shape {}".format(
output_tensor.shape, output_tensor_gt.shape
),
)
self.assertTrue(
np.allclose(output_tensor.numpy(), output_tensor_gt.numpy())
)
@staticmethod
def _get_inputs(input_dim: int = 8) -> torch.tensor:
"""
Provide different tensors as test cases.
Yield:
(torch.tensor): tensor as test case input.
"""
# Prepare random tensor as test cases.
shapes = (
# Forward succeeded.
(1, input_dim, 5, 7, 7),
(2, input_dim, 5, 7, 7),
(4, input_dim, 5, 7, 7),
(4, input_dim, 5, 7, 7),
(4, input_dim, 7, 7, 7),
(4, input_dim, 7, 7, 14),
(4, input_dim, 7, 14, 7),
(4, input_dim, 7, 14, 14),
# Forward failed.
(8, input_dim * 2, 3, 7, 7),
(8, input_dim * 4, 5, 7, 7),
)
for shape in shapes:
yield torch.rand(shape)
class TestRoIHeadHelper(unittest.TestCase):
def setUp(self):
super().setUp()
torch.set_rng_state(torch.manual_seed(42).get_state())
def test_build_simple_head(self):
"""
Test simple ResNetRoIHead
(without pool_spatial, roi, dropout and activation layers).
"""
for input_dim, output_dim in itertools.product((4, 8), (4, 8, 16)):
model = ResNetRoIHead(
proj=nn.Linear(input_dim, output_dim),
pool=nn.AdaptiveAvgPool3d(1),
output_pool=nn.AdaptiveAvgPool3d(1),
)
bboxes = None
# Test forwarding.
for input_tensor in TestHeadHelper._get_inputs(input_dim=input_dim):
if input_tensor.shape[1] != input_dim:
with self.assertRaises(RuntimeError):
output_tensor = model(input_tensor, bboxes)
continue
else:
output_tensor = model(input_tensor, bboxes)
input_shape = input_tensor.shape
output_shape = output_tensor.shape
output_shape_gt = (input_shape[0], output_dim)
self.assertEqual(
output_shape,
output_shape_gt,
"Output shape {} is different from expected shape {}".format(
output_shape, output_shape_gt
),
)
def test_create_vit_basic_head(self):
batch_size = 8
seq_len = 10
input_dim = 10
out_dim = 20
head = create_vit_basic_head(
in_features=input_dim,
out_features=out_dim,
)
fake_input = torch.rand(batch_size, seq_len, input_dim)
output = head(fake_input)
gt_shape = (batch_size, out_dim)
self.assertEqual(tuple(output.shape), gt_shape)
def test_sequence_pool(self):
model = SequencePool("cls")
fake_input = torch.rand(8, 10, 10)
output = model(fake_input)
self.assertTrue(torch.equal(output, fake_input[:, 0]))
model = SequencePool("mean")
output = model(fake_input)
self.assertTrue(torch.equal(output, fake_input.mean(1)))
def test_build_complex_head(self):
"""
Test complex ResNetRoIHead.
"""
# ROI layer configs
resolution = (10, 15)
spatial_scale = 1.0 / 5.0
sampling_ratio = 0
roi_layer = RoIAlign(
resolution, spatial_scale=spatial_scale, sampling_ratio=sampling_ratio
)
for input_dim, output_dim in itertools.product((4, 8), (4, 8, 16)):
model = ResNetRoIHead(
proj=nn.Linear(input_dim, output_dim),
activation=nn.Softmax(),
pool=nn.AdaptiveAvgPool3d(1),
pool_spatial=nn.MaxPool2d(resolution, stride=1),
roi_layer=roi_layer,
dropout=nn.Dropout(0.5),
output_pool=nn.AdaptiveAvgPool3d(1),
)
# Test forwarding.
for (input_tensor, bboxes) in TestRoIHeadHelper._get_inputs(
input_dim=input_dim
):
if input_tensor.shape[1] != input_dim:
with self.assertRaises(Exception):
output_tensor = model(input_tensor, bboxes)
continue
output_tensor = model(input_tensor, bboxes)
bboxes_shape = bboxes.shape
output_shape = output_tensor.shape
output_shape_gt = (bboxes_shape[0], output_dim)
self.assertEqual(
output_shape,
output_shape_gt,
"Output shape {} is different from expected shape {}".format(
output_shape, output_shape_gt
),
)
def test_build_head_with_callable(self):
"""
Test builder `create_res_roi_pooling_head`.
"""
# ROI layer configs
resolution = (10, 15)
spatial_scale = 1.0 / 5.0
sampling_ratio = 0
roi_layer = RoIAlign(
resolution, spatial_scale=spatial_scale, sampling_ratio=sampling_ratio
)
for (pool, activation) in itertools.product(
(nn.AvgPool3d, nn.MaxPool3d, nn.AdaptiveAvgPool3d, None),
(nn.ReLU, nn.Softmax, nn.Sigmoid, None),
):
if activation is None:
activation_model = None
elif activation == nn.Softmax:
activation_model = activation(dim=1)
else:
activation_model = activation()
if pool is None:
pool_model = None
elif pool == nn.AdaptiveAvgPool3d:
pool_model = pool(1)
else:
pool_model = pool(kernel_size=[5, 1, 1], stride=[1, 1, 1])
model = create_res_roi_pooling_head(
in_features=16,
out_features=32,
resolution=resolution,
spatial_scale=spatial_scale,
sampling_ratio=sampling_ratio,
roi=RoIAlign,
pool=pool,
pool_spatial=nn.MaxPool2d,
pool_kernel_size=(5, 1, 1),
output_size=(1, 1, 1),
dropout_rate=0.0,
activation=activation,
output_with_global_average=True,
)
model_gt = ResNetRoIHead(
proj=nn.Linear(16, 32),
activation=activation_model,
pool=pool_model,
pool_spatial=nn.MaxPool2d(resolution, stride=1),
roi_layer=roi_layer,
dropout=None,
output_pool=nn.AdaptiveAvgPool3d(1),
)
model.load_state_dict(
model_gt.state_dict(), strict=True
) # explicitly use strict mode.
# Test forwarding.
for (input_tensor, bboxes) in TestRoIHeadHelper._get_inputs(input_dim=16):
with torch.no_grad():
if (
input_tensor.shape[1] != 16
or (pool is None)
or (
input_tensor.shape[-3] != 5 and pool != nn.AdaptiveAvgPool3d
)
):
with self.assertRaises(Exception):
output_tensor = model(input_tensor, bboxes)
continue
else:
output_tensor = model(input_tensor, bboxes)
output_tensor_gt = model_gt(input_tensor, bboxes)
self.assertEqual(
output_tensor.shape,
output_tensor_gt.shape,
"Output shape {} is different from expected shape {}".format(
output_tensor.shape, output_tensor_gt.shape
),
)
self.assertTrue(
np.allclose(output_tensor.numpy(), output_tensor_gt.numpy())
)
@staticmethod
def _get_inputs(input_dim: int = 8) -> torch.tensor:
"""
Provide different tensors as test cases.
Yield:
(torch.tensor): tensor as test case input.
(torch.tensor): tensor as test case bboxes.
"""
# Prepare random tensor as test cases.
shapes = (
# Forward succeeded.
(1, input_dim, 5, 7, 7),
(2, input_dim, 5, 7, 7),
(4, input_dim, 5, 7, 7),
(4, input_dim, 5, 7, 7),
(4, input_dim, 7, 7, 7),
(4, input_dim, 7, 7, 14),
(4, input_dim, 7, 14, 7),
(4, input_dim, 7, 14, 14),
# Forward failed.
(8, input_dim * 2, 3, 7, 7),
(8, input_dim * 4, 5, 7, 7),
)
for shape in shapes:
input_tensor = torch.rand(shape)
bboxes = [[i, 1, 2, 3, 4] for i in range(input_tensor.shape[0])]
bboxes = torch.Tensor(bboxes)
yield (input_tensor, bboxes)
|
src/permission/conf.py
|
dkopitsa/django-permission
| 234 |
97478
|
<filename>src/permission/conf.py
# coding=utf-8
"""
django-permission application configure
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from appconf import AppConf
from permission.handlers import LogicalPermissionHandler
__all__ = ('settings',)
class PermissionConf(AppConf):
DEFAULT_PERMISSION_HANDLER = 'permission.handlers.LogicalPermissionHandler'
"""Default permission handler class"""
CHECK_PERMISSION_PRESENCE = settings.DEBUG
"""Check if the specified string permission exists"""
REPLACE_BUILTIN_IF = True
"""Whether replace builtin if templatetag"""
DEFAULT_APL_FIELD_NAME = 'author'
DEFAULT_APL_ANY_PERMISSION = False
DEFAULT_APL_CHANGE_PERMISSION = True
DEFAULT_APL_DELETE_PERMISSION = True
DEFAULT_CPL_FIELD_NAME = 'collaborators'
DEFAULT_CPL_ANY_PERMISSION = False
DEFAULT_CPL_CHANGE_PERMISSION = True
DEFAULT_CPL_DELETE_PERMISSION = False
DEFAULT_GIPL_ANY_PERMISSION = False
DEFAULT_GIPL_ADD_PERMISSION = True
DEFAULT_GIPL_CHANGE_PERMISSION = True
DEFAULT_GIPL_DELETE_PERMISSION = False
DEFAULT_OSPL_ANY_PERMISSION = False
DEFAULT_OSPL_CHANGE_PERMISSION = True
DEFAULT_OSPL_DELETE_PERMISSION = True
DEFAULT_SPL_ANY_PERMISSION = False
DEFAULT_SPL_ADD_PERMISSION = True
DEFAULT_SPL_CHANGE_PERMISSION = True
DEFAULT_SPL_DELETE_PERMISSION = False
AUTODISCOVER_MODULE_NAME = 'perms'
AUTODISCOVER_VARIABLE_NAME = 'PERMISSION_LOGICS'
AUTODISCOVER_ENABLE = True
CHECK_AUTHENTICATION_BACKENDS = True
"""Check if AUTHENTICATION_BACKENDS is correctly configured"""
CHECK_TEMPLATES_OPTIONS_BUILTINS = True
"""Check if TEMPLATES[?]['OPTIONS']['builtins'] is correctly configured"""
|
src/core/tests/widgets/test_imageview.py
|
luizoti/toga
| 1,261 |
97494
|
<filename>src/core/tests/widgets/test_imageview.py<gh_stars>1000+
import toga
import toga_dummy
from toga_dummy.utils import TestCase
class ImageViewTests(TestCase):
def setUp(self):
super().setUp()
# We need a test app to trigger app module discovery
self.app = toga.App(
formal_name="Test App",
app_id="org.beeware.test-app",
factory=toga_dummy.factory,
)
self.image_view = toga.ImageView(factory=toga_dummy.factory)
def test_widget_created(self):
self.assertEqual(self.image_view._impl.interface, self.image_view)
self.assertActionPerformed(self.image_view, 'create ImageView')
def test_setting_image_invokes_impl_method(self):
new_image = 'not a image'
# Binding a non-existent image raises an exception
try:
self.image_view.image = new_image
self.fail("Image should not bind")
except FileNotFoundError:
pass
# self.assertEqual(self.image_view._image, new_image)
# self.assertValueSet(self.image_view, 'image', new_image)
|
analyzer/codechecker_analyzer/analyzers/clangsa/ctu_autodetection.py
|
ryankurte/codechecker
| 1,601 |
97498
|
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Clang Static Analyzer related functions.
"""
import subprocess
from codechecker_common.logger import get_logger
from codechecker_analyzer import host_check
from codechecker_analyzer.analyzers.clangsa import clang_options, version
LOG = get_logger('analyzer.clangsa')
CTU_ON_DEMAND_OPTION_NAME = 'ctu-invocation-list'
def invoke_binary_checked(binary_path, args=None, environ=None):
"""
Invoke the binary with the specified args, and return the output if the
command finished running with zero exit code. Return False otherwise.
    A possible use is to check the existence of binaries.
:param binary_path: The path to the executable to invoke
:param args: The arguments of the invocation
:type binary_path: str
:type args: list
:rtype str
"""
args = args or []
invocation = [binary_path]
invocation.extend(args)
try:
output = subprocess.check_output(
invocation,
env=environ,
encoding="utf-8",
errors="ignore")
except (subprocess.CalledProcessError, OSError) as e:
LOG.debug('Command invocation failed because of non-zero exit code!'
'Details: %s', str(e))
return False
return output
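# For example (a hedged sketch; the binary path is an assumption):
#
#     version_output = invoke_binary_checked('/usr/bin/clang', ['--version'])
#     if version_output is False:
#         ...  # missing binary or non-zero exit code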
class CTUAutodetection:
"""
CTUAutodetection is responsible for providing the availability information
    of the CTU feature, the relevant mapping tool path and the mapping file
name.
"""
def __init__(self, analyzer_binary, environ):
self.__analyzer_binary = analyzer_binary
self.environ = environ
self.__analyzer_version_info = None
if self.__analyzer_binary is None:
LOG.debug(
'Trying to detect CTU capability, but analyzer binary is not '
'set!')
return None
analyzer_version = invoke_binary_checked(
self.__analyzer_binary, ['--version'], self.environ)
if analyzer_version is False:
LOG.debug('Failed to invoke command to get Clang version!')
return None
version_parser = version.ClangVersionInfoParser(self.__analyzer_binary)
version_info = version_parser.parse(analyzer_version)
if not version_info:
LOG.debug('Failed to parse Clang version information!')
return None
self.__analyzer_version_info = version_info
@property
def analyzer_version_info(self):
"""
Returns the relevant parameters of the analyzer by parsing the
output of the analyzer binary when called with version flag.
"""
if not self.__analyzer_version_info:
return False
return self.__analyzer_version_info
@property
def major_version(self):
"""
Returns the major version of the analyzer, which is used for
CTU analysis.
"""
return self.analyzer_version_info.major_version
@property
def installed_dir(self):
"""
Returns the installed directory of the analyzer, which is used for
CTU analysis.
"""
return self.analyzer_version_info.installed_dir
@property
def mapping_tool_path(self):
"""Return the path to the mapping tool."""
tool_path, _ = clang_options.ctu_mapping(self.analyzer_version_info)
if tool_path:
return tool_path
return False
@property
def display_progress(self):
"""
Return analyzer args if it is capable to display ctu progress.
Returns None if the analyzer can not display ctu progress.
The ctu display progress arguments depend on
the clang analyzer version.
"""
if not self.analyzer_version_info:
return None
ctu_display_progress_args = ['-Xclang',
'-analyzer-config',
'-Xclang',
'display-ctu-progress=true']
ok = host_check.has_analyzer_config_option(
self.__analyzer_binary, "display-ctu-progress", self.environ)
if not ok:
return None
return ctu_display_progress_args
@property
def mapping_file_name(self):
"""
        Returns the name of the mapping file that is used for
        CTU analysis.
"""
_, mapping_file_name = \
clang_options.ctu_mapping(self.analyzer_version_info)
if mapping_file_name:
return mapping_file_name
return False
@property
def is_ctu_capable(self):
"""
Detects if the current clang is CTU compatible. Tries to autodetect
the correct one based on clang version.
"""
tool_path = self.mapping_tool_path
if not tool_path:
return False
return invoke_binary_checked(tool_path, ['-version'], self.environ) \
is not False
@property
def is_on_demand_ctu_available(self):
"""
Detects if the current Clang supports on-demand parsing of ASTs for
CTU analysis.
"""
analyzer_options = invoke_binary_checked(
self.__analyzer_binary, ['-cc1', '-analyzer-config-help'],
self.environ)
if analyzer_options is False:
return False
return CTU_ON_DEMAND_OPTION_NAME in analyzer_options
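# Typical use (a hedged sketch; `clang_binary` and `environ` are assumptions):
#
#     ctu = CTUAutodetection(clang_binary, environ)
#     if ctu.is_ctu_capable and ctu.is_on_demand_ctu_available:
#         extra_args = ctu.display_progress or []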
|
aries_cloudagent/resolver/tests/test_routes.py
|
SNU-Blockchain-2021-Fall-Group-H/aries-cloudagent-python
| 247 |
97512
|
<reponame>SNU-Blockchain-2021-Fall-Group-H/aries-cloudagent-python
"""Test resolver routes."""
# pylint: disable=redefined-outer-name
import pytest
from asynctest import mock as async_mock
from pydid import DIDDocument
from ...admin.request_context import AdminRequestContext
from .. import routes as test_module
from ..base import (
DIDMethodNotSupported,
DIDNotFound,
ResolutionMetadata,
ResolutionResult,
ResolverError,
ResolverType,
)
from ..did_resolver import DIDResolver
from . import DOC
@pytest.fixture
def did_doc():
yield DIDDocument.deserialize(DOC)
@pytest.fixture
def resolution_result(did_doc):
metadata = ResolutionMetadata(
resolver_type=ResolverType.NATIVE,
resolver="mock_resolver",
retrieved_time="some time",
duration=10,
)
yield ResolutionResult(did_doc, metadata)
@pytest.fixture
def mock_response():
json_response = async_mock.MagicMock()
temp_value = test_module.web.json_response
test_module.web.json_response = json_response
yield json_response
test_module.web.json_response = temp_value
@pytest.fixture
def mock_resolver(resolution_result):
did_resolver = async_mock.MagicMock()
did_resolver.resolve = async_mock.CoroutineMock(return_value=did_doc)
did_resolver.resolve_with_metadata = async_mock.CoroutineMock(
return_value=resolution_result
)
yield did_resolver
@pytest.fixture
def mock_request(mock_resolver):
context = AdminRequestContext.test_context({DIDResolver: mock_resolver})
outbound_message_router = async_mock.CoroutineMock()
request_dict = {
"context": context,
"outbound_message_router": outbound_message_router,
}
request = async_mock.MagicMock(
match_info={
"did": "did:ethr:mainnet:0xb9c5714089478a327f09197987f16f9e5d936e8a",
},
query={},
json=async_mock.CoroutineMock(return_value={}),
__getitem__=lambda _, k: request_dict[k],
)
yield request
@pytest.mark.asyncio
async def test_resolver(mock_request, mock_response: async_mock.MagicMock, did_doc):
await test_module.resolve_did(mock_request)
    assert mock_response.call_args[0][0] == did_doc.serialize()
# TODO: test http response codes
@pytest.mark.asyncio
@pytest.mark.parametrize(
"side_effect, error",
[
(DIDNotFound, test_module.web.HTTPNotFound),
(DIDMethodNotSupported, test_module.web.HTTPNotImplemented),
(ResolverError, test_module.web.HTTPInternalServerError),
],
)
async def test_resolver_not_found_error(
mock_resolver, mock_request, side_effect, error
):
mock_resolver.resolve_with_metadata = async_mock.CoroutineMock(
side_effect=side_effect()
)
with pytest.raises(error):
await test_module.resolve_did(mock_request)
@pytest.mark.asyncio
async def test_register():
mock_app = async_mock.MagicMock()
mock_app.add_routes = async_mock.MagicMock()
await test_module.register(mock_app)
mock_app.add_routes.assert_called_once()
@pytest.mark.asyncio
async def test_post_process_routes():
mock_app = async_mock.MagicMock(_state={"swagger_dict": {}})
test_module.post_process_routes(mock_app)
assert "tags" in mock_app._state["swagger_dict"]
|
aetros/commands/RunCommand.py
|
aetros/aetros-cli
| 120 |
97562
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import argparse
import sys
import os
from math import ceil
import psutil
import six
from cpuinfo import cpuinfo
import aetros.utils.git
from aetros.cuda_gpu import get_ordered_devices, CudaNotImplementedException
from aetros.starter import start_command
from aetros.backend import JobBackend
from aetros.utils import human_size, lose_parameters_to_full, extract_parameters, find_config, loading_text, \
read_home_config, ensure_docker_installed, docker_call
class RunCommand:
def __init__(self, logger):
self.logger = logger
self.client = None
self.registered = False
self.active = True
def main(self, args):
import aetros.const
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
prog=aetros.const.__prog__ + ' run')
parser.add_argument('command', nargs='?', help="The command to run. Default read in configuration file")
parser.add_argument('-i', '--image', help="Which Docker image to use for the command. Default read in configuration file. If not specified, command is executed on the host.")
parser.add_argument('--no-image', action='store_true', help="Forces not to use docker, even when image is defined in the configuration file.")
parser.add_argument('-s', '--server', action='append', help="Limits the server pool to this server. Default not limitation or read in configuration file. Multiple --server allowed.")
parser.add_argument('-m', '--model', help="Under which model this job should be listed. Default read in configuration file")
parser.add_argument('-l', '--local', action='store_true', help="Start the job immediately on the current machine.")
parser.add_argument('-c', '--config', help="Default aetros.yml in current working directory.")
parser.add_argument('--priority', help="Increases or decreases priority. Default is 0.")
parser.add_argument('--cpu', help="How many CPU cores should be assigned to job. Docker only.")
parser.add_argument('--memory', help="How much memory should be assigned to job. Docker only.")
parser.add_argument('--gpu', help="How many GPU cards should be assigned to job. Docker only.")
parser.add_argument('--gpu_memory', help="Memory requirement for the GPU. Docker only.")
parser.add_argument('--offline', '-o', action='store_true', help="Whether the execution should happen offline.")
parser.add_argument('--rebuild-image', action='store_true', help="Makes sure the Docker image is re-built without cache.")
parser.add_argument('--max-time', help="Limit execution time in seconds. Sends SIGINT to the process group when reached.")
parser.add_argument('--max-epochs', help="Limit execution epochs. Sends SIGINT to the process group when reached.")
parser.add_argument('--gpu-device', action='append', help="Which device id should be mapped into the NVIDIA docker container. Only when --local")
parser.add_argument('--volume', '-v', action='append', help="Volume into docker. Only when --local")
parser.add_argument('-e', action='append', help="Sets additional environment variables. '-e name=value' to set value, or '-e name' to read from current env")
parser.add_argument('-p', '--param', action='append', help="Sets a hyperparameter, example '--param name=value'. Multiple --param allowed.")
parsed_args = parser.parse_args(args)
if parsed_args.config and not os.path.exists(parsed_args.config):
self.logger.error("fatal: file %s does not exist." % (parsed_args.config,))
sys.exit(2)
config = find_config(parsed_args.config)
home_config = read_home_config()
if config['model'] and not parsed_args.model:
parsed_args.model = config['model']
if not parsed_args.model:
print("fatal: no model defined. Use --model or switch into a directory where you executed 'aetros init model-name'.")
sys.exit(2)
if not parsed_args.local and parsed_args.volume:
print("fatal: can not use volume with jobs on the cluster. Use datasets instead.")
sys.exit(1)
if parsed_args.local and parsed_args.priority:
print("fatal: the priority can only be set for jobs in the cluster.")
sys.exit(1)
if config['image']:
ensure_docker_installed(self.logger)
env = {}
if parsed_args.e:
for item in parsed_args.e:
if '=' in item:
k, v = item.split('=')
else:
k = item
v = os.getenv(k)
env[k] = v
if ('command' not in config or not config['command']) and not parsed_args.command:
self.logger.error('No command given. Define the command in aetros.yml or use command argument.')
sys.exit(1)
job_backend = JobBackend(parsed_args.model, self.logger)
ignore = []
if 'ignore' in config:
ignore = config['ignore']
job_backend.job = {'config': {'ignore': ignore}}
adding_files = loading_text("- Adding job files to index ... ")
files_added, size_added = job_backend.add_files(config['root'], report=False)
adding_files("done with %d file%s added (%s)."
% (files_added, 's' if files_added != 1 else '', human_size(size_added, 2)))
create_info = {
'type': 'custom',
'config': config
}
incoming_hyperparameter = {}
if parsed_args.param:
for param in parsed_args.param:
if '=' not in param:
raise Exception('--param ' + param + ' does not contain a `=`. Please use "--param name=value"')
name, value = param.split('=')
incoming_hyperparameter[name] = value
# first transform simple format in the full definition with parameter types
# (string, number, group, choice_group, etc)
full_hyperparameters = lose_parameters_to_full(config['parameters'])
# now extract hyperparameters from full definition, and overwrite stuff using
# incoming_hyperparameter if available
hyperparameter = extract_parameters(full_hyperparameters, incoming_hyperparameter)
create_info['config']['parameters'] = hyperparameter
if parsed_args.rebuild_image:
create_info['config']['rebuild_image'] = True
if parsed_args.max_epochs:
create_info['config']['maxEpochs'] = int(parsed_args.max_epochs)
create_info['config']['priority'] = 0
if parsed_args.priority:
create_info['config']['priority'] = float(parsed_args.priority)
if parsed_args.max_time:
create_info['config']['maxTime'] = float(parsed_args.max_time)
if parsed_args.command:
create_info['config']['command'] = parsed_args.command
if parsed_args.image:
# reset install options, since we can't make sure if the base image still fits
if 'image' in config and config['image'] and config['image'] != parsed_args.image:
create_info['config']['install'] = None
# reset dockerfile, since we specified manually an image
create_info['config']['dockerfile'] = None
create_info['config']['image'] = parsed_args.image
if parsed_args.no_image:
create_info['config']['image'] = None
if parsed_args.server:
create_info['config']['servers'] = []
for name in parsed_args.server:
create_info['config']['servers'].append(name)
create_info['config']['resources'] = create_info['config'].get('resources', {})
resources = create_info['config']['resources']
default_cpu_and_memory = 1 if create_info['config']['image'] else 0
resources['cpu'] = int(parsed_args.cpu or resources.get('cpu', default_cpu_and_memory))
resources['memory'] = int(parsed_args.memory or resources.get('memory', default_cpu_and_memory))
resources['gpu'] = int(parsed_args.gpu or resources.get('gpu', 0))
resources['gpu_memory'] = int(parsed_args.gpu_memory or resources.get('gpu_memory', 0))
if parsed_args.local:
create_info['server'] = 'local'
# make sure we do not limit the resources to something that is not available on the local machine
warning = []
cpu = cpuinfo.get_cpu_info()
mem = psutil.virtual_memory().total
gpu = 0
try:
gpu = len(get_ordered_devices())
except CudaNotImplementedException: pass
if not create_info['config']['image'] and not all([x == 0 for x in six.itervalues(resources)]):
self.logger.warning("! No Docker virtualization since no `image` defined, resources limitation ignored.")
if create_info['config']['image'] and resources['gpu'] > 0:
if not (sys.platform == "linux" or sys.platform == "linux2"):
self.logger.warning("! Your operating system does not support GPU allocation for "
"Docker virtualization. "
"NVIDIA-Docker2 is only supported on Linux.")
local_max_resources = {'cpu': cpu['count'], 'memory': ceil(mem / 1024 / 1024 / 1024), 'gpu': gpu}
if create_info['config']['image']:
# read max hardware within Docker
out = docker_call(['run', 'alpine', 'sh', '-c', 'nproc && cat /proc/meminfo | grep MemTotal'])
cpus, memory = out.decode('utf-8').strip().split('\n')
local_max_resources['cpu'] = int(cpus)
memory = memory.replace('MemTotal:', '').replace('kB', '').strip()
local_max_resources['memory'] = ceil(int(memory) / 1024 / 1024)
if local_max_resources['cpu'] < resources['cpu']:
warning.append('CPU cores %d -> %d' % (resources['cpu'], local_max_resources['cpu']))
resources['cpu'] = local_max_resources['cpu']
if local_max_resources['memory'] < resources['memory']:
warning.append('memory %dGB -> %dGB' % (resources['memory'], local_max_resources['memory']))
resources['memory'] = local_max_resources['memory']
if local_max_resources['gpu'] < resources['gpu']:
warning.append('GPU cards %d -> %d' % (resources['gpu'], local_max_resources['gpu']))
resources['gpu'] = local_max_resources['gpu']
if warning:
self.logger.warning("! Resources downgrade due to missing hardware: %s." % (', '.join(warning),))
if parsed_args.config and not create_info['config']['configPath']:
create_info['config']['configPath'] = parsed_args.config
create_info['config']['sourcesAttached'] = True
creating_git_job = loading_text("- Create job in local Git ... ")
if aetros.utils.git.get_current_commit_hash():
create_info['origin_git_source'] = {
'origin': aetros.utils.git.get_current_remote_url(),
'author': aetros.utils.git.get_current_commit_author(),
'message': aetros.utils.git.get_current_commit_message(),
'branch': aetros.utils.git.get_current_branch(),
'commit': aetros.utils.git.get_current_commit_hash(),
}
job_backend.create(create_info=create_info, server=None)
creating_git_job("created %s in %s." % (job_backend.job_id[0:9], job_backend.model_name))
summary = "➤ Summary: Job running "
if parsed_args.local:
summary += 'locally'
else:
summary += 'on the cluster'
if create_info['config']['image']:
summary += ' in Docker using image %s with %d CPU cores, %dGB memory and %d GPUs.' \
% (create_info['config']['image'], resources['cpu'], resources['memory'], resources['gpu'])
else:
summary += ' on host using all available resources.'
print(summary)
# tasks = []
#
# if 'tasks' in config:
# for name, task_config in six.iteritems(config['tasks']):
# replica = 1
# if 'replica' in task_config:
# replica = int(task_config['replica'])
# for index in range(0, replica):
# tasks.append(job_backend.create_task(job_id, task_config, name, index))
if parsed_args.offline:
if not parsed_args.local:
self.logger.warning("Can not create a remote job in offline mode.")
sys.exit(1)
self.logger.warning("Execution started offline.")
else:
adding_files = loading_text("- Connecting to "+home_config['host']+" ... ")
if job_backend.connect():
adding_files("connected.")
else:
parsed_args.offline = True
adding_files("failed. Continue in offline mode.")
if not parsed_args.offline:
sys.stdout.write("- Uploading job data ... ")
job_backend.git.push()
job_backend.client.wait_until_queue_empty(['files'], clear_end=False)
sys.stdout.write(" done.\n")
link = "%s/model/%s/job/%s" % (home_config['url'], job_backend.model_name, job_backend.job_id)
sys.__stdout__.write(u"➤ Monitor job at %s\n" % (link))
if parsed_args.local:
job_backend.start(collect_system=False, offline=parsed_args.offline, push=False)
if not parsed_args.offline:
job_backend.git.start_push_sync()
cpus = create_info['config']['resources']['cpu']
memory = create_info['config']['resources']['memory']
if not parsed_args.gpu_device and create_info['config']['resources']['gpu'] > 0:
# if requested 2 GPUs and we have 3 GPUs with id [0,1,2], gpus should be [0,1]
parsed_args.gpu_device = []
for i in range(0, create_info['config']['resources']['gpu']):
parsed_args.gpu_device.append(i)
start_command(self.logger, job_backend, env, parsed_args.volume, cpus=cpus, memory=memory, gpu_devices=parsed_args.gpu_device,
offline=parsed_args.offline)
|
problems/can-scramble/can-scramble.py
|
vidyadeepa/the-coding-interview
| 1,571 |
97617
|
from collections import Counter
def can_scramble(source, dest):
if len(source) != len(dest):
return False
return Counter(source) == Counter(dest)
assert(can_scramble("abc", "cba") == True)
assert(can_scramble("abc", "ccc") == False)
assert(can_scramble("aab", "bbc") == False)
assert(can_scramble("aabaaaa", "bbc") == False)
|
users/views.py
|
hrbhat/twissandra
| 308 |
97628
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from users.forms import LoginForm, RegistrationForm
import cass
def login(request):
login_form = LoginForm()
register_form = RegistrationForm()
next = request.REQUEST.get('next')
if 'kind' in request.POST:
if request.POST['kind'] == 'login':
login_form = LoginForm(request.POST)
if login_form.is_valid():
username = login_form.get_username()
request.session['username'] = username
if next:
return HttpResponseRedirect(next)
return HttpResponseRedirect('/')
elif request.POST['kind'] == 'register':
register_form = RegistrationForm(request.POST)
if register_form.is_valid():
username = register_form.save()
request.session['username'] = username
if next:
return HttpResponseRedirect(next)
return HttpResponseRedirect('/')
context = {
'login_form': login_form,
'register_form': register_form,
'next': next,
}
return render_to_response(
'users/login.html', context, context_instance=RequestContext(request))
def logout(request):
request.session.pop('username', None)
return render_to_response(
'users/logout.html', {}, context_instance=RequestContext(request))
def find_friends(request):
friend_usernames = []
if request.user['is_authenticated']:
friend_usernames = cass.get_friend_usernames(
request.session['username']) + [request.session['username']]
q = request.GET.get('q')
result = None
searched = False
if q is not None:
searched = True
try:
result = cass.get_user_by_username(q)
result = {
'username': result.username,
'friend': q in friend_usernames
}
except cass.DatabaseError:
pass
context = {
'q': q,
'result': result,
'searched': searched,
'friend_usernames': friend_usernames,
}
return render_to_response(
'users/add_friends.html', context, context_instance=RequestContext(request))
def modify_friend(request):
next = request.REQUEST.get('next')
added = False
removed = False
if request.user['is_authenticated']:
if 'add-friend' in request.POST:
cass.add_friends(
request.session['username'],
[request.POST['add-friend']]
)
added = True
if 'remove-friend' in request.POST:
cass.remove_friends(
request.session['username'],
[request.POST['remove-friend']]
)
removed = True
if next:
return HttpResponseRedirect(next)
context = {
'added': added,
'removed': removed,
}
return render_to_response(
'users/modify_friend.html', context, context_instance=RequestContext(request))
|
nautobot/core/management/commands/start.py
|
psmware-ltd/nautobot
| 384 |
97640
|
from django_webserver.management.commands.pyuwsgi import Command as uWSGICommand
class Command(uWSGICommand):
help = "Start Nautobot uWSGI server."
|
ibrnet/data_loaders/spaces_dataset.py
|
QiuhongAnnaWei/IBRNet
| 254 |
97714
|
<filename>ibrnet/data_loaders/spaces_dataset.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# some code in this file is adapted from https://github.com/augmentedperception/spaces_dataset/
import sys
sys.path.append('../')
import os
import numpy as np
from PIL import Image
import imageio
import torch
from torch.utils.data import Dataset
from .data_utils import quaternion_about_axis, quaternion_matrix, random_crop, random_flip
import json
def view_obj2camera_rgb(view):
image_path = view.image_path
intrinsics = view.camera.intrinsics
h_in_view, w_in_view = view.shape
rgb = imageio.imread(image_path).astype(np.float32) / 255.
h_img, w_img = rgb.shape[:2]
if h_in_view != h_img or w_in_view != w_img:
intrinsics[0] *= w_img / w_in_view
intrinsics[1] *= h_img / h_in_view
intrinsics_4x4 = np.eye(4)
intrinsics_4x4[:3, :3] = intrinsics
c2w = view.camera.w_f_c
ref_camera = np.concatenate([list(rgb.shape[:2]), intrinsics_4x4.flatten(), c2w.flatten()])
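# The camera is packed as a 34-vector: [H, W, flattened 4x4 intrinsics, flattened 4x4 c2w];
# indices 0/1 hold the image size and indices 4 and 8 hold the principal point (cx, cy).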
return ref_camera, rgb
def view_obj2camera_rgb_path(view):
img_size = view.shape
image_path = view.image_path
intrinsics = view.camera.intrinsics
intrinsics_4x4 = np.eye(4)
intrinsics_4x4[:3, :3] = intrinsics
c2w = view.camera.w_f_c
return image_path, img_size, intrinsics_4x4, c2w
def sample_target_view_for_training(views, input_rig_id, input_ids):
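# Pick a target view (excluding the chosen input views) whose camera centre lies within
# 0.15 scene units of at least one input camera, so it can be reconstructed from the inputs.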
input_rig_views = views[input_rig_id]
input_cam_positions = np.array([input_rig_views[i].camera.w_f_c[:3, 3] for i in input_ids])
remaining_rig_ids = []
remaining_cam_ids = []
for i, rig in enumerate(views):
for j, cam in enumerate(rig):
if i == input_rig_id and j in input_ids:
continue
else:
cam_loc = views[i][j].camera.w_f_c[:3, 3]
# if i != input_rig_id:
# print(np.min(np.linalg.norm(input_cam_positions - cam_loc, axis=1)))
if np.min(np.linalg.norm(input_cam_positions - cam_loc, axis=1)) < 0.15:
remaining_rig_ids.append(i)
remaining_cam_ids.append(j)
selected_id = np.random.choice(len(remaining_rig_ids))
selected_view = views[remaining_rig_ids[selected_id]][remaining_cam_ids[selected_id]]
return selected_view
def get_all_views_in_scene(all_views):
cameras = []
rgbs = []
for rig in all_views:
for i in range(len(rig)):
camera, rgb = view_obj2camera_rgb(rig[i])
cameras.append(camera)
rgbs.append(rgb)
return cameras, rgbs
def get_all_views_in_scene_cam_path(all_views):
c2w_mats = []
intrinsicss = []
rgb_paths = []
img_sizes = []
for rig in all_views:
for i in range(len(rig)):
image_path, img_size, intrinsics_4x4, c2w = view_obj2camera_rgb_path(rig[i])
rgb_paths.append(image_path)
intrinsicss.append(intrinsics_4x4)
c2w_mats.append(c2w)
img_sizes.append(img_size)
return rgb_paths, img_sizes, intrinsicss, c2w_mats
def sort_nearby_views_by_angle(query_pose, ref_poses):
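# Rank reference views by how closely their viewing directions align with the query view:
# directions are the (normalised) sum of each pose's 3rd and 4th columns, and views are
# sorted by descending inner product with the query direction.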
query_direction = np.sum(query_pose[:3, 2:4], axis=-1)
query_direction = query_direction / np.linalg.norm(query_direction)
ref_directions = np.sum(ref_poses[:, :3, 2:4], axis=-1)
ref_directions = ref_directions / np.linalg.norm(ref_directions, axis=-1, keepdims=True)
inner_product = np.sum(ref_directions * query_direction[None, ...], axis=1)
sorted_inds = np.argsort(inner_product)[::-1]
return sorted_inds
class Camera(object):
"""Represents a Camera with intrinsics and world from/to camera transforms.
Attributes:
w_f_c: The world from camera 4x4 matrix.
c_f_w: The camera from world 4x4 matrix.
intrinsics: The camera intrinsics as a 3x3 matrix.
inv_intrinsics: The inverse of camera intrinsics matrix.
"""
def __init__(self, intrinsics, w_f_c):
"""Constructor.
Args:
intrinsics: A numpy 3x3 array representing intrinsics.
w_f_c: A numpy 4x4 array representing wFc.
"""
self.intrinsics = intrinsics
self.inv_intrinsics = np.linalg.inv(intrinsics)
self.w_f_c = w_f_c
self.c_f_w = np.linalg.inv(w_f_c)
class View(object):
"""Represents an image and associated camera geometry.
Attributes:
camera: The camera for this view.
image: The np array containing the image data.
image_path: The file path to the image.
shape: The 2D shape of the image.
"""
def __init__(self, image_path, shape, camera):
self.image_path = image_path
self.shape = shape
self.camera = camera
self.image = None
def _WorldFromCameraFromViewDict(view_json):
"""Fills the world from camera transform from the view_json.
Args:
view_json: A dictionary of view parameters.
Returns:
A 4x4 transform matrix representing the world from camera transform.
"""
transform = np.identity(4)
position = view_json['position']
transform[0:3, 3] = (position[0], position[1], position[2])
orientation = view_json['orientation']
angle_axis = np.array([orientation[0], orientation[1], orientation[2]])
angle = np.linalg.norm(angle_axis)
epsilon = 1e-7
if abs(angle) < epsilon:
# No rotation
return transform
axis = angle_axis / angle
rot_mat = quaternion_matrix(quaternion_about_axis(-angle, axis))
transform[0:3, 0:3] = rot_mat[0:3, 0:3]
return transform
def _IntrinsicsFromViewDict(view_params):
"""Fills the intrinsics matrix from view_params.
Args:
view_params: Dict view parameters.
Returns:
A 3x3 matrix representing the camera intrinsics.
"""
intrinsics = np.identity(3)
intrinsics[0, 0] = view_params['focal_length']
intrinsics[1, 1] = (view_params['focal_length'] * view_params['pixel_aspect_ratio'])
intrinsics[0, 2] = view_params['principal_point'][0]
intrinsics[1, 2] = view_params['principal_point'][1]
return intrinsics
def ReadView(base_dir, view_json):
return View(
image_path=os.path.join(base_dir, view_json['relative_path']),
shape=(int(view_json['height']), int(view_json['width'])),
camera=Camera(
_IntrinsicsFromViewDict(view_json),
_WorldFromCameraFromViewDict(view_json)))
def ReadScene(base_dir):
"""Reads a scene from the directory base_dir."""
with open(os.path.join(base_dir, 'models.json')) as f:
model_json = json.load(f)
all_views = []
for views in model_json:
all_views.append([ReadView(base_dir, view_json) for view_json in views])
return all_views
def InterpolateDepths(near_depth, far_depth, num_depths):
"""Returns num_depths from (far_depth, near_depth), interpolated in inv depth.
Args:
near_depth: The first depth.
far_depth: The last depth.
num_depths: The total number of depths to create, include near_depth and
far_depth are always included and other depths are interpolated between
them, in inverse depth space.
Returns:
The depths sorted in descending order (so furthest first). This order is
useful for back to front compositing.
"""
inv_near_depth = 1.0 / near_depth
inv_far_depth = 1.0 / far_depth
depths = []
for i in range(0, num_depths):
fraction = float(i) / float(num_depths - 1)
inv_depth = inv_far_depth + (inv_near_depth - inv_far_depth) * fraction
depths.append(1.0 / inv_depth)
return depths
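# For example, InterpolateDepths(1.0, 100.0, 3) returns [100.0, ~1.98, 1.0]:
# samples are spaced uniformly in inverse depth and listed far-to-near.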
def ReadViewImages(views):
"""Reads the images for the passed views."""
for view in views:
# Keep images unnormalized as uint8 to save RAM and transmission time to
# and from the GPU.
view.image = np.array(Image.open(view.image_path))
def WriteNpToImage(np_image, path):
"""Writes an image as a numpy array to the passed path.
If the input has more than four channels only the first four will be
written. If the input has a single channel it will be duplicated and
written as a three channel image.
Args:
np_image: A numpy array.
path: The path to write to.
Raises:
IOError: if the image format isn't recognized.
"""
min_value = np.amin(np_image)
max_value = np.amax(np_image)
if min_value < 0.0 or max_value > 255.1:
print('Warning: Outside image bounds, min: %f, max: %f, clipping.' % (min_value, max_value))
np_image = np.clip(np_image, 0.0, 255.0)
if np_image.shape[2] == 1:
np_image = np.concatenate((np_image, np_image, np_image), axis=2)
if np_image.shape[2] == 3:
image = Image.fromarray(np_image.astype(np.uint8))
elif np_image.shape[2] == 4:
image = Image.fromarray(np_image.astype(np.uint8), 'RGBA')
_, ext = os.path.splitext(path)
ext = ext[1:]
if ext.lower() == 'png':
image.save(path, format='PNG')
elif ext.lower() in ('jpg', 'jpeg'):
image.save(path, format='JPEG')
else:
raise IOError('Unrecognized format for %s' % path)
# only for training
class SpacesDataset(Dataset):
def __init__(self, args, mode, **kwargs):
self.folder_path = os.path.join(args.rootdir, 'data/spaces_dataset/data/800/')
self.num_source_views = args.num_source_views
self.mode = mode
assert mode in ['train', 'test', 'validation']
eval_scene_ids = [0, 9, 10, 23, 24, 52, 56, 62, 63, 73]
train_scene_ids = [i for i in np.arange(0, 100) if i not in eval_scene_ids]
if mode == 'train':
self.scene_dirs = [os.path.join(self.folder_path, 'scene_{:03d}'.format(i)) for i in train_scene_ids]
else:
self.scene_dirs = [os.path.join(self.folder_path, 'scene_{:03d}'.format(i)) for i in eval_scene_ids]
self.all_views_scenes = []
for scene_dir in self.scene_dirs:
views = ReadScene(scene_dir)
self.all_views_scenes.append(views)
self.input_view_types = ["small_quad", "medium_quad", "large_quad", "dense"]
self.eval_view_indices_dict = {
"small_quad": [5, 6, 7],
"medium_quad": [2, 4, 5, 6, 7, 11],
"large_quad": [1, 2, 4, 5, 6, 7, 8, 10, 11],
"dense": [5, 7, 10, 11]
}
self.input_indices_dict = {
"small_quad": [1, 2, 10, 11],
"medium_quad": [1, 3, 10, 12],
"large_quad": [0, 3, 9, 12],
"dense": [0, 1, 2, 3, 4, 6, 8, 9, 12, 13, 14, 15]
}
def __len__(self):
return len(self.all_views_scenes)
def __getitem__(self, idx):
all_views = self.all_views_scenes[idx]
num_rigs = len(all_views)
selected_rig_id = np.random.randint(low=0, high=num_rigs) # select a rig position
rig_selected = all_views[selected_rig_id]
type = np.random.choice(self.input_view_types) # select an input type
input_ids = self.input_indices_dict[type]
if len(input_ids) > self.num_source_views:
input_ids = np.random.choice(input_ids, self.num_source_views, replace=False)
ref_cameras = []
ref_rgbs = []
w_max, h_max = 0, 0
for id in input_ids:
ref_camera, ref_rgb = view_obj2camera_rgb(rig_selected[id])
ref_rgbs.append(ref_rgb)
ref_cameras.append(ref_camera)
h, w = ref_rgb.shape[:2]
w_max = max(w, w_max)
h_max = max(h, h_max)
ref_rgbs_np = np.zeros((len(ref_rgbs), h_max, w_max, 3), dtype=np.float32)
for i, ref_rgb in enumerate(ref_rgbs):
orig_h, orig_w = ref_rgb.shape[:2]
h_start = int((h_max - orig_h) / 2.)
w_start = int((w_max - orig_w) / 2.)
ref_rgbs_np[i, h_start:h_start+orig_h, w_start:w_start+orig_w] = ref_rgb
ref_cameras[i][4] += (w_max - orig_w) / 2.
ref_cameras[i][8] += (h_max - orig_h) / 2.
ref_cameras[i][0] = h_max
ref_cameras[i][1] = w_max
# select target view
if self.mode != 'train':
target_id = np.random.choice(self.eval_view_indices_dict[type])
target_view = rig_selected[target_id]
target_camera, target_rgb = view_obj2camera_rgb(target_view)
else:
target_view = sample_target_view_for_training(all_views, selected_rig_id, input_ids)
target_camera, target_rgb = view_obj2camera_rgb(target_view)
ref_cameras = np.array(ref_cameras)
if np.random.choice([0, 1], p=[0.5, 0.5]) and self.mode == 'train':
target_rgb, target_camera, ref_rgbs_np, ref_cameras = random_flip(target_rgb, target_camera,
ref_rgbs_np, ref_cameras)
near_depth = 1.
far_depth = 100.
depth_range = torch.tensor([near_depth, far_depth])
return {'rgb': torch.from_numpy(target_rgb).float(),
'camera': torch.from_numpy(target_camera).float(),
'rgb_path': target_view.image_path,
'src_rgbs': torch.from_numpy(ref_rgbs_np).float(),
'src_cameras': torch.from_numpy(np.stack(ref_cameras, axis=0)).float(),
'depth_range': depth_range
}
class SpacesFreeDataset(Dataset):
def __init__(self, args, mode, **kwargs):
self.folder_path = os.path.join(args.rootdir, 'data/spaces_dataset/data/800/')
self.mode = mode
self.num_source_views = args.num_source_views
self.random_crop = True
assert mode in ['train', 'test', 'validation']
# eval_scene_ids = [0, 9, 10, 23, 24, 52, 56, 62, 63, 73]
eval_scene_ids = []
# use all 100 scenes in spaces dataset for training
train_scene_ids = [i for i in np.arange(0, 100) if i not in eval_scene_ids]
if mode == 'train':
self.scene_dirs = [os.path.join(self.folder_path, 'scene_{:03d}'.format(i)) for i in train_scene_ids]
else:
self.scene_dirs = [os.path.join(self.folder_path, 'scene_{:03d}'.format(i)) for i in eval_scene_ids]
self.all_views_scenes = []
self.all_rgb_paths_scenes = []
self.all_intrinsics_scenes = []
self.all_img_sizes_scenes = []
self.all_c2w_scenes = []
for scene_dir in self.scene_dirs:
views = ReadScene(scene_dir)
self.all_views_scenes.append(views)
rgb_paths, img_sizes, intrinsicss, c2w_mats = get_all_views_in_scene_cam_path(views)
self.all_rgb_paths_scenes.append(rgb_paths)
self.all_img_sizes_scenes.append(img_sizes)
self.all_intrinsics_scenes.append(intrinsicss)
self.all_c2w_scenes.append(c2w_mats)
def __len__(self):
return len(self.all_views_scenes)
def __getitem__(self, idx):
all_views = self.all_views_scenes[idx]
num_rigs = len(all_views)
selected_rig_id = np.random.randint(low=0, high=num_rigs) # select a rig position
rig_selected = all_views[selected_rig_id]
cam_id_selected = np.random.choice(16)
cam_selected = rig_selected[cam_id_selected]
render_camera, render_rgb = view_obj2camera_rgb(cam_selected)
all_c2w_mats = self.all_c2w_scenes[idx]
all_rgb_paths = self.all_rgb_paths_scenes[idx]
all_intrinsics = self.all_intrinsics_scenes[idx]
all_img_sizes = self.all_img_sizes_scenes[idx]
sorted_ids = sort_nearby_views_by_angle(render_camera[-16:].reshape(4, 4), np.array(all_c2w_mats))
nearby_view_ids_selected = np.random.choice(sorted_ids[1:],
self.num_source_views, replace=False)
ref_cameras = []
ref_rgbs = []
w_max, h_max = 0, 0
for id in nearby_view_ids_selected:
rgb_path = all_rgb_paths[id]
ref_rgb = imageio.imread(rgb_path).astype(np.float32) / 255.
h_in_view, w_in_view = all_img_sizes[id]
h_img, w_img = ref_rgb.shape[:2]
ref_rgbs.append(ref_rgb)
ref_intrinsics = all_intrinsics[id]
if h_in_view != h_img or w_in_view != w_img:
ref_intrinsics[0] *= w_img / w_in_view
ref_intrinsics[1] *= h_img / h_in_view
ref_c2w = all_c2w_mats[id]
ref_camera = np.concatenate([list(ref_rgb.shape[:2]), ref_intrinsics.flatten(), ref_c2w.flatten()])
ref_cameras.append(ref_camera)
h, w = ref_rgb.shape[:2]
w_max = max(w, w_max)
h_max = max(h, h_max)
ref_rgbs_np = np.ones((len(ref_rgbs), h_max, w_max, 3), dtype=np.float32)
for i, ref_rgb in enumerate(ref_rgbs):
orig_h, orig_w = ref_rgb.shape[:2]
h_start = int((h_max - orig_h) / 2.)
w_start = int((w_max - orig_w) / 2.)
ref_rgbs_np[i, h_start:h_start+orig_h, w_start:w_start+orig_w] = ref_rgb
ref_cameras[i][4] += (w_max - orig_w) / 2.
ref_cameras[i][8] += (h_max - orig_h) / 2.
ref_cameras[i][0] = h_max
ref_cameras[i][1] = w_max
ref_cameras = np.array(ref_cameras)
if self.mode == 'train' and self.random_crop:
render_rgb, render_camera, ref_rgbs_np, ref_cameras = random_crop(render_rgb, render_camera,
ref_rgbs_np, ref_cameras)
if self.mode == 'train' and np.random.choice([0, 1]):
render_rgb, render_camera, ref_rgbs_np, ref_cameras = random_flip(render_rgb, render_camera,
ref_rgbs_np, ref_cameras)
near_depth = 0.7
far_depth = 100
depth_range = torch.tensor([near_depth, far_depth])
return {'rgb': torch.from_numpy(render_rgb).float(),
'camera': torch.from_numpy(render_camera).float(),
'rgb_path': cam_selected.image_path,
'src_rgbs': torch.from_numpy(ref_rgbs_np).float(),
'src_cameras': torch.from_numpy(np.stack(ref_cameras, axis=0)).float(),
'depth_range': depth_range
}
|
python/tests/test_packages.py
|
lambdaofgod/keepsake
| 810 |
97742
|
<reponame>lambdaofgod/keepsake
import datetime
from keepsake.packages import get_imported_packages
def test_get_imported_packages():
assert "keepsake" in get_imported_packages()
|
InvenTree/stock/migrations/0013_auto_20190908_0916.py
|
ArakniD/InvenTree
| 656 |
97753
|
# Generated by Django 2.2.5 on 2019-09-08 09:16
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('stock', '0012_auto_20190908_0405'),
]
operations = [
migrations.AlterField(
model_name='stockitem',
name='location',
field=mptt.fields.TreeForeignKey(blank=True, help_text='Where is this stock item located?', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='stock_items', to='stock.StockLocation'),
),
]
|
tests/commands/test_update.py
|
kapb14/hatch
| 2,549 |
97765
|
<reponame>kapb14/hatch<filename>tests/commands/test_update.py
import os
from click.testing import CliRunner
from hatch.cli import hatch
from hatch.config import get_venv_dir
from hatch.env import (
get_installed_packages, get_python_implementation, install_packages
)
from hatch.utils import env_vars, remove_path, temp_chdir
from hatch.venv import create_venv, get_new_venv_name, is_venv, venv
from ..utils import get_version_as_bytes
from ..utils import requires_internet, wait_for_os, wait_until
@requires_internet
def test_project_no_venv():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '-ne'])
venv_dir = os.path.join(d, 'venv')
assert not os.path.exists(venv_dir)
with env_vars({'_IGNORE_VENV_': '1'}):
result = runner.invoke(hatch, ['update', 'six'])
wait_until(is_venv, venv_dir)
assert os.path.exists(venv_dir)
with venv(venv_dir):
assert 'ok' in get_installed_packages()
assert result.exit_code == 0
assert 'A project has been detected!' in result.output
assert 'Creating a dedicated virtual env... complete!' in result.output
assert 'Installing this project in the virtual env... complete!' in result.output
assert 'Updating for this project...' in result.output
@requires_internet
def test_project_existing_venv():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok'])
venv_dir = os.path.join(d, 'venv')
wait_until(is_venv, venv_dir)
assert os.path.exists(venv_dir)
with env_vars({'_IGNORE_VENV_': '1'}):
runner.invoke(hatch, ['install', 'six==1.9.0'])
wait_for_os()
with venv(venv_dir):
assert 'ok' in get_installed_packages()
initial_version = get_version_as_bytes('six')
with env_vars({'_IGNORE_VENV_': '1'}):
result = runner.invoke(hatch, ['update', 'six'])
wait_for_os()
with venv(venv_dir):
final_version = get_version_as_bytes('six')
assert result.exit_code == 0
assert initial_version < final_version
assert 'A project has been detected!' not in result.output
assert 'Updating for this project...' in result.output
@requires_internet
def test_project_not_detected_when_venv_active():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '-ne'])
runner.invoke(hatch, ['new', 'ko'])
venv_dir = os.path.join(d, 'ko', 'venv')
wait_until(is_venv, venv_dir)
assert os.path.exists(venv_dir)
with venv(venv_dir):
runner.invoke(hatch, ['install', 'six==1.9.0'])
wait_for_os()
initial_version = get_version_as_bytes('six')
result = runner.invoke(hatch, ['update', 'six'])
wait_for_os()
final_version = get_version_as_bytes('six')
assert result.exit_code == 0
assert initial_version < final_version
assert 'A project has been detected!' not in result.output
assert 'Updating...' in result.output
@requires_internet
def test_project_existing_venv_all_packages():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok'])
venv_dir = os.path.join(d, 'venv')
wait_until(is_venv, venv_dir)
assert os.path.exists(venv_dir)
with env_vars({'_IGNORE_VENV_': '1'}):
runner.invoke(hatch, ['install', 'six==1.9.0'])
wait_for_os()
assert os.path.exists(venv_dir)
with venv(venv_dir):
assert 'ok' in get_installed_packages()
initial_version = get_version_as_bytes('six')
with env_vars({'_IGNORE_VENV_': '1'}):
result = runner.invoke(hatch, ['update', '--all'])
wait_for_os()
with venv(venv_dir):
final_version = get_version_as_bytes('six')
assert result.exit_code == 0
assert initial_version < final_version
assert 'A project has been detected!' not in result.output
assert 'Updating for this project...' in result.output
@requires_internet
def test_requirements():
with temp_chdir() as d:
with open(os.path.join(d, 'requirements.txt'), 'w') as f:
f.write('requests==2.18.1\n')
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
install_packages(['requests==2.17.3'])
initial_version = get_version_as_bytes('requests')
runner = CliRunner()
result = runner.invoke(hatch, ['update', '-nd'])
final_version = get_version_as_bytes('requests')
assert result.exit_code == 0
assert initial_version < final_version
@requires_internet
def test_dev_requirements():
with temp_chdir() as d:
with open(os.path.join(d, 'dev-requirements.txt'), 'w') as f:
f.write('requests==2.18.1\n')
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
install_packages(['requests==2.17.3'])
initial_version = get_version_as_bytes('requests')
runner = CliRunner()
result = runner.invoke(hatch, ['update', '-nd'])
final_version = get_version_as_bytes('requests')
assert result.exit_code == 0
assert initial_version < final_version
@requires_internet
def test_requirements_dev():
with temp_chdir() as d:
with open(os.path.join(d, 'requirements-dev.txt'), 'w') as f:
f.write('requests==2.18.1\n')
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
install_packages(['requests==2.17.3'])
initial_version = get_version_as_bytes('requests')
runner = CliRunner()
result = runner.invoke(hatch, ['update', '-nd'])
final_version = get_version_as_bytes('requests')
assert result.exit_code == 0
assert initial_version < final_version
@requires_internet
def test_requirements_includes_hatch():
with temp_chdir() as d:
runner = CliRunner()
with open(os.path.join(d, 'requirements.txt'), 'w') as f:
f.write('requests==2.18.1\nhatch>=0.0.1\n')
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
install_packages(['requests==2.17.3'])
initial_version = get_version_as_bytes('requests')
result = runner.invoke(hatch, ['update', '-nd'])
final_version = get_version_as_bytes('requests')
installed_packages = get_installed_packages()
assert result.exit_code == 0
assert initial_version < final_version
assert 'hatch' not in installed_packages
def test_requirements_none():
with temp_chdir() as d:
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
runner = CliRunner()
result = runner.invoke(hatch, ['update', '-nd'])
assert result.exit_code == 1
assert 'Unable to locate a requirements file.' in result.output
@requires_internet
def test_packages():
with temp_chdir() as d:
runner = CliRunner()
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
install_packages(['requests==2.17.3', 'six==1.9.0'])
initial_version_requests = get_version_as_bytes('requests')
initial_version_six = get_version_as_bytes('six')
result = runner.invoke(hatch, ['update', '-nd', 'six'])
final_version_requests = get_version_as_bytes('requests')
final_version_six = get_version_as_bytes('six')
assert result.exit_code == 0
assert initial_version_requests == final_version_requests
assert initial_version_six < final_version_six
def test_packages_only_hatch():
with temp_chdir():
runner = CliRunner()
result = runner.invoke(hatch, ['update', '-nd', 'hatch'])
assert result.exit_code == 1
assert 'No packages to install.' in result.output
@requires_internet
def test_all_packages():
with temp_chdir() as d:
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
install_packages(['requests==2.17.3'])
initial_version = get_version_as_bytes('requests')
runner = CliRunner()
result = runner.invoke(hatch, ['update', '-nd', '--all'])
final_version = get_version_as_bytes('requests')
assert result.exit_code == 0
assert initial_version < final_version
def test_all_packages_none():
with temp_chdir() as d:
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
runner = CliRunner()
result = runner.invoke(hatch, ['update', '-nd', '--all'])
if get_python_implementation() in {'PyPy'}: # no cov
assert result.exit_code == 0
else:
assert result.exit_code == 1
assert 'No packages installed.' in result.output
def test_env_not_exist():
with temp_chdir():
runner = CliRunner()
env_name = get_new_venv_name()
result = runner.invoke(hatch, ['update', '-e', env_name])
assert result.exit_code == 1
assert 'Virtual env named `{}` does not exist.'.format(env_name) in result.output
@requires_internet
def test_env():
with temp_chdir():
runner = CliRunner()
env_name = get_new_venv_name()
venv_dir = os.path.join(get_venv_dir(), env_name)
create_venv(venv_dir)
try:
with venv(venv_dir):
install_packages(['requests==2.17.3'])
initial_version = get_version_as_bytes('requests')
result = runner.invoke(hatch, ['update', '-e', env_name, '--all'])
with venv(venv_dir):
final_version = get_version_as_bytes('requests')
finally:
remove_path(venv_dir)
assert result.exit_code == 0
assert initial_version < final_version
@requires_internet
def test_infra():
with temp_chdir() as d:
runner = CliRunner()
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
install_packages(['setuptools==36.0.1'])
initial_version = get_version_as_bytes('setuptools')
result = runner.invoke(hatch, ['update', '-nd', '--infra'])
final_version = get_version_as_bytes('setuptools')
assert result.exit_code == 0
assert initial_version < final_version
@requires_internet
def test_infra_env():
with temp_chdir():
runner = CliRunner()
env_name = get_new_venv_name()
venv_dir = os.path.join(get_venv_dir(), env_name)
create_venv(venv_dir)
try:
with venv(venv_dir):
install_packages(['setuptools==36.0.1'])
initial_version = get_version_as_bytes('setuptools')
result = runner.invoke(hatch, ['update', '-e', env_name, '--infra'])
with venv(venv_dir):
final_version = get_version_as_bytes('setuptools')
finally:
remove_path(venv_dir)
assert result.exit_code == 0
assert initial_version < final_version
|
spotpy/examples/getting_started.py
|
cheginit/spotpy
| 182 |
97788
|
<filename>spotpy/examples/getting_started.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
'''
Copyright 2015 by <NAME>
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: <NAME>
This file holds the example code from the getting_started web-documentation.
'''
from __future__ import print_function, division, absolute_import, unicode_literals
# Getting started
#To start your experience with SPOT you need to have SPOT installed. Please see the [Installation chapter](index.md) for further details.
#To use SPOT we have to import it and use one of the pre-built examples:
import spotpy # Load the SPOT package into your working storage
from spotpy.examples.spot_setup_rosenbrock import spot_setup # Import the two dimensional Rosenbrock example
#The example comes along with parameter boundaries, the Rosenbrock function, the optimal value of the function and RMSE as a likelihood.
#So we can directly start to analyse the Rosenbrock function with one of the algorithms. We start with a simple Monte Carlo sampling:
if __name__ == '__main__':
# Give the Monte Carlo algorithm the example setup; dbname and dbformat control how results are stored (here they are kept in RAM under the name 'RosenMC')
#spot_setup.slow = True
sampler = spotpy.algorithms.mc(spot_setup(), dbname='RosenMC', dbformat='ram')
#Now we can sample with the implemented Monte Carlo algorithm:
sampler.sample(10000) # Sample 10,000 parameter combinations
results=sampler.getdata()
#Now we want to have a look at the results. First we want to know what the algorithm has done during the 10,000 iterations:
#spot.analyser.plot_parametertrace(results) # Use the analyser to show the parameter trace
spotpy.analyser.plot_parameterInteraction(results)
posterior=spotpy.analyser.get_posterior(results)
spotpy.analyser.plot_parameterInteraction(posterior)
#spotpy.analyser.plot_posterior_parametertrace(results, threshold=0.9)
print(spotpy.analyser.get_best_parameterset(results))
|
tests/r/test_utilities2.py
|
hajime9652/observations
| 199 |
97811
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.utilities2 import utilities2
def test_utilities2():
"""Test module utilities2.py by downloading
utilities2.csv and testing shape of
extracted data has 117 rows and 19 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = utilities2(test_path)
try:
assert x_train.shape == (117, 19)
except:
shutil.rmtree(test_path)
raise
|
tests/database.py
|
roor0/dispatch
| 3,417 |
97820
|
from sqlalchemy.orm import scoped_session, sessionmaker
Session = scoped_session(sessionmaker())
|
test_cases/geo_quiz.py
|
edupylearn/simplegui
| 413 |
97824
|
<reponame>edupylearn/simplegui
__author__ = '<NAME>'
# Ref:
# http://stackoverflow.com/questions/29767777/gui-quiz-using-easygui-and-pygame-issue-with-quieting-the-game-and-playing-sound
#We start by importing a few libraries.
#Easygui provides our GUI for the game.
import sys
sys.path.append('..')  # This is only needed in Robert Lugg's development environment
from easygui import *
#Time is a library that introduces the concept of time to the game.
import time
#Pygame is a series of multimedia tools that can create games using Python.
import pygame
#To start pygame we need to initialise it.
pygame.init()
#To use the audio facilities of Pygame we need to tell Pygame that we wish to use them.
pygame.mixer.init()
#Now we create three functions, these functions contain the code to play each audio track.
#The audio for each of these functions should be in the same folder as this code.
def intro():
# pygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=4096)
# pygame.mixer.music.load("audio/intro.ogg")
# pygame.mixer.music.play()
intro = pygame.mixer.Sound('audio/intro.ogg')
intro.play(1)
def win():
win = pygame.mixer.Sound('audio/correct.mp3')
win.play(1)
def lose():
lose = pygame.mixer.Sound('audio/wrong.mp3')
lose.play(1)
#To keep our score, we create a variable called score and set it to zero.
score = 0
#The next variable contains the location of the KS2 geography project logo.
logo = "./images/globe.jpg"
#This is a list, sometimes called an array. In here I store two items.
play = ["Yes","No"]
#I start the game by calling the intro() function, and this plays the quiz theme.
intro()
#Here we create a variable called game_start and it will store the answer to the question "Would you like to play the quiz?"
#To capture the answer I use the buttonbox function from easygui. This function has many options; for this I use:
#title = The text at the top of the dialog box.
#image = logo, the variable I earlier created.
#msg = This is the question that I ask the player.
#choices = play. I use this to reference the earlier created list and use the values contained as the choices for the player.
start_title = "Welcome to KS2 Geography Game Quiz"
start_msg = "Would you like to play the Quiz?"
game_start = buttonbox(title=start_title,image=logo,msg=start_msg,choices=play)
#For debugging purposes I have the answer given by the player printed to the Python shell.
print(game_start)
#Here we see some conditional logic that tests whether the answer was "Yes". If the answer is not equal to "No", it proceeds.
if game_start != "No":
#Here is another easygui dialog box, a message box. It has the same syntax as the previous box we created.
#You can see str(score) in the line below. In order to join a string of text, our message, with the value
#of the score we need to wrap the score, which is an integer, in a helper function that converts integers
#and floats into strings
msgbox(title="Let us begin",msg="Your score is "+str(score))
count = 0
#Question 1
for i in range(0,4):
msg = "Where is capital of the Netherlands?"
hint1 = "It's not Tehran"
hint2 = "It's not London"
title = "Question 1"
q1choices = ["Tehran","London","Amsterdam","Abu Dhabi"]
if count==0:
q1 = choicebox(msg,title,q1choices)
elif count ==1:
msg += hint1
q1 = choicebox(msg,title,q1choices)
else:
msg += hint2
q1 = choicebox(msg,title,q1choices)
if q1 is None:
print("ok, end of game")
exit()
if q1 == "Amsterdam":
win()
if count == 0:
score += 1
elif count ==1:
score +=0.8
else:
score +=0.6
correct = ("Well done you got it right. Your score is "+str(score))
image = "./images/tick.gif"
msgbox(title="CORRECT",image=image,msg=correct)
count = 0
break
else:
lose()
wrong = "I'm sorry that's the wrong answer"
image = "./images/cross.gif"
msgbox(title="Wrong Answer",image=image,msg=wrong)
count +=1
#Question 2
for i in range(0,4):
msg = "Which Continent is Britian part of?"
hint1 = " You should know this one!"
hint2 = " It is the smallest of them all..."
title = "Question 2"
q2choices = ["Europe","America", "Asia","Africa"]
if count == 0:
q2 = choicebox(msg,title,q2choices)
elif count ==1:
msg += hint1
q2 = choicebox(msg,title,q2choices)
else:
msg += hint2
q2 = choicebox(msg,title,q2choices)
if q2 == "Europe":
win()
if count ==0:
score += 1
elif count ==1:
score += 0.8
else:
score += 0.6
correct = ("Well done you got it right. Your score is "+str(score))
image = "./images/tick.gif"
msgbox(title="CORRECT",image=image,msg=correct)
count =0
break
else:
lose()
wrong = "I'm sorry that's the wrong answer"
image = "./images/cross.gif"
msgbox(title="Wrong Answer",image=image,msg=wrong)
count += 1
#Question 3
for i in range(0,4):
msg = "Which of these countries are not in European Union?"
hint1 = " located next to Greece!"
hint2 = " Capital city of this country called Tirana!"
title = "Question 3"
q3choices = ["Latvia","Albania","Estonia","France"]
if count == 0:
q3 = choicebox(msg,title,q3choices)
elif count ==1:
msg += hint1
q3 = choicebox(msg,title,q3choices)
else:
msg += hint2
q3 = choicebox(msg,title,q3choices)
if q3 == "Albania":
win()
if count ==0:
score += 1
elif count ==1:
score += 0.8
else:
score += 0.6
correct = ("Well done you got Albania! hard wasnt it? Your score is "+str(score))
image = "./images/tick.gif"
msgbox(title="CORRECT",image=image,msg=correct)
count = 0
break
else:
lose()
wrong = "I'm sorry that's the wrong answer only 3rd Question!"
image = "./images/cross.gif"
msgbox(title="Wrong Answer",image=image,msg=wrong)
count += 1
#Question 4
for i in range(0,4):
msg = "How many continents are in the world?"
hint1 = " count all of them! "
hint2 = " Really? "
title = "Question 4"
q4choices = ["7","3","5","4"]
if count == 0:
q4 = choicebox(msg,title,q4choices)
elif count ==1:
msg += hint1
q4 = choicebox(msg,title,q4choices)
else:
msg += hint2
q4 = choicebox(msg,title,q4choices)
if q4 == "7":
win()
if count ==0:
score +=1
elif count ==1:
score += 0.8
else:
score += 0.6
correct = ("Was easy right? Your score is "+str(score))
image = "./images/tick.gif"
msgbox(title="CORRECT",image=image,msg=correct)
count =0
break
else:
lose()
wrong = "nice try! Think again and dont forget to add them all up..."
image = "./images/cross.gif"
msgbox(title="Wrong Answer",image=image,msg=wrong)
count+=1
#Question 5
for i in range(0,4):
msg = "Where is the largest country in Europe?"
hint1 = " It is outside EU!"
hint2 = " It is also the Largest country in the world!"
title = "Question 5"
q5choices = ["France","Germany","Russia","UK"]
if count ==0:
q5 = choicebox(msg,title,q5choices)
elif count ==1:
msg+=hint1
q5 = choicebox(msg,title,q5choices)
else:
msg+=hint2
q5 = choicebox(msg,title,q5choices)
if q5 == "Russia":
win()
if count==0:
score += 1
elif count ==1:
score+=0.8
else:
score+=0.6
correct = ("Well done you got it right. Your score is "+str(score))
image = "./images/tick.gif"
msgbox(title="CORRECT",image=image,msg=correct)
count=0
break
else:
lose()
wrong = "I'm sorry that's the wrong answer"
image = "./images/cross.gif"
msgbox(title="Wrong Answer",image=image,msg=wrong)
count+=1
#Question 6
for i in range(0,4):
msg = "What is a book of maps called?"
hint1 = " I Think you pressed the wrong choice by mistake!"
hint2 = " Really?"
title = "Question 6"
q6choices = ["Dictionary","Book","Atlas","Atlantic"]
if count ==0:
q6 = choicebox(msg,title,q6choices)
elif count ==1:
msg+=hint1
q6 = choicebox(msg,title,q6choices)
else:
msg+=hint2
q6 = choicebox(msg,title,q6choices)
if q6 == "Atlas":
win()
if count ==0:
score += 1
elif count ==1:
score += 0.8
else:
score += 0.6
correct = ("Din not need to think about it right? Your score is "+str(score))
image = "./images/tick.gif"
msgbox(title="CORRECT",image=image,msg=correct)
count=0
break
else:
lose()
wrong = "I'm sorry that's the wrong answer! but keep thinking"
image = "./images/cross.gif"
msgbox(title="Wrong Answer",image=image,msg=wrong)
count+=1
#Question 7
for i in range(0,4):
msg = "Which is the largest desert in the world?"
hint1 = " The area of this desert is 9 400 000 SQ KM"
hint2 = " it is located in Africa"
title = "Question 7"
q7choices = ["Malavi","Sahara","Gobi","Arabia"]
if count == 0:
q7 = choicebox(msg,title,q7choices)
elif count ==1:
msg+= hint1
q7 = choicebox(msg,title,q7choices)
else:
msg+=hint2
q7 = choicebox(msg,title,q7choices)
if q7 == "Sahara":
win()
if count ==0:
score += 1
elif count ==1:
score += 0.8
else:
score += 0.6
correct = ("GOOD job mate! hard ones are comimg... Your score is "+str(score))
image = "./images/tick.gif"
msgbox(title="CORRECT",image=image,msg=correct)
count=0
break
else:
lose()
wrong = "I'm sorry that's the wrong answer"
image = "./images/cross.gif"
msgbox(title="Wrong Answer",image=image,msg=wrong)
count+=1
#Question 8
for i in range(0,4):
msg = "Which is the highest mountain in Britain?"
hint1 = " i did not know it myslef so cant help :)"
hint2 = " It is located in Scotland somewhere!"
title = "Question 8"
q8choices = ["Everest","<NAME>","<NAME>","<NAME>"]
if count==0:
q8 = choicebox(msg,title,q8choices)
elif count ==1:
msg+=hint1
q8 = choicebox(msg,title,q8choices)
else:
msg+=hint2
q8 = choicebox(msg,title,q8choices)
if q8 == "<NAME>":
win()
if count ==0:
score += 1
elif count ==1:
score += 0.8
else:
score += 0.6
correct = ("Well done you got it right. Your score is "+str(score))
image = "./images/tick.gif"
msgbox(title="CORRECT",image=image,msg=correct)
count=0
break
else:
lose()
wrong = "I'm sorry that's the wrong answer"
image = "./images/cross.gif"
msgbox(title="Wrong Answer",image=image,msg=wrong)
count += 1
#Question 9
for i in range(0,4):
msg = "When do you see rainbow?"
hint1 = " water must be available in air to form a rainbow!"
hint2 = " vright light in air plus water will cause this beautiful phenonema!"
title = "Question 9"
q9choices = ["When Rainy & Sunny","When Windy & Sunny","When Cloudy & Rainy","When Foggy & Rainy"]
if count ==0:
q9 = choicebox(msg,title,q9choices)
elif count ==1:
msg+=hint1
q9 = choicebox(msg,title,q9choices)
else:
msg+=hint2
q9 = choicebox(msg,title,q9choices)
if q9 == "When Rainy & Sunny":
win()
if count ==0:
score += 1
elif count ==1:
score += 0.8
else:
score += 0.6
correct = ("Well done you got it right again... Your score is "+str(score))
image = "./images/tick.gif"
msgbox(title="CORRECT",image=image,msg=correct)
count =0
break
else:
lose()
wrong = "I'm sorry that's the wrong answer"
image = "./images/cross.gif"
msgbox(title="Wrong Answer",image=image,msg=wrong)
count+=1
#Question 10
for i in range(0,4):
msg = "Which is not a precipitation?"
hint1 = " Google it!"
hint2 = " it doesnt come from sky!"
title = "Question 10"
q10choices = ["Rain","Snow","Hail","Frost"]
if count ==0:
q10 = choicebox(msg,title,q10choices)
elif count ==1:
msg+=hint1
q10 = choicebox(msg,title,q10choices)
else:
msg+=hint2
q10 = choicebox(msg,title,q10choices)
if q10 == "Frost":
win()
if count ==0:
score += 1
elif count ==1:
score += 0.8
else:
score += 0.6
correct = ("Well done you got it right. Your score is "+str(score))
image = "./images/tick.gif"
msgbox(title="CORRECT",image=image,msg=correct)
count =0
break
else:
lose()
wrong = "I'm sorry that's the wrong answer your score is lowering"
image = "./images/cross.gif"
msgbox(title="Wrong Answer",image=image,msg=wrong)
count+=1
gameover_good = "./images/well_done.gif"
gameover_bad = "./images/trymore.jpg"
intro()
game_over_title = "KS2 Geography Quiz"
msg_bad = ("Oh dear you scored "+str(score))
msg_good = ("Well done you scored "+str(score))
if score < 5:
game_over = msgbox(title = game_over_title,image = gameover_bad,msg = msg_bad)
else:
game_over = msgbox(title = game_over_title,image = gameover_good,msg = msg_good)
|
Python/palindrome.py
|
PushpneetSingh/Hello-world
| 1,428 |
97834
|
# to check if string is a palindrome or not
string = input("Enter string:")  # use raw_input("Enter string:") on Python 2
if(string==string[::-1]):
print("The string is a palindrome")
else:
print("The string isn't a palindrome")
|
src/tf_transformers/models/vit/vit.py
|
legacyai/tf-transformers
| 116 |
97903
|
# coding=utf-8
# Copyright 2021 TF-Transformers Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TF 2.0 ViT Model"""
from typing import Dict, Union
import tensorflow as tf
from absl import logging
from tf_transformers.activations import get_activation
from tf_transformers.core import LegacyLayer, LegacyModel
from tf_transformers.layers import PatchEmbeddings, PositionEmbeddingImage
from tf_transformers.layers.transformer import TransformerVIT
from tf_transformers.utils.docstring_file_utils import add_start_docstrings
from tf_transformers.utils.docstring_utils import (
CALL_ENCODER_DOCSTRING_IMAGE,
ENCODER_CLASS_DOCSTRING,
MAIN_CALL_DOCSTRING,
)
logging.set_verbosity("INFO")
@add_start_docstrings(
"ViT Model :",
ENCODER_CLASS_DOCSTRING.format("tf_transformers.models.vit.ViTConfig"),
)
class ViTEncoder(LegacyLayer):
def __init__(
self,
config: Dict,
mask_mode: str = "user_defined",
name: str = "vit",
use_dropout: bool = False,
is_training: bool = False,
use_auto_regressive: bool = False,
use_decoder: bool = False,
batch_size: int = None,
sequence_length: int = None,
classification_labels: int = None,
return_all_layer_outputs: bool = False,
**kwargs,
):
# IMPORTANT: Because saved_model causes some serialization problems here
# self.config = config
# Default initializer
_stddev = config["initializer_range"]
self._initializer = tf.keras.initializers.TruncatedNormal(stddev=_stddev)
self._initializer = tf.keras.initializers.get(self._initializer)
self._activation = get_activation(config["hidden_act"])
self._intermediate_activation = get_activation(config["intermediate_act"])
self._mask_mode = mask_mode
self._model_name = "tf_transformers/" + name
self._use_dropout = use_dropout
self._is_training = is_training
self._use_auto_regressive = use_auto_regressive
self._use_decoder = use_decoder
self._batch_size = batch_size
self._sequence_length = sequence_length
self._return_all_layer_outputs = return_all_layer_outputs
self._classification_labels = classification_labels
self._patch_size = config['patch_size']
self._image_size = config['image_size']
self._num_channels = config['num_channels']
one_side_patch = config['image_size'] // config['patch_size']
self._num_patches = (one_side_patch * one_side_patch) + 1 # 1 for CLS token
# self._self_setattr_tracking = False
super(ViTEncoder, self).__init__(
is_training=self._is_training, use_dropout=self._use_dropout, name=self._model_name, **kwargs
)
# Configuration
self._config_dict = {
"initializer": tf.keras.initializers.serialize(self._initializer),
"activation": tf.keras.activations.serialize(self._activation),
"mask_mode": self._mask_mode,
"name": self._model_name,
"is_training": self._is_training,
"use_auto_regressive": self._use_auto_regressive,
"use_decoder": self._use_decoder,
"use_dropout": self._use_dropout,
"batch_size": self._batch_size,
"sequence_length": self._sequence_length,
"return_all_layer_outputs": self._return_all_layer_outputs,
"num_patches": self._num_patches,
"patch_size": self._patch_size,
"image_size": self._image_size,
"num_channels": self._num_channels,
}
# Update config dict with passed config
self._config_dict.update(config)
self._cls_token = tf.Variable(
tf.zeros((1, 1, config['embedding_size'])), name='{}/cls_token'.format(self._model_name)
)
self._embedding_layer = PatchEmbeddings(
config['image_size'], config['patch_size'], config['num_channels'], config['embedding_size']
)
self._positional_embedding_layer = PositionEmbeddingImage(self._num_patches, config['embedding_size'])
# Embedding Norm
self._last_layer_norm = tf.keras.layers.LayerNormalization(
name="last_layer_norm", axis=-1, epsilon=config["layer_norm_epsilon"], dtype=tf.float32
)
# Embedding dropout Layer
self._embedding_dropout = tf.keras.layers.Dropout(rate=config["hidden_dropout_prob"])
# Transformer Layer
self._transformer_layers = []
for i in range(config["num_hidden_layers"]):
layer = TransformerVIT(
hidden_size=config["embedding_size"],
num_attention_heads=config["num_attention_heads"],
attention_head_size=config["attention_head_size"],
intermediate_size=config["intermediate_size"],
intermediate_activation=self._intermediate_activation,
dropout_rate=config["hidden_dropout_prob"],
attention_dropout_rate=config["attention_probs_dropout_prob"],
kernel_initializer=self._initializer,
is_training=self._is_training,
use_dropout=self._use_dropout,
use_decoder=self._use_decoder,
layer_norm_epsilon=config["layer_norm_epsilon"],
use_auto_regressive=self._use_auto_regressive,
name="transformer/layer_%d" % i,
)
self._transformer_layers.append(layer)
# Add pre-trained classifier layer (by default ViT is trained on 1000 labels)
if self._classification_labels:
self._classifier_layer = tf.keras.layers.Dense(
units=self._classification_labels,
activation=None,
kernel_initializer=self._initializer,
name="classifier_layer",
)
# CLS layer
self._pooler_layer = tf.keras.layers.Dense(
units=config["embedding_size"],
activation="tanh",
kernel_initializer=self._initializer,
name="pooler_transform",
)
self.call_fn = self.get_call_method(self._config_dict)
# Initialize model
self.model_inputs, self.model_outputs = self.get_model(initialize_only=True)
def get_model(self, initialize_only=False):
"""Convert tf.keras.Layer to a tf.keras.Model/LegacyModel.
Args:
self: model (tf.keras.Layer) instance
initialize_only: If False, model (LegacyModel) wont be returned.
"""
input_pixels = tf.keras.layers.Input(
shape=(self._config_dict['image_size'], self._config_dict['image_size'], self._config_dict['num_channels']),
batch_size=self._batch_size,
dtype=tf.float32,
name="input_pixels",
)
inputs = {}
inputs["input_pixels"] = input_pixels # Default
layer_outputs = self(inputs)
if initialize_only:
return inputs, layer_outputs
# Adding model_config is a hack
model = LegacyModel(inputs=inputs, outputs=layer_outputs, name=self._model_name)
model.model_config = self._config_dict
return model
@add_start_docstrings(
"Forward pass of Vit :",
CALL_ENCODER_DOCSTRING_IMAGE,
)
def call_encoder(self, inputs: Dict[str, Union[tf.keras.layers.Input, tf.Tensor]]) -> Dict[str, tf.Tensor]:
# 1. Collect Patch Embeddings
input_ids = inputs["input_pixels"]
batch_size = tf.shape(input_ids)[0]
# b x one_side_patch x one_side_patch x embedding_size (b x 14 x 14 x 768)
embeddings = self._embedding_layer(input_ids)
# Reshape it to (b x (one_side_patch * one_side_patch) x embedding_size) (b x 196 x 768)
embeddings = tf.reshape(embeddings, (batch_size, -1, self._config_dict['embedding_size']))
# Add CLS token to the start (b x 197 x 768)
# Replicate cls_vector batch times (tf.tile doesn't work here for some reason)
cls_token_tiled = tf.ones([batch_size, 1, 1]) * self._cls_token
embeddings = tf.concat([cls_token_tiled, embeddings], axis=1)
# Add word_embeddings + position_embeddings + type_embeddings
# if self._type_embeddings_layer:
# input_type_ids = inputs["input_type_ids"]
# type_embeddings = self._type_embeddings_layer(input_type_ids)
# embeddings = embeddings + type_embeddings
# Addition happens internally
if self._positional_embedding_layer:
embeddings = self._positional_embedding_layer(embeddings)
# 3. Attention Mask
attention_mask = tf.ones((batch_size, self._num_patches, self._num_patches))
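# Images have no padding, so the all-ones mask lets every patch (and the CLS token) attend to every position.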
# 4. Transformer Outputs
encoder_outputs = []
for i in range(self._config_dict["num_hidden_layers"]):
layer = self._transformer_layers[i]
embeddings, _, _ = layer([embeddings, attention_mask])
encoder_outputs.append(embeddings)
# batch_size x sequence_length x embedding_size
token_embeddings = self._last_layer_norm(encoder_outputs[-1])
# First word of last layer outputs [CLS]
cls_token_tensor = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(token_embeddings)
# batch_size x embedding_size
# cls_output = self._pooler_layer(cls_token_tensor)
result = {"token_embeddings": token_embeddings, "cls_output": cls_token_tensor}
if self._classification_labels:
classifier_predictions = self._classifier_layer(cls_token_tensor)
result['class_logits'] = classifier_predictions
if self._return_all_layer_outputs:
# all_cls_token_tensors = []
all_cls_output = []
all_layer_classifier_predictions = []
for per_layer_token_embeddings in encoder_outputs:
per_layer_token_embeddings = self._last_layer_norm(per_layer_token_embeddings)
per_cls_token_tensor = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(
per_layer_token_embeddings
)
# all_cls_token_tensors.append(per_cls_token_tensor)
# all_cls_output.append(self._pooler_layer(per_cls_token_tensor))
all_cls_output.append(per_cls_token_tensor)
if self._classification_labels:
classifier_predictions = self._classifier_layer(cls_token_tensor)
all_layer_classifier_predictions.append(classifier_predictions)
result["all_layer_token_embeddings"] = encoder_outputs
result["all_layer_cls_output"] = all_cls_output
# result["all_layer_cls_token_tensor"] = all_cls_token_tensors
if self._classification_labels:
result["all_layer_classifier_predictions"] = all_layer_classifier_predictions
return result
def call_encoder_auto_regressive(self, inputs):
raise NotImplementedError("ViT as of now not supports decoding")
def call_decoder(self, inputs):
raise NotImplementedError("ViT as of now not supports Decoder")
def call_decoder_auto_regressive(self, inputs):
raise NotImplementedError("ViT as of now not supports Seq2Seq decoding")
@add_start_docstrings(
"Bert Call method :",
MAIN_CALL_DOCSTRING,
)
def call(self, inputs: Dict[str, tf.Tensor]):
"""Call method"""
outputs = self.call_fn(inputs)
return outputs
def get_embedding_table(self):
return NotImplementedError
def get_config(self):
return self._config_dict
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
|
build-s2i-python-kopf/examples/openshift-template-deployer/operator/operator.py
|
jkupferer/containers-quickstarts
| 238 |
97954
|
#!/usr/bin/env python
import kopf
import kubernetes
import os
import json
import subprocess
import yaml
operator_domain = os.environ.get('OPERATOR_DOMAIN', 'app.example.com')
config_map_label = operator_domain + '/config'
app_name_label = operator_domain + '/name'
if os.path.exists('/var/run/secrets/kubernetes.io/serviceaccount/namespace'):
kubernetes.config.load_incluster_config()
namespace = open("/var/run/secrets/kubernetes.io/serviceaccount/namespace").read()
else:
kubernetes.config.load_kube_config()
namespace = kubernetes.config.list_kube_config_contexts()[1]['context']['namespace']
core_v1_api = kubernetes.client.CoreV1Api()
custom_objects_api = kubernetes.client.CustomObjectsApi()
def owner_reference_from_resource(resource):
return dict(
apiVersion = resource['apiVersion'],
controller = True,
blockOwnerDeletion = False,
kind = resource['kind'],
name = resource['metadata']['name'],
uid = resource['metadata']['uid']
)
def process_template(owner_reference, template_name, template_namespace, template_parameters):
'''
Use `oc` to process template and produce resource list json.
'''
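# For example (hypothetical names), a ConfigMap owner "myapp" using template "nginx-example"
# in namespace "openshift" with parameter NAME=myapp would run roughly:
#   oc process openshift//nginx-example -l app.example.com/name=myapp -o json -p NAME=myapp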
oc_process_cmd = [
'oc', 'process', template_namespace + '//' + template_name,
'-l', '{0}={1}'.format(app_name_label, owner_reference['name']),
'-o', 'json',
]
for k, v in template_parameters.items():
oc_process_cmd.extend(['-p', '{0}={1}'.format(k, v)])
oc_process_result = subprocess.run(oc_process_cmd, stdout=subprocess.PIPE, check=True)
resource_list = json.loads(oc_process_result.stdout)
add_owner_reference(resource_list, owner_reference)
return resource_list
def add_owner_reference(resource_list, owner_reference):
'''
Add owner references to resource definition metadata.
'''
for item in resource_list['items']:
metadata = item['metadata']
if 'ownerReferences' in metadata:
if owner_reference not in metadata['ownerReferences']:
metadata['ownerReferences'].append(owner_reference)
else:
metadata['ownerReferences'] = [owner_reference]
def sanity_check_config_map(config_map):
metadata = config_map['metadata']
name = metadata['name']
if not 'data' in config_map or 'config' not in config_map['data']:
raise kopf.PermanentError('Config map must include config data')
def deploy_app_from_config_map(config_map, logger):
'''
Deploy application based on config map
'''
sanity_check_config_map(config_map)
name = config_map['metadata']['name']
try:
config = yaml.safe_load(config_map['data']['config'])
except yaml.parser.ParserError as e:
raise kopf.PermanentError('Unable to load config YAML: {0}'.format(str(e)))
owner_reference = owner_reference_from_resource(config_map)
deploy_app(owner_reference, config, logger)
def deploy_app(owner_reference, config, logger):
logger.info("Deploying app '%s'", owner_reference['name'])
if 'template' in config:
template_name = config['template'].get('name')
template_namespace = config['template'].get('namespace', namespace)
template_parameters = config['template'].get('parameters', {})
logger.info("Processing resources from template %s//%s", template_namespace, template_name)
resource_list = process_template(owner_reference, template_name, template_namespace, template_parameters)
oc_apply_result = subprocess.run(
['oc', 'apply', '-f', '-'],
check=True,
input=json.dumps(resource_list).encode('utf-8'),
stdout=subprocess.PIPE,
)
for line in oc_apply_result.stdout.decode('utf-8').splitlines():
logger.info(line)
@kopf.on.startup()
def configure(settings: kopf.OperatorSettings, **_):
# Disable scanning for CustomResourceDefinitions
settings.scanning.disabled = True
@kopf.on.create('', 'v1', 'configmaps', labels={config_map_label: kopf.PRESENT})
def on_create_config_map(body, logger, **_):
logger.info("New app ConfigMap '%s'", body['metadata']['name'])
deploy_app_from_config_map(body, logger)
|
index_creation/vector_feeder.py
|
lukasstracke/postgres-word2vec
| 131 |
97956
|
#!/usr/bin/python3
class VectorFeeder:
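# Serves the stored word vectors and their words in sequential, fixed-size batches, tracking the read position with a cursor.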
def __init__(self, vectors, words, cursor=0):
self.data = vectors
self.cursor = cursor
self.words = words
print('len words', len(self.words), 'len data', len(self.data))
def get_next_batch(self, size):
batch = self.data[self.cursor:(self.cursor+size)]
word_batch = self.words[self.cursor:(self.cursor+size)]
self.cursor += size
return batch, word_batch
def has_next(self):
return self.cursor < len(self.data)
def get_cursor(self):
return self.cursor
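# Hedged usage sketch (not part of the original module): feeding toy vectors and
# words through the feeder in batches of two; all values below are made up.
if __name__ == '__main__':
    feeder = VectorFeeder([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], ['a', 'b', 'c'])
    while feeder.has_next():
        vectors, words = feeder.get_next_batch(2)
        print(feeder.get_cursor(), words, vectors)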
|
exercises/isogram/isogram.py
|
kishankj/python
| 1,177 |
97975
|
<reponame>kishankj/python
def is_isogram(string):
pass
|
apps/index/views.py
|
PyCN/BlogBackendProject
| 335 |
98011
|
from django.shortcuts import render
from django.views.generic.base import View
from base.models import SiteInfo
class IndexView(View):
def get(self, request):
site_infos = SiteInfo.objects.all().filter(is_live=True)[0]
context = {
'site_infos': site_infos
}
request.session['__access_auth__'] = site_infos.access_password_encrypt
return render(request, 'index.html', context)
|
__init__.py
|
virtualcharacters/DisVoice
| 191 |
98059
|
# -*- coding: utf-8 -*-
__all__=['glottal', 'phonation', 'articulation', 'prosody', 'replearning']
|
tests/bench/test_MComp.py
|
jmabry/pyaf
| 377 |
98072
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.Bench.MComp as mcomp
#tester1 = mcomp.cMComp_Tester(tsds.load_M1_comp());
#tester1.testSignals('')
#tester1.testAllSignals()
#tester2 = mcomp.cMComp_Tester(tsds.load_M2_comp());
#tester1.testSignals('')
#tester2.testAllSignals()
#tester3 = mcomp.cMComp_Tester(tsds.load_M3_Y_comp());
#tester1.testSignals('')
#tester3.testAllSignals()
#tester4 = mcomp.cMComp_Tester(tsds.load_M3_Q_comp());
#tester1.testSignals('')
#tester4.testAllSignals()
#tester5 = mcomp.cMComp_Tester(tsds.load_M3_M_comp());
#tester1.testSignals('')
#tester5.testAllSignals()
#tester6 = mcomp.cMComp_Tester(tsds.load_M3_Other_comp());
#tester1.testSignals('')
#tester6.testAllSignals()
tester7 = mcomp.cMComp_Tester(tsds.load_M4_comp("FINANCE") , "M4COMP");
tester7.testSignals('FIN1')
# tester7.testAllSignals()
|
python/ray/autoscaler/_private/readonly/node_provider.py
|
linyiyue/ray
| 21,382 |
98074
|
<gh_stars>1000+
from typing import Tuple, List
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import (TAG_RAY_NODE_KIND, NODE_KIND_HEAD,
TAG_RAY_USER_NODE_TYPE, TAG_RAY_NODE_NAME,
TAG_RAY_NODE_STATUS, STATUS_UP_TO_DATE)
from ray.autoscaler._private.util import format_readonly_node_type
class ReadOnlyNodeProvider(NodeProvider):
"""A node provider that merely reports the current cluster state.
This is used for laptop mode / manual cluster setup modes, in order to
provide status reporting in the same way for users."""
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
self.nodes = {}
def is_readonly(self):
return True
def _set_nodes(self, nodes: List[Tuple[str, str]]):
"""Update the set of nodes in the cluster.
Args:
nodes: List of (node_id, node_manager_address) tuples.
"""
new_nodes = {}
for node_id, node_manager_address in nodes:
# We make up a fake node type for each node (since each node
# could have its own unique configuration).
new_nodes[node_id] = {
# Keep prefix in sync with node config gen in monitor.py
"node_type": format_readonly_node_type(node_id),
"ip": node_manager_address,
}
self.nodes = new_nodes
def non_terminated_nodes(self, tag_filters):
return list(self.nodes.keys())
def is_running(self, node_id):
return node_id in self.nodes
def is_terminated(self, node_id):
return node_id not in self.nodes
def node_tags(self, node_id):
tags = {
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_USER_NODE_TYPE: self.nodes[node_id]["node_type"],
TAG_RAY_NODE_NAME: node_id,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE
}
return tags
def external_ip(self, node_id):
return node_id
def internal_ip(self, node_id):
return node_id
def set_node_tags(self, node_id, tags):
raise AssertionError("Readonly node provider cannot be updated")
def create_node(self, node_config, tags, count):
raise AssertionError("Readonly node provider cannot be updated")
def terminate_node(self, node_id):
raise AssertionError("Readonly node provider cannot be updated")
@staticmethod
def bootstrap_config(cluster_config):
return cluster_config
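# Hedged usage sketch (not part of the Ray source): populating the provider with
# two fake nodes and reading back their tags; ids and addresses are made up.
if __name__ == '__main__':
    provider = ReadOnlyNodeProvider(provider_config={}, cluster_name='demo')
    provider._set_nodes([('node-1', '10.0.0.1'), ('node-2', '10.0.0.2')])
    for node_id in provider.non_terminated_nodes(tag_filters={}):
        print(node_id, provider.internal_ip(node_id), provider.node_tags(node_id))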
|
benchmarks/fasta.py
|
codeclimate-testing/falcon
| 115 |
98076
|
<filename>benchmarks/fasta.py
import bisect
import sys
alu = (
'GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGG'
'GAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGA'
'CCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAAT'
'ACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCA'
'GCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGG'
'AGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCC'
'AGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA')
iub = zip('acgtBDHKMNRSVWY', [0.27, 0.12, 0.12, 0.27] + [0.02] * 11)
homosapiens = [
('a', 0.3029549426680),
('c', 0.1979883004921),
('g', 0.1975473066391),
('t', 0.3015094502008),
]
def genRandom(lim, ia=3877, ic=29573, im=139968):
seed = 42
imf = float(im)
while 1:
seed = (seed * ia + ic) % im
yield lim * seed / imf
Random = genRandom(1.)
def makeCumulative(table):
P = []
C = []
prob = 0.
for char, p in table:
prob += p
P += [prob]
C += [char]
return (P, C)
def repeatFasta(src, n):
width = 60
r = len(src)
s = src + src + src[:n % r]
for j in xrange(n // width):
i = j * width % r
#print s[i:i + width]
if n % width:
pass
#print s[-(n % width):]
def randomFasta(table, n):
width = 60
r = xrange(width)
gR = Random.next
bb = bisect.bisect
jn = ''.join
probs, chars = makeCumulative(table)
for j in xrange(n // width):
pass
#print jn([chars[bb(probs, gR())] for i in r])
if n % width:
pass
#print jn([chars[bb(probs, gR())] for i in xrange(n % width)])
if __name__ == '__main__':
N = 150000000
randomFasta(homosapiens, N)
|
Alignment/CommonAlignmentProducer/python/ALCARECOTkAlZMuMu_Output_cff.py
|
ckamtsikis/cmssw
| 852 |
98080
|
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
# AlCaReco for track based alignment using ZMuMu events
OutALCARECOTkAlZMuMu_noDrop = cms.PSet(
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOTkAlZMuMu')
),
outputCommands = cms.untracked.vstring(
'keep *_ALCARECOTkAlZMuMu_*_*',
'keep L1AcceptBunchCrossings_*_*_*',
'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
'keep *_TriggerResults_*_*',
'keep DcsStatuss_scalersRawToDigi_*_*',
'keep *_offlinePrimaryVertices_*_*')
)
import copy
OutALCARECOTkAlZMuMu = copy.deepcopy(OutALCARECOTkAlZMuMu_noDrop)
OutALCARECOTkAlZMuMu.outputCommands.insert(0, "drop *")
|
web/app.py
|
pwh19920920/spiders
| 390 |
98117
|
<filename>web/app.py
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.getcwd())))
from flask import Flask
from flask_cors import CORS
from web import config, error, views, log
def create_app() -> Flask:
app = Flask(__name__)
app.config.from_object(config)
CORS(app)
views.init_app(app)
error.init_app(app)
log.init_app(app)
if app.config["ENV"] == "development":
print(app.url_map)
return app
app = create_app()
if __name__ == "__main__":
app.run()
|
scripts/atc_visualizations/base.py
|
efeerdur/atomic-threat-coverage
| 542 |
98122
|
<gh_stars>100-1000
#!/usr/bin/env python3
# ########################################################################### #
# ############################ Base Classes ################################# #
# ########################################################################### #
import json
import datetime
import getpass
import requests
class BaseKibana:
"""kibana_url - link to Kibana main page"""
username = str()
password = str()
_kibana_auth = None
kibana_url = str()
kibana_usage = None
@classmethod
def init_kibana_api(cls):
# TODO: Do some checks, test connection, etc
pass
@classmethod
def omit_kibana(cls):
cls.kibana_usage = False
@classmethod
def init_credentials(cls):
        # kibana_usage is only False once omit_kibana() has been called
        if cls.kibana_usage is False:
_ = "n"
else:
_ = ""
while _ not in ["y", "n"]:
_ = input("Can I use Kibana? [y/n]: ")[0].lower()
if _ == "n":
cls.kibana_usage = False
return False
_ = ""
while _ not in ["y", "n"]:
_ = input("Does your Kibana instance requires " +
"auth? [y/n]: ")[0].lower()
if _ == "y":
cls._kibana_auth = True
cls.username = input("Username [%s]: " % cls.username)
cls.password = getpass.getpass(
"Password [%s]: " % "".join(["*" for val in cls.password])
)
elif _ == "n":
cls._kibana_auth = False
cls.kibana_url = input("Provide Kibana URL (main page, for instance" +
" http://localhost:5601/): ")
while True:
print("KIBANA_URL: %s" % cls.kibana_url)
_ = input("Is this correct? [y/n]: ")[0].lower()
if _ == "y":
break
else:
cls.kibana_url = input("Provide Kibana URL " +
"(main page, for instance" +
" http://localhost:5601/): ")
cls.kibana_url = cls.kibana_url if cls.kibana_url.endswith("/") else \
cls.kibana_url + "/"
cls.kibana_usage = True
return True
@classmethod
def check_kibana_vars(cls):
if not isinstance(cls.kibana_usage, bool):
return cls.init_credentials()
if isinstance(cls._kibana_auth, bool):
if cls._kibana_auth:
                if not cls.username or not cls.password:
                    return cls.init_credentials()
            if not cls.kibana_url:
                return cls.init_credentials()
        else:
            return cls.init_credentials()
return True
@classmethod
def search_id_of_title_by_type(cls, search_type, search_title):
"""Returns an ID (string) of an object searched using object title
search_type - string in ["index-pattern", "search"]
search_title - string
"""
search_type = search_type.lower()
if search_type not in ["index-pattern", "search"]:
raise Exception("Search type (%s) not supported" % search_type)
if cls.check_kibana_vars():
result_dict = {}
total_pages = int()
current_page = 1
suffix = "api/saved_objects/_find?" + \
"type=%s&fields=title&fields=id" % search_type
r = requests.get(cls.kibana_url + suffix)
if r.json().get("total"):
total_pages = r.json().get("total")
while current_page <= total_pages:
if r.json().get("saved_objects"):
for item in r.json().get("saved_objects"):
if item.get("attributes"):
result_dict[item.get("attributes").get("title")] =\
item.get('id')
if search_title in result_dict.keys():
return result_dict[search_title]
else:
current_page += 1
r = requests.get(
                            cls.kibana_url + suffix + "&page=%s" % current_page
)
del(result_dict)
return None
class BaseKibanaAgg(BaseKibana):
"""Base Kibana Agg"""
def __init__(self, id=None, enabled=None, type=None, schema=None,
params=None):
self.id = str()
self.enabled = True # By default agg is enabled
self.type = str()
self.schema = str()
self.params = dict()
if id is not None:
self.id = id
if enabled is not None:
self.enabled = enabled
if type is not None:
self.type = type
if schema is not None:
            self.schema = schema  # probably 'metric'
if params is not None:
self.params = params
def validate(self):
# TODO: Write validate method
return True
def __call__(self):
if self.validate():
return self.__dict__
def __repr__(self):
return str(self.__call__())
class BaseKibanaSeriesParams(BaseKibana):
"""Base Kibana Series Params"""
def __init__(self, id, data=None, drawLinesBetweenPoints=None,
mode=None, show=None, showCircles=None, type=None,
valueAxis=None, interpolate=None):
self.data = dict()
self.drawLinesBetweenPoints = bool()
self.mode = str()
self.show = bool()
self.showCircles = bool()
        self.type = str()
self.valueAxis = str()
self.interpolate = str()
if data is not None:
self.data = data
if drawLinesBetweenPoints is not None:
self.drawLinesBetweenPoints = drawLinesBetweenPoints
if mode is not None:
self.mode = mode
if show is not None:
self.show = show
if showCircles is not None:
self.showCircles = showCircles
if type is not None:
self.type = type
if valueAxis is not None:
self.valueAxis = valueAxis
if interpolate is not None:
self.interpolate = interpolate
def validate(self):
# TODO: Write validate method
return True
def __call__(self):
if self.validate():
return self.__dict__
def __repr__(self):
return str(self.__call__())
class BaseKibanaVisState(BaseKibana):
"""Base Kibana visState"""
def __init__(self, title=None, type=None, params=None, aggs=None):
self.title = str()
self.type = str()
self.params = dict()
self.aggs = list()
if title:
self.title = title
if type:
self.type = type
if params:
self.params = params
if aggs:
self.aggs = aggs
def validate(self):
# TODO: Write validate method
return True
def __call__(self):
if self.validate():
return json.dumps(self.__dict__)
def __repr__(self):
return str(self.__dict__)
def __iter__(self):
return iter(self.__dict__)
class BaseKibanaParams(BaseKibana):
"""Base Kibana Params"""
def __init__(self, type=None, grid=None, categoryAxes=None, valueAxes=None,
seriesParams=None, addTooltip=None, addLegend=None,
legendPosition=None, times=None, addTimeMarker=None):
self.type = str()
self.grid = dict()
self.categoryAxes = list()
        self.valueAxes = list()  # not to be confused with the valueAxis field in seriesParams
self.seriesParams = list()
self.addTooltip = True
self.addLegend = True
self.legendPosition = str()
self.times = list()
self.addTimeMarker = False
if type:
self.type = type
if grid:
self.grid = grid
if categoryAxes:
self.categoryAxes = categoryAxes
if valueAxes:
self.valueAxes = valueAxes
if seriesParams:
self.seriesParams = seriesParams
if addTooltip:
self.addTooltip = addTooltip
if addLegend:
self.addLegend = addLegend
if legendPosition:
self.legendPosition = legendPosition
if times:
self.times = times
if addTimeMarker:
self.addTimeMarker = addTimeMarker
def validate(self):
# TODO: Write validate method
return True
def __call__(self):
if self.validate():
return self.__dict__
def __repr__(self):
return str(self.__call__())
class BaseKibanaVisualizationObject(BaseKibana):
"""Base Kibana VisualizationObject"""
def __init__(self, title=None):
self.description = str()
self.kibanaSavedObjectMeta = dict()
self.title = str()
self.uiStateJSON = str()
self.version = 1
self.visState = str() # '{ some valid JSON }'
if title:
self.title = title
def validate(self):
# TODO: Write validate method
return True
def __call__(self):
if self.validate():
return self.__dict__
def __repr__(self):
return str(self.__call__())
class BaseGridData(BaseKibana):
"""Base gridData"""
def __init__(self, x=0, y=0, w=0, h=0, vid=1):
self.w = w
self.h = h
self.x = x
self.y = y
self.i = str(vid)
def validate(self):
# TODO: Write validate method
return True
def __call__(self):
if self.validate():
return self.__dict__
def __repr__(self):
return str(self.__call__())
class BasePanelsJson(BaseKibana):
"""Base panelsJSON"""
def __init__(self, vis_uuid=None, vis_id=None, version="6.6.2",
type="visualization"):
self.embeddableConfig = dict()
self.gridData = BaseGridData()
self.id = str()
self.panelIndex = str()
self.type = type
self.version = version
if vis_uuid:
self.id = vis_uuid
if vis_id:
self.panelIndex = str(vis_id)
self.gridData.i = str(vis_id)
def validate(self):
# TODO: Write validate method
return True
def __call__(self):
if self.validate():
return self.__dict__
def __repr__(self):
return str(self.__call__())
class BaseOptionsJson(BaseKibana):
"""Base optionsJSON"""
def __init__(self):
self.darkTheme = bool()
self.hidePanelTitles = bool()
self.useMargins = bool()
def validate(self):
# TODO: Write validate method
return True
def __call__(self):
if self.validate():
return self.__dict__
def __repr__(self):
return str(self.__call__())
class BaseKibanaDoc(BaseKibana):
"""Base Kibana Doc"""
def __init__(self):
self.type = str()
self.updated_at = str()
def validate(self):
# TODO: Write validate method
return True
def __call__(self):
if self.validate():
self.updated_at = datetime.datetime.today().isoformat() + "Z"
return self.__dict__
def __repr__(self):
return str(self.__call__())
class KibanaDashboardDoc(BaseKibanaDoc):
"""Kibana Visualization Doc"""
def __init__(self):
super().__init__() # Init Base Class
self.type = "dashboard"
self.dashboard = dict()
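# Hedged illustration (not part of the original module): the building blocks above
# render themselves as plain dicts/JSON; the id/type/schema values are made up.
if __name__ == '__main__':
    count_agg = BaseKibanaAgg(id='1', type='count', schema='metric', params={})
    print(count_agg())
    vis_state = BaseKibanaVisState(title='demo', type='histogram', aggs=[count_agg()])
    print(vis_state())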
|
src/out/NIPS18evaluation/evaluationTreeLSTM/Lantern/preprocess_data.py
|
supunab/Lantern
| 158 |
98135
|
<filename>src/out/NIPS18evaluation/evaluationTreeLSTM/Lantern/preprocess_data.py
import codecs
import functools
import os
import tempfile
import zipfile
from nltk.tokenize import sexpr
import numpy as np
from six.moves import urllib
def getAllwordsFromOneData(data):
data = data.split()
words = set()
for i in data:
if i.endswith(')'):
words.add(i.split(')')[0])
return (words)
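# For instance (hedged, not in the original file): for the tree line
# "(3 (2 It) (3 (2 's) (3 good)))" the extracted set is {"It", "'s", "good"},
# since only tokens ending with ')' carry the leaf words.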
#get all words used in dev.txt and collect the number of trees
target_file = './dev.txt'
#target_file = os.path.join(data_dir, target)
words = set()
num_tree = 0
with open(target_file, 'r') as f:
for line in f:
num_tree += 1
words.update(getAllwordsFromOneData(line))
#filter the GloVe file for all words used, so we don't have to keep a big file in memory
# we assume the 2G glove original file is at this directory
glove_path = '../PyTorch/data/glove/glove.840B.300d.txt'
# we want to filter it so that we maintain a small subset of the glove embeddings locally
filtered_glove_path = os.path.join('./', 'filtered_glove.txt')
def filter_glove(words):
ncount = 0
with codecs.open(glove_path, encoding = 'utf-8') as f:
with codecs.open(filtered_glove_path, 'w', encoding='utf-8') as out:
for line in f:
line = line.strip()
if not line: continue
if line.split(u' ', 1)[0] in words:
out.write(line + '\n')
ncount += 1
print("Lantern: generating filtered_glove, taking %s lines" % ncount)
# if we already have the filtered file locally, then no need to filter again
if not os.path.exists(filtered_glove_path):
print("Lantern: need to filter the big 2G GOLVE file into a smaller subset file called %s" % filtered_glove_path)
filter_glove(words)
# we also want to generate a file containing array[double] only, for Lantern to read.
# also this function generates the word_idx, which is a mapping from word to index in the array[double]s
dev_glove_path = os.path.join('./', 'small_glove.txt')
print("Lantern: now generate embedding file called %s and word_idx" % dev_glove_path)
def filter_small_glove(words):
nread = 0
nwrote = 0
word_idx = {}
# first we need to figure out how many lines we will write
ncount = 0
with codecs.open(filtered_glove_path, encoding='utf-8') as f:
with codecs.open(dev_glove_path, 'w', encoding='utf-8') as out:
for line in f:
line = line.strip()
if not line: continue
temp = line.split(u' ', 1)
if temp[0] in words:
ncount += 1
# add a random row of 300 numbers for unseen words
ncount += 1
# then we actually write to the file
with codecs.open(filtered_glove_path, encoding='utf-8') as f:
with codecs.open(dev_glove_path, 'w', encoding='utf-8') as out:
out.write(str(ncount) + '\n') # write the number of entries in this file
for line in f:
nread += 1
line = line.strip()
if not line: continue
temp = line.split(u' ', 1)
if temp[0] in words:
#out.write(temp[0] + ' ')
out.write(temp[1] + '\n')
word_idx[temp[0]] = nwrote
nwrote += 1
      # add a random row of 300 numbers, for unseen words
rn = np.random.uniform(-0.05, 0.05, 300).astype(np.float32)
for i in range(len(rn)):
if i == len(rn) - 1: out.write(str(rn[i]) + '\n')
else: out.write(str(rn[i]) + ' ')
print('Lantern: read %s lines, wrote %s' % (nread, nwrote + 1))
return (nwrote), word_idx
# filter Glove file and get word -> index relationship
index_unknown, word_idx = filter_small_glove(words)
# parse samples so that we have tree encoded as arrays
def parseOneSample(data):
def secondCompleteEnclosing(data, i):
count = 0
while (True):
i += 1
if data[i].endswith(')'):
count -= (data[i].count(')') - 1)
if count == 0:
return (i+1)
else:
count += 1
data_raw = data.split()
data = []
for i in range(len(data_raw)):
if data_raw[i].endswith(')'):
data[-1] = data[-1] + ' ' + data_raw[i]
else:
data.append(data_raw[i])
scores = []
values = []
for i in data:
scores.append(int(i[1:].split()[0]))
if i.endswith(')'):
entry = i.split()[-1].split(')')[0]
if entry in word_idx.keys(): encode = word_idx[entry]
else: encode = index_unknown
values.append(encode)
else:
values.append(-1)
lch = []
rch = []
for i in range(len(data)):
if data[i].endswith(')'):
lch.append(-1)
rch.append(-1)
else:
lch.append(i+1)
rch.append(secondCompleteEnclosing(data, i))
# return the arrays
return scores, values, lch, rch
# parse samples in dev.txt file and write array_encode of trees to file
array_tree_path = os.path.join('./', 'array_tree.txt')
def write_array_tree():
# read target_file, for each line, call parseOneSample to get arrays
i = 0
with open(target_file, "r") as f:
with open(array_tree_path, 'w') as out:
out.write(str(num_tree) + '\n')
for line in f:
arrays = parseOneSample(line)
i += 1
out.write(str(len(arrays[0])) + '\n')
for array in arrays:
out.write(' '.join(str(item) for item in array) + '\n')
print("Lantern: wrote %s data entries to %s" % (i, array_tree_path))
write_array_tree()
|
LinkedList/LineRemoveMiddlePoints.py
|
Amanjakhetiya/Data_Structures_Algorithms_In_Python
| 195 |
98140
|
"""Given a linked list of co-ordinates where adjacent points either form a vertical line or a horizontal line.
Delete points from the linked list which are in the middle of a horizontal or vertical line."""
"""Input: (0,10)->(1,10)->(5,10)->(7,10)
|
(7,5)->(20,5)->(40,5)
Output: Linked List should be changed to following
(0,10)->(7,10)
|
(7,5)->(40,5) """
# Node class
class Node:
# Constructor to initialise (x, y) coordinates and next
def __init__(self, x=None, y=None):
self.x = x
self.y = y
self.next = None
class SinglyLinkedList:
# Constructor to initialise head
def __init__(self):
self.head = None
    # Function to delete middle nodes of horizontal or vertical line segments
def delete_middle_nodes(self):
current = self.head
# iterate while the next of the next node is not none
while current and current.next and current.next.next is not None:
# assign variables for next and next of next nodes
next = current.next
next_next = current.next.next
            # if x coordinates of current and next node are equal i.e. vertical line
            if current.x == next.x:
                # check if there are more than 2 nodes in the vertical line
# if yes then delete the middle node and update next and next_next
while next_next is not None and next.x == next_next.x:
current.next = next_next
next = next_next
next_next = next_next.next
            # if y coordinates of current and next node are equal i.e. horizontal line
            elif current.y == next.y:
                # check if there are more than 2 nodes in the horizontal line
# if yes then delete the middle node and update next and next_next
while next_next is not None and next.y == next_next.y:
current.next = next_next
next = next_next
next_next = next_next.next
# updated the current node to next node for checking the next line nodes
current = current.next
# Function to Insert data at the beginning of the linked list
def insert_at_beg(self, x, y):
node = Node(x, y)
node.next = self.head
self.head = node
# Function to print the linked list
def print_data(self):
current = self.head
while current is not None:
print('(',current.x, ',', current.y, ') -> ', end='')
current = current.next
print('None')
if __name__ == '__main__':
linked_list = SinglyLinkedList()
linked_list.insert_at_beg(40,5)
linked_list.insert_at_beg(20,5)
linked_list.insert_at_beg(7,5)
linked_list.insert_at_beg(7,10)
linked_list.insert_at_beg(5,10)
linked_list.insert_at_beg(1,10)
linked_list.insert_at_beg(0,10)
# print the linked list representing vertical and horizontal lines
linked_list.print_data()
# call the delete_middle_nodes function
linked_list.delete_middle_nodes()
# print the new linked list
linked_list.print_data()
|
tools/tests/test_check_rabbitmq_queue.py
|
TylerPham2000/zulip
| 17,004 |
98177
|
<reponame>TylerPham2000/zulip<gh_stars>1000+
import time
from unittest import TestCase, mock
from scripts.lib.check_rabbitmq_queue import CRITICAL, OK, UNKNOWN, WARNING, analyze_queue_stats
class AnalyzeQueueStatsTests(TestCase):
def test_no_stats_available(self) -> None:
result = analyze_queue_stats("name", {}, 0)
self.assertEqual(result["status"], UNKNOWN)
def test_queue_stuck(self) -> None:
"""Last update > 5 minutes ago and there's events in the queue."""
result = analyze_queue_stats("name", {"update_time": time.time() - 301}, 100)
self.assertEqual(result["status"], CRITICAL)
self.assertIn("queue appears to be stuck", result["message"])
def test_queue_just_started(self) -> None:
"""
We just started processing a burst of events, and haven't processed enough
to log productivity statistics yet.
"""
result = analyze_queue_stats(
"name",
{
"update_time": time.time(),
"current_queue_size": 10000,
"recent_average_consume_time": None,
},
10000,
)
self.assertEqual(result["status"], OK)
def test_queue_normal(self) -> None:
"""10000 events and each takes a second => it'll take a long time to empty."""
result = analyze_queue_stats(
"name",
{
"update_time": time.time(),
"current_queue_size": 10000,
"queue_last_emptied_timestamp": time.time() - 10000,
"recent_average_consume_time": 1,
},
10000,
)
self.assertEqual(result["status"], CRITICAL)
self.assertIn("clearing the backlog", result["message"])
# If we're doing 10K/sec, it's OK.
result = analyze_queue_stats(
"name",
{
"update_time": time.time(),
"current_queue_size": 10000,
"queue_last_emptied_timestamp": time.time() - 10000,
"recent_average_consume_time": 0.0001,
},
10000,
)
self.assertEqual(result["status"], OK)
# Verify logic around whether it'll take MAX_SECONDS_TO_CLEAR to clear queue.
with mock.patch.dict("scripts.lib.check_rabbitmq_queue.MAX_SECONDS_TO_CLEAR", {"name": 10}):
result = analyze_queue_stats(
"name",
{
"update_time": time.time(),
"current_queue_size": 11,
"queue_last_emptied_timestamp": time.time() - 10000,
"recent_average_consume_time": 1,
},
11,
)
self.assertEqual(result["status"], WARNING)
self.assertIn("clearing the backlog", result["message"])
result = analyze_queue_stats(
"name",
{
"update_time": time.time(),
"current_queue_size": 9,
"queue_last_emptied_timestamp": time.time() - 10000,
"recent_average_consume_time": 1,
},
9,
)
self.assertEqual(result["status"], OK)
|
tests/testing_lib/test_data.py
|
Zotkin/incremental_learning.pytorch
| 277 |
98180
|
<gh_stars>100-1000
import pytest
from inclearn.lib import data
@pytest.mark.parametrize("dataset_name,increment,n_tasks", [
("cifar100", 10, 10),
("cifar100", 2, 50)
])
def test_incremental_class(dataset_name, increment, n_tasks):
dataset = data.IncrementalDataset(
dataset_name,
increment=increment
)
assert dataset.n_tasks == n_tasks
current_class = 0
for _ in range(dataset.n_tasks):
task_info, train_loader, _, test_loader = dataset.new_task()
min_c, max_c = current_class, current_class + increment
assert task_info["increment"] == increment
assert task_info["min_class"] == min_c
assert task_info["max_class"] == max_c
for _, targets, _ in train_loader:
assert all(min_c <= t.item() < max_c for t in targets)
for _, targets, _ in test_loader:
assert all(0 <= t.item() < max_c for t in targets)
current_class += increment
|
Python/math/tribonacci.py
|
Khushboo85277/NeoAlgo
| 897 |
98185
|
<reponame>Khushboo85277/NeoAlgo
"""
Python program to find the n'th number in the tribonacci series.
The tribonacci series is a generalization of the Fibonacci sequence, in which each term
is the sum of the previous three terms.
"""
def find_tribonacci(n):
    # The first three terms are 0, 0, 1; return early for small n to avoid index errors
    if n <= 2:
        return 0
    dp = [0] * n
    dp[0] = 0
    dp[1] = 0
    dp[2] = 1
# Compute the sum of the previous three terms
for i in range(3,n):
dp[i] = dp[i-1] + dp[i-2] + dp[i-3]
return dp[n-1]
if __name__ == '__main__':
print("Enter the value of n?, where you need the n'th number in the tribonacci sequence. ", end="")
n = int(input())
if (n <= 0):
print("The given value of n is invalid.", end="")
exit()
res = find_tribonacci(n)
print("The {}'th term in the tribonacci series is {}.".format(n, res))
"""
Time Complexity - O(n)
Space Complexity - O(n)
SAMPLE INPUT AND OUTPUT
SAMPLE I
Enter the value of n?, where you need the n'th number in the tribonacci sequence. 12
The 12'th term in the tribonacci series is 149.
SAMPLE II
Enter the value of n?, where you need the n'th number in the tribonacci sequence. 1254
The 1254'th term in the tribonacci series is 4020147461713125140.
"""
|
py/elements/offsets.py
|
pombredanne/debin
| 322 |
98205
|
import traceback
import sys
import depgraph
from common import utils
from common.constants import UNKNOWN_LABEL, VOID, LOC_VAR, FUN_ARG, INT
from common.constants import ENUM_DW_FORM_exprloc, ENUM_ABBREV_CODE, TTYPES
from elements.ttype import Ttype
from elements.givs import Node
class Offset(Node):
total = 0
known = 0
unknown = 0
inf = 0
giv = 0
tp_1p = 0
fp_1p = 0
tn_1p = 0
fn_1p = 0
correct = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __repr__(self):
return 'Offset'
def __str__(self):
return repr(self)
def stat(self):
Offset.total += 1
class GivOffset(Offset):
total = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.binary = kwargs['binary']
self.offset = kwargs['offset']
self.access = kwargs['access']
self.exp = kwargs['exp']
self.name = 'GivOffset'
def __repr__(self):
return '[GivOffset {}]'.format(repr(self.offset))
def __str__(self):
return repr(self)
def stat(self):
super().stat()
GivOffset.total += 1
Offset.giv += 1
class TempOffset(Offset):
total = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.binary = kwargs['binary']
self.base_pointer = kwargs['base_pointer']
self.offset = kwargs['offset']
self.pcs = set()
def __repr__(self):
return '[TempOffset {} {}]'.format(self.base_pointer, self.offset)
def __str__(self):
return repr(self)
def add_pc(self, pc):
self.pcs.add(pc)
def stat(self):
super().stat()
TempOffset.total += 1
Offset.giv += 1
class DirectOffset(Offset):
total = 0
known = 0
unknown = 0
inf = 0
giv = 0
correct = 0
ttype_total = 0
ttype_known = 0
ttype_unknown = 0
ttype_inf = 0
ttype_tp_1p = 0
ttype_fp_1p = 0
ttype_tn_1p = 0
ttype_fn_1p = 0
ttype_correct = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.binary = kwargs['binary']
self.offset = kwargs['offset']
self.access = kwargs['access']
self.name = '@DO'
self.is_name_given = False
self.ttype = Ttype(owner=self)
self.n2p_type = self.binary.config.INF
self.train_name = UNKNOWN_LABEL
self.test_name = UNKNOWN_LABEL
self.var_type = LOC_VAR
def __repr__(self):
return '[DirectOffset {} {}]'.format(format(self.offset, '02x'), repr(self.access))
def __str__(self):
if self.test_name == self.train_name or self.is_name_given:
return '[DirectOffset {} {}]'.format(self.train_name, str(self.ttype))
else:
if self.train_name == UNKNOWN_LABEL:
return '[DirectOffset (WRONGU {} {}) {}]'.format(self.train_name, self.test_name, str(self.ttype))
else:
return '[DirectOffset (WRONGK {} {}) {}]'.format(self.train_name, self.test_name, str(self.ttype))
def train_info(self, die, ttype):
origin = self.binary.debug_info.get_name_origin(die)
name_attr = origin.attributes.get('DW_AT_name', None)
if name_attr is not None:
name = name_attr.value.decode('ascii')
if self.train_name == UNKNOWN_LABEL:
self.ttype.train_info(ttype)
self.train_name = name
else:
if self.ttype.train_name in (UNKNOWN_LABEL, VOID) and ttype != UNKNOWN_LABEL:
self.ttype.train_info(ttype)
                    self.train_name = name
else:
if self.train_name > name:
self.train_name = name
self.ttype.train_info(ttype)
else:
pass
def stat(self):
super().stat()
DirectOffset.total += 1
if self.is_name_given:
DirectOffset.giv += 1
Offset.giv += 1
else:
DirectOffset.inf += 1
Offset.inf += 1
if self.train_name != UNKNOWN_LABEL:
DirectOffset.known += 1
Offset.known += 1
Offset.tp_1p += 1
else:
DirectOffset.unknown += 1
Offset.unknown += 1
Offset.fp_1p += 1
def debug_info(self):
bs = bytearray()
bs.append(ENUM_ABBREV_CODE['VARIABLE'])
# name
bs.extend(map(ord, self.test_name))
bs.append(0x00)
if self.test_name not in TTYPES \
and self.test_name != UNKNOWN_LABEL \
and self.test_name not in self.binary.sections.symbol_names:
self.binary.predicted.add(self.test_name)
bs.append(self.binary.config.ADDRESS_BYTE_SIZE + 1)
bs.append(ENUM_DW_FORM_exprloc['DW_OP_addr'])
bs += utils.encode_address(self.offset, self.binary)
if self.ttype.test_name is None \
or self.ttype.test_name in (UNKNOWN_LABEL, VOID) \
or self.ttype.test_name not in TTYPES:
bs += utils.encode_kbytes(self.binary.types.get_offset(INT), 4)
else:
bs += utils.encode_kbytes(self.binary.types.get_offset(self.ttype.test_name), 4)
return bs
class StringArrayOffset(DirectOffset):
total = 0
known = 0
unknown = 0
inf = 0
giv = 0
correct = 0
ttype_total = 0
ttype_known = 0
ttype_unknown = 0
ttype_inf = 0
ttype_tp_1p = 0
ttype_fp_1p = 0
ttype_tn_1p = 0
ttype_fn_1p = 0
ttype_correct = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = '@SA'
self.strings = kwargs['strings']
self.access = kwargs['access']
def __repr__(self):
return '[StringArray {} ({}) {}]'.format(format(self.offset, '02x'), ', '.join(map(repr, self.strings)), str(self.access))
def __str__(self):
if self.test_name == self.train_name:
return '[StringArray {} {}]'.format(self.train_name, str(self.ttype))
else:
if self.train_name == UNKNOWN_LABEL:
return '[StringArray (WRONGU {} {}) {}]'.format(self.train_name, self.test_name, str(self.ttype))
else:
return '[StringArray (WRONGK {} {}) {}]'.format(self.train_name, self.test_name, str(self.ttype))
def stat(self):
super().stat()
StringArrayOffset.total += 1
if self.is_name_given:
StringArrayOffset.giv += 1
else:
StringArrayOffset.inf += 1
if self.train_name != UNKNOWN_LABEL:
StringArrayOffset.known += 1
else:
StringArrayOffset.unknown += 1
class IndirectOffset(Offset):
total = 0
known = 0
unknown = 0
inf = 0
tp_1p = 0
fp_1p = 0
tn_1p = 0
fn_1p = 0
correct = 0
ttype_total = 0
ttype_known = 0
ttype_unknown = 0
ttype_inf = 0
ttype_tp_1p = 0
ttype_fp_1p = 0
ttype_tn_1p = 0
ttype_fn_1p = 0
ttype_correct = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.function = kwargs['function']
self.binary = self.function.binary
self.base_pointer = kwargs['base_pointer']
self.offset = kwargs['offset']
self.index = kwargs['index']
self.name = '{}:S:{}'.format(self.base_pointer, self.offset)
self.ttype = Ttype(owner=self)
self.n2p_type = self.binary.config.INF
self.train_name = UNKNOWN_LABEL
self.test_name = UNKNOWN_LABEL
self.low_pc = None
self.high_pc = None
self.pcs = set()
self.blks = set()
self.features = set()
if self.binary.config.MACHINE_ARCH == 'x86':
if self.base_pointer == 'EBP' and self.offset >= 0:
self.var_type = FUN_ARG
else:
self.var_type = LOC_VAR
elif self.binary.config.MACHINE_ARCH == 'x64':
if self.base_pointer == 'RBP' and self.offset >= 0:
self.var_type = FUN_ARG
else:
self.var_type = LOC_VAR
elif self.binary.config.MACHINE_ARCH == 'ARM':
self.var_type = LOC_VAR
def __repr__(self):
return '[IndirectOffset {} {}]'.format(self.base_pointer, self.offset)
def __str__(self):
if self.test_name == self.train_name:
return '[IndirectOffset {} {}]'.format(self.train_name, str(self.ttype))
else:
if self.train_name == UNKNOWN_LABEL:
return '[IndirectOffset (WRONGU {} {}) {}]'.format(self.train_name, self.test_name, str(self.ttype))
else:
return '[IndirectOffset (WRONGK {} {}) {}]'.format(self.train_name, self.test_name, str(self.ttype))
def init_features(self):
coarse = depgraph.infos.coarse
fine = depgraph.infos.fine
self.features.add(coarse(self))
self.features.add(fine(self))
self.features.add('blk[{}][{}]'.format(len(self.blks), coarse(self)))
self.features.add('blk[{}][{}]'.format(len(self.blks), fine(self)))
def add_pc(self, pc):
self.pcs.add(pc)
if self.low_pc is not None:
self.low_pc = min(pc, self.low_pc)
else:
self.low_pc = pc
if self.high_pc is not None:
self.high_pc = max(pc, self.high_pc)
else:
self.high_pc = pc
def train_info(self, die, ttype):
origin = self.binary.debug_info.get_name_origin(die)
name_attr = origin.attributes.get('DW_AT_name', None)
if name_attr is not None:
name = name_attr.value.decode('ascii')
if self.train_name == UNKNOWN_LABEL:
self.ttype.train_info(ttype)
self.train_name = name
else:
if self.ttype.train_name in (UNKNOWN_LABEL, VOID) and ttype != UNKNOWN_LABEL:
self.ttype.train_info(ttype)
                    self.train_name = name
else:
if self.train_name > name:
self.train_name = name
self.ttype.train_info(ttype)
else:
pass
def stat(self):
super().stat()
IndirectOffset.total += 1
if self.train_name != UNKNOWN_LABEL:
IndirectOffset.known += 1
Offset.known += 1
else:
IndirectOffset.unknown += 1
Offset.unknown += 1
if self.n2p_type == self.binary.config.INF:
IndirectOffset.inf += 1
Offset.inf += 1
if self.train_name == UNKNOWN_LABEL:
IndirectOffset.fp_1p += 1
Offset.fp_1p += 1
else:
IndirectOffset.tp_1p += 1
Offset.tp_1p += 1
elif self.n2p_type == self.binary.config.GIV:
if self.train_name == UNKNOWN_LABEL:
IndirectOffset.tn_1p += 1
Offset.tn_1p += 1
else:
IndirectOffset.fn_1p += 1
Offset.fn_1p += 1
def debug_info(self):
bs = bytearray()
if self.var_type == FUN_ARG:
bs.append(ENUM_ABBREV_CODE['FUN_ARG'])
elif self.var_type == LOC_VAR:
bs.append(ENUM_ABBREV_CODE['VARIABLE'])
# name
bs.extend(map(ord, self.test_name))
bs.append(0x00)
if self.test_name not in TTYPES and self.test_name != UNKNOWN_LABEL:
self.binary.predicted.add(self.test_name)
loc_expr = bytearray()
loc_expr.append(self.binary.config.REG_MAPPING[self.base_pointer] +
ENUM_DW_FORM_exprloc['DW_OP_breg0'])
loc_expr += utils.encode_sleb128(self.offset)
bs += utils.encode_uleb128(len(loc_expr))
bs += loc_expr
if self.ttype.test_name is None \
or self.ttype.test_name in (UNKNOWN_LABEL, VOID) \
or self.ttype.test_name not in TTYPES:
bs += utils.encode_kbytes(self.binary.types.get_offset(INT), 4)
else:
bs += utils.encode_kbytes(self.binary.types.get_offset(self.ttype.test_name), 4)
return bs
|
examples/semantic_segmentation/backend.py
|
niqbal996/paz
| 300 |
98232
|
import cv2
import numpy as np
from paz.backend.image.draw import put_text, draw_rectangle
from paz.backend.image.draw import GREEN
def draw_box(image, coordinates, class_name, score,
color=GREEN, scale=0.7, weighted=False):
x_min, y_min, x_max, y_max = coordinates
if weighted:
color = [int(channel * score) for channel in color]
text = '{:0.2f}, {}'.format(score, class_name)
put_text(image, text, (x_min, y_min - 10), scale, color, 1)
draw_rectangle(image, (x_min, y_min), (x_max, y_max), color, 2)
return image
def draw_square(image, center_x, center_y, size, color):
x_min, y_min = center_x - size, center_y - size
x_max, y_max = center_x + size, center_y + size
cv2.rectangle(image, (x_min, y_min), (x_max, y_max), color, -1)
return image
def draw_circle(image, center_x, center_y, size, color):
cv2.circle(image, (center_x, center_y), size, color, -1)
return image
def draw_triangle(image, center_x, center_y, size, color):
vertex_A = (center_x, center_y - size)
vertex_B = (center_x - size, center_y + size)
vertex_C = (center_x + size, center_y + size)
points = np.array([[vertex_A, vertex_B, vertex_C]], dtype=np.int32)
cv2.fillPoly(image, points, color)
return image
def resize_image_with_nearest_neighbors(image, size):
"""Resize image using nearest neighbors interpolation.
# Arguments
image: Numpy array.
size: List of two ints.
# Returns
Numpy array.
"""
    if not isinstance(image, np.ndarray):
        raise ValueError(
            'Received image is not of type numpy array', type(image))
else:
return cv2.resize(image, size, interpolation=cv2.INTER_NEAREST)
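# Hedged usage sketch (not part of the original module): resizing a small synthetic
# label mask with nearest-neighbour interpolation; the array and target size are made up.
if __name__ == '__main__':
    mask = np.arange(16, dtype=np.uint8).reshape(4, 4)
    resized = resize_image_with_nearest_neighbors(mask, (8, 8))
    print(resized.shape, np.unique(resized))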
|
Calibration/TkAlCaRecoProducers/test/inspectNearByPixelClusters_cfg.py
|
Purva-Chaudhari/cmssw
| 852 |
98265
|
<gh_stars>100-1000
import glob
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing()
###################################################################
# Setup 'standard' options
###################################################################
options.register('OutFileName',
"test.root", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"name of the output file (test.root is default)")
options.register('myGT',
"auto:run2_data", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"name of the input Global Tag")
options.register('maxEvents',
-1,
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.int, # string, int, or float
"num. events to run")
options.parseArguments()
process = cms.Process("AlCaRECOAnalysis")
###################################################################
# Message logger service
###################################################################
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1
###################################################################
# Geometry producer and standard includes
###################################################################
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff")
process.load("Configuration.StandardSequences.Services_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load('Configuration.StandardSequences.MagneticField_cff')
#process.load("Configuration.StandardSequences.MagneticField_0T_cff")
process.load("CondCore.CondDB.CondDB_cfi")
####################################################################
# Get the GlobalTag
####################################################################
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag,options.myGT, '')
###################################################################
# Source
###################################################################
readFiles = cms.untracked.vstring()
readFiles.extend(['file:SiPixelCalSingleMuonTight.root'])
#readFiles.extend(['file:SiPixelCalSingleMuonTight_fullDetId.root'])
process.source = cms.Source("PoolSource",fileNames = readFiles)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(options.maxEvents))
###################################################################
# The TrackRefitter
###################################################################
process.load("RecoTracker.TrackProducer.TrackRefitters_cff")
import RecoTracker.TrackProducer.TrackRefitters_cff
process.TrackRefitter = process.TrackRefitterP5.clone(src = 'ALCARECOSiPixelCalSingleMuonTight',
TrajectoryInEvent = True,
TTRHBuilder = "WithAngleAndTemplate",
NavigationSchool = "",
)
###################################################################
# The analysis module
###################################################################
process.myanalysis = cms.EDAnalyzer("NearbyPixelClustersAnalyzer",
trajectoryInput = cms.InputTag("TrackRefitter"),
#skimmedGeometryPath = cms.string("CalibTracker/SiPixelESProducers/data/PixelSkimmedGeometry.txt") # phase-0
skimmedGeometryPath = cms.string("SLHCUpgradeSimulations/Geometry/data/PhaseI/PixelSkimmedGeometry_phase1.txt") #phase-1
)
###################################################################
# Output name
###################################################################
process.TFileService = cms.Service("TFileService",
fileName = cms.string(options.OutFileName))
###################################################################
# Path
###################################################################
process.p1 = cms.Path(process.offlineBeamSpot*
process.TrackRefitter*
process.myanalysis)
|
usaspending_api/recipient/migrations/0001_initial.py
|
g4brielvs/usaspending-api
| 217 |
98280
|
<filename>usaspending_api/recipient/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-11 00:51
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.indexes
import partial_index
from django.contrib.postgres.operations import TrigramExtension
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
TrigramExtension(),
migrations.CreateModel(
name='DUNS',
fields=[
('awardee_or_recipient_uniqu', models.TextField(primary_key=True, serialize=False)),
('legal_business_name', models.TextField(blank=True, null=True)),
('dba_name', models.TextField(blank=True, null=True)),
('ultimate_parent_unique_ide', models.TextField(blank=True, null=True)),
('ultimate_parent_legal_enti', models.TextField(blank=True, null=True)),
('address_line_1', models.TextField(blank=True, null=True)),
('address_line_2', models.TextField(blank=True, null=True)),
('city', models.TextField(blank=True, null=True)),
('state', models.TextField(blank=True, null=True)),
('zip', models.TextField(blank=True, null=True)),
('zip4', models.TextField(blank=True, null=True)),
('country_code', models.TextField(blank=True, null=True)),
('congressional_district', models.TextField(blank=True, null=True)),
('business_types_codes', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, null=True, size=None)),
('entity_structure', models.TextField(blank=True, null=True)),
('broker_duns_id', models.TextField()),
('update_date', models.DateField()),
],
options={
'db_table': 'duns',
},
),
migrations.CreateModel(
name='HistoricParentDUNS',
fields=[
('awardee_or_recipient_uniqu', models.TextField()),
('legal_business_name', models.TextField(blank=True, null=True)),
('ultimate_parent_unique_ide', models.TextField(blank=True, null=True)),
('ultimate_parent_legal_enti', models.TextField(blank=True, null=True)),
('broker_historic_duns_id', models.IntegerField(primary_key=True, serialize=False)),
('year', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'historic_parent_duns',
},
),
migrations.CreateModel(
name='RecipientLookup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('recipient_hash', models.UUIDField(null=True, unique=True)),
('legal_business_name', models.TextField(db_index=True, null=True)),
('duns', models.TextField(null=True, unique=True)),
('parent_duns', models.TextField(null=True)),
('parent_legal_business_name', models.TextField(null=True)),
('address_line_1', models.TextField(null=True)),
('address_line_2', models.TextField(null=True)),
('city', models.TextField(null=True)),
('state', models.TextField(null=True)),
('zip5', models.TextField(null=True)),
('zip4', models.TextField(null=True)),
('country_code', models.TextField(null=True)),
('congressional_district', models.TextField(null=True)),
('business_types_codes', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, null=True, size=None)),
],
options={
'db_table': 'recipient_lookup',
},
),
migrations.CreateModel(
name='RecipientProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('recipient_level', models.CharField(max_length=1)),
('recipient_hash', models.UUIDField(db_index=True, null=True)),
('recipient_unique_id', models.TextField(db_index=True, null=True)),
('recipient_name', models.TextField(db_index=True, null=True)),
('recipient_affiliations', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None)),
('award_types', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None)),
('last_12_months', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
('last_12_contracts', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
('last_12_grants', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
('last_12_direct_payments', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
('last_12_loans', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
('last_12_other', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
('last_12_months_count', models.IntegerField(default=0)),
],
options={
'db_table': 'recipient_profile',
'managed': True,
},
),
migrations.CreateModel(
name='StateData',
fields=[
('id', models.TextField(primary_key=True, serialize=False)),
('fips', models.TextField(db_index=True)),
('code', models.TextField()),
('name', models.TextField()),
('type', models.TextField()),
('year', models.IntegerField(db_index=True)),
('population', models.BigIntegerField(blank=True, null=True)),
('pop_source', models.TextField(blank=True, null=True)),
('median_household_income', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
('mhi_source', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'state_data',
},
),
migrations.CreateModel(
name='SummaryAwardRecipient',
fields=[
('award_id', models.BigIntegerField(primary_key=True, serialize=False)),
('action_date', models.DateField(blank=True, db_index=True)),
('recipient_hash', models.UUIDField(db_index=True, null=True)),
('parent_recipient_unique_id', models.TextField(db_index=True, null=True)),
],
options={
'db_table': 'summary_award_recipient',
'managed': True,
},
),
migrations.AddIndex(
model_name='recipientprofile',
index=django.contrib.postgres.indexes.GinIndex(fields=['award_types'], name='recipient_p_award_t_211373_gin'),
),
migrations.AddIndex(
model_name='recipientprofile',
index=models.Index(fields=['recipient_unique_id'], name='recipient_p_recipie_7039a5_idx'),
),
migrations.AlterUniqueTogether(
name='recipientprofile',
unique_together=set([('recipient_hash', 'recipient_level')]),
),
migrations.AddIndex(
model_name='recipientlookup',
index=partial_index.PartialIndex(fields=['duns'], name='recipient_l_duns_bb057a_partial', unique=True, where=partial_index.PQ(duns__isnull=False)),
),
migrations.AddIndex(
model_name='recipientlookup',
index=partial_index.PartialIndex(fields=['parent_duns'], name='recipient_l_parent__efd6d5_partial', unique=False, where=partial_index.PQ(parent_duns__isnull=False)),
),
migrations.RunSQL(
sql=[
'alter table only recipient_profile alter column last_12_months set default 0.00',
"alter table only recipient_profile alter column recipient_affiliations set default '{}'::text[]",
'create index idx_recipient_profile_name on recipient_profile using gin (recipient_name gin_trgm_ops)',
],
),
]
|
discovery-provider/src/utils/redis_constants.py
|
atticwip/audius-protocol
| 429 |
98310
|
<gh_stars>100-1000
latest_block_redis_key = "latest_block_from_chain"
latest_block_hash_redis_key = "latest_blockhash_from_chain"
most_recent_indexed_block_redis_key = "most_recently_indexed_block_from_db"
most_recent_indexed_block_hash_redis_key = "most_recently_indexed_block_hash_from_db"
most_recent_indexed_ipld_block_redis_key = "most_recent_indexed_ipld_block_redis_key"
most_recent_indexed_ipld_block_hash_redis_key = (
"most_recent_indexed_ipld_block_hash_redis_key"
)
trending_tracks_last_completion_redis_key = "trending:tracks:last-completion"
trending_playlists_last_completion_redis_key = "trending-playlists:last-completion"
challenges_last_processed_event_redis_key = "challenges:last-processed-event"
user_balances_refresh_last_completion_redis_key = "user_balances:last-completion"
latest_sol_play_tx_key = "latest_sol_play_tx_key"
index_eth_last_completion_redis_key = "index_eth:last-completion"
|
tests/runtimes/local/agent_runtime_test.py
|
bbhunter/ostorlab
| 113 |
98340
|
<filename>tests/runtimes/local/agent_runtime_test.py
"""Unittest for agent runtime."""
import docker
from ostorlab.runtimes import definitions
from ostorlab.utils import defintions as utils_defintions
from ostorlab.agent import definitions as agent_definitions
from ostorlab.runtimes.local import agent_runtime
import ostorlab
def container_name_mock(name):
del name
return 'name'
def testCreateAgentService_whenAgentDefAndAgentSettingsAreNotEmpty_serviceCreatedwithAgentSettings(mocker):
"""Test creation of the agent service : Case where agent definitions & agent settings have different values for
some attributes, the agent settings values should override.
"""
agent_def = agent_definitions.AgentDefinition(name='agent_name_from_def',
mounts=['def_mount1', 'def_mount2'],
mem_limit=420000,
restart_policy='any',
open_ports=[utils_defintions.PortMapping(20000, 30000),
utils_defintions.PortMapping(20001, 30001)])
mocker.patch('ostorlab.runtimes.local.agent_runtime.AgentRuntime.create_agent_definition_from_label',
return_value=agent_def)
mocker.patch.object(ostorlab.runtimes.definitions.AgentSettings, 'container_image', property(container_name_mock))
mocker.patch('ostorlab.runtimes.local.agent_runtime.AgentRuntime.update_agent_settings', return_value=None)
mocker.patch('ostorlab.runtimes.local.agent_runtime.AgentRuntime.create_settings_config', return_value=None)
mocker.patch('ostorlab.runtimes.local.agent_runtime.AgentRuntime.create_definition_config', return_value=None)
create_service_mock = mocker.patch('docker.models.services.ServiceCollection.create', return_value=None)
docker_client = docker.from_env()
settings_open_ports = [utils_defintions.PortMapping(20000, 40000),
utils_defintions.PortMapping(20002, 40002)]
agent_settings = definitions.AgentSettings(key='agent/org/name',
mounts=['settings_mount1'],
mem_limit=700000,
restart_policy='on-failure',
constraints=['constraint1'],
open_ports=settings_open_ports)
runtime_agent = agent_runtime.AgentRuntime(
agent_settings, '42', docker_client, mq_service=None, redis_service=None)
runtime_agent.create_agent_service(network_name='test', extra_configs=[])
kwargs = create_service_mock.call_args.kwargs
#assert arguments were overridden by the agent settings.
assert kwargs['resources']['Limits']['MemoryBytes'] == 700000
assert kwargs['mounts'] == ['settings_mount1']
assert kwargs['endpoint_spec']['Ports'][0]['PublishedPort'] == 40000
assert kwargs['restart_policy']['Condition'] == 'on-failure'
def testCreateAgentService_whenAgentDefIsNotEmptyAndAgentSettingsIsEmpty_serviceCreatedwithAgentDef(mocker):
"""Test creation of the agent service : Case where agent settings values are empty,
the agent definition values should be used.
"""
agent_def = agent_definitions.AgentDefinition(name='agent_name_from_def',
mounts=['def_mount1', 'def_mount2'],
mem_limit=620000,
restart_policy='any',
open_ports=[utils_defintions.PortMapping(20000, 30000),
utils_defintions.PortMapping(20001, 30001)])
mocker.patch('ostorlab.runtimes.local.agent_runtime.AgentRuntime.create_agent_definition_from_label',
return_value=agent_def)
mocker.patch.object(ostorlab.runtimes.definitions.AgentSettings, 'container_image', property(container_name_mock))
mocker.patch('ostorlab.runtimes.local.agent_runtime.AgentRuntime.update_agent_settings', return_value=None)
mocker.patch('ostorlab.runtimes.local.agent_runtime.AgentRuntime.create_settings_config', return_value=None)
mocker.patch('ostorlab.runtimes.local.agent_runtime.AgentRuntime.create_definition_config', return_value=None)
create_service_mock = mocker.patch('docker.models.services.ServiceCollection.create', return_value=None)
docker_client = docker.from_env()
agent_settings = definitions.AgentSettings(key='agent/org/name')
runtime_agent = agent_runtime.AgentRuntime(
agent_settings, '42', docker_client, mq_service=None, redis_service=None)
runtime_agent.create_agent_service(network_name='test', extra_configs=[])
kwargs = create_service_mock.call_args.kwargs
#assert arguments from agent definition were used.
assert kwargs['resources']['Limits']['MemoryBytes'] == 620000
assert kwargs['mounts'] == ['def_mount1', 'def_mount2']
assert kwargs['endpoint_spec']['Ports'][0]['PublishedPort'] == 30000
assert kwargs['restart_policy']['Condition'] == 'any'
|
tests/test_gmplot.py
|
Monti03/gmplot
| 606 |
98348
|
import unittest
import warnings
from gmplot.utility import StringIO, _format_LatLng
from gmplot.writer import _Writer
from gmplot.drawables.route import _Route
from gmplot.google_map_plotter import GoogleMapPlotter
class GMPlotTest(unittest.TestCase):
def test_format_LatLng(self):
self.assertEqual(_format_LatLng(45.123456, -80.987654, 6), 'new google.maps.LatLng(45.123456, -80.987654)')
self.assertEqual(_format_LatLng(45.123456, -80.987654, 4), 'new google.maps.LatLng(45.1235, -80.9877)')
self.assertEqual(_format_LatLng(45.1, -80.9, 3), 'new google.maps.LatLng(45.100, -80.900)')
# Note: This test only ensures that Route's functions can be called without failing,
# it doesn't test if the resulting output can actually be rendered properly in a browser.
class RouteTest(unittest.TestCase):
def test_write(self):
route = _Route((37.770776,-122.461689), (37.780776,-122.461689), 6)
with StringIO() as f:
with _Writer(f) as writer:
route.write(writer)
def test_write_waypoints(self):
route = _Route((37.770776,-122.461689), (37.780776,-122.461689), 6, waypoints=[(37.431257,-122.133121)])
with StringIO() as f:
with _Writer(f) as writer:
route.write(writer)
# Note: This test only ensures that GoogleMapPlotter's functions can be called without failing,
# it doesn't test if the resulting map can actually be rendered properly in a browser.
class GoogleMapPlotterTest(unittest.TestCase):
PATH_1 = [(37.429, 37.428, 37.427, 37.427, 37.427),
(-122.145, -122.145, -122.145, -122.146, -122.146)]
PATH_2 = [[i+.01 for i in PATH_1[0]], [i+.02 for i in PATH_1[1]]]
PATH_3 = [(37.433302, 37.431257, 37.427644, 37.430303), (-122.14488, -122.133121, -122.137799, -122.148743)]
PATH_4 = [(37.423074, 37.422700, 37.422410, 37.422188, 37.422274, 37.422495, 37.422962, 37.423552, 37.424387, 37.425920, 37.425937),
(-122.150288, -122.149794, -122.148936, -122.148142, -122.146747, -122.14561, -122.144773, -122.143936, -122.142992, -122.147863, -122.145953)]
def test_get(self):
bounds = {'north':37.832285, 'south': 37.637336, 'west': -122.520364, 'east': -122.346922}
map = GoogleMapPlotter(37.428, -122.145, 16, fit_bounds=bounds)
# Test marker:
map.marker(37.427, -122.145, color="yellow")
map.marker(37.428, -122.146, color="cornflowerblue")
map.marker(37.429, -122.144, color="k", title='Here')
map.marker(37.430, -122.142, color="red", label='A')
# Test circle:
map.circle(37.429, -122.145, 100, color="#FF0000", ew=2)
# Test plot:
map.plot(self.PATH_1[0], self.PATH_1[1], color="plum", edge_width=10)
map.plot(self.PATH_2[0], self.PATH_2[1], color="red")
# Test directions:
map.directions((37.770776,-122.461689), (37.780776,-122.461689), waypoints=[(37.431257,-122.133121)])
# Test polygon:
map.polygon(self.PATH_3[0], self.PATH_3[1], edge_color="cyan", edge_width=5, face_color="blue", face_alpha=0.1)
# Test heatmap:
map.heatmap(self.PATH_4[0], self.PATH_4[1], radius=40, weights=[1, 1, 1, 0.5, 0.5, 0.5, 1, 1, 1, 2, 2])
map.heatmap(self.PATH_3[0], self.PATH_3[1], radius=40, dissipating=False, gradient=[(30,30,30,0), (30,30,30,1), (50, 50, 50, 1)])
# Test scatter:
map.scatter(self.PATH_3[0], self.PATH_3[1], c='r', marker=[True, False, False, True])
map.scatter(self.PATH_4[0], self.PATH_4[1], size=[1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2], symbol='x')
map.scatter(self.PATH_4[0], self.PATH_4[1], s=90, marker=False, alpha=0.9, symbol='+', c='red', edge_width=4)
map.scatter(self.PATH_3[0], self.PATH_3[1],
color=['r','g','b','k'],
precision=[1,2,3,4],
marker=[True, True, False, True],
title=['First', 'Second', 'Third', 'Fourth'],
label=['A','B','C','D'],
size=[10,20,30,40],
symbol=['+','o','x','x']
)
# Test ground overlay:
bounds_dict = {'north':37.832285, 'south': 37.637336, 'west': -122.520364, 'east': -122.346922}
map.ground_overlay('http://explore.museumca.org/creeks/images/TopoSFCreeks.jpg', bounds_dict, opacity=0.5)
map.get()
def test_scatter_length_mismatch(self):
map = GoogleMapPlotter(37.428, -122.145, 16)
with self.assertRaises(ValueError):
map.scatter(self.PATH_3[0], self.PATH_3[1],
color=['r','g','b'],
precision=[1,2],
marker=[True],
title=['First', 'Second'],
label=['A','B','C','D','E'],
size=[10,20],
symbol=['+','o','x','x','o']
)
def test_invalid_symbol(self):
map = GoogleMapPlotter(37.428, -122.145, 16)
with self.assertRaises(KeyError):
map.scatter(self.PATH_4[0], self.PATH_4[1], s=90, marker=False, alpha=0.9, symbol='z', c='red', edge_width=4)
map.get()
def test_grid(self):
map = GoogleMapPlotter(37.428, -122.145, 16)
bounds = {'north': 37.43, 'south': 37.42, 'east': -122.14, 'west': -122.15}
map.grid(bounds, 0.001, 0.001)
map.get()
def test_map_styles(self):
map_styles = [
{
'featureType': 'all',
'stylers': [
{'saturation': -80},
{'lightness': 60},
]
}
]
map = GoogleMapPlotter(37.428, -122.145, 16, map_type='satellite', map_styles=map_styles, tilt=0, scale_control=True)
map.get()
def test_unsupported_marker_color(self):
map = GoogleMapPlotter(37.428, -122.145, 16)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
map.marker(37.428, -122.146, color="#123456") # (valid but unsupported color)
self.assertEqual(len(w), 1, "'get()' should raise a single warning")
self.assertTrue(issubclass(w[-1].category, UserWarning), "'get()' should raise a 'UserWarning'")
map.get()
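# Editor's note: a minimal usage sketch, not part of the test suite above. It only uses
# calls exercised by the tests (plus gmplot's documented draw()); the output path
# 'example_map.html' is an arbitrary choice.
def _example_usage():
    gmap = GoogleMapPlotter(37.428, -122.145, 16)
    gmap.marker(37.427, -122.145, color='yellow', title='Example marker')
    gmap.plot([37.429, 37.428, 37.427], [-122.145, -122.146, -122.146], color='plum', edge_width=5)
    gmap.heatmap([37.428, 37.429], [-122.145, -122.144], radius=40)
    gmap.draw('example_map.html')  # writes the rendered map to an HTML file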
|