max_stars_repo_path (string, lengths 4–245) | max_stars_repo_name (string, lengths 7–115) | max_stars_count (int64, 101–368k) | id (string, lengths 2–8) | content (string, lengths 6–1.03M) |
---|---|---|---|---|
tests/unit/test_parse_env_file.py | Parnassius/poethepoet | 446 | 85885 |
from poethepoet.envfile import parse_env_file
import pytest
valid_examples = [
(
"""
# empty
""",
{},
),
(
"""
# single word values
WORD=something
WORD_WITH_HASH=some#thing
NUMBER=0
EMOJI=😃😃
DOUBLE_QUOTED_WORD="something"
SINGLE_QUOTED_WORD='something'
""",
{
"WORD": "something",
"WORD_WITH_HASH": "some#thing",
"NUMBER": "0",
"EMOJI": "😃😃",
"DOUBLE_QUOTED_WORD": "something",
"SINGLE_QUOTED_WORD": "something",
},
),
(
"""
# multiword values
WORD=some\\ thing # and trailing comments
DOUBLE_QUOTED_WORD="some thing"
SINGLE_QUOTED_WORD='some thing'
""",
{
"WORD": r"some thing",
"DOUBLE_QUOTED_WORD": "some thing",
"SINGLE_QUOTED_WORD": "some thing",
},
),
(
"""
# values with line breaks
WORD=some\\
thing
DOUBLE_QUOTED_WORD="some
thing"
SINGLE_QUOTED_WORD='some
thing'
""",
{
"WORD": "some\nthing",
"DOUBLE_QUOTED_WORD": "some\n thing",
"SINGLE_QUOTED_WORD": "some\n thing",
},
),
(
"""
# without linebreak between vars
FOO=BAR BAR=FOO
""",
{"FOO": "BAR", "BAR": "FOO"},
),
(
"""
# with semicolons
; FOO=BAR;BAR=FOO ;
;
BAZ="2;'2"#;
\tQUX=3\t;
""",
{"FOO": "BAR", "BAR": "FOO", "BAZ": "2;'2#", "QUX": "3"},
),
(
r"""
# with extra backslashes
FOO=a\\\ b
BAR='a\\\ b'
BAZ="a\\\ b"
""",
{"FOO": r"a\ b", "BAR": r"a\\\ b", "BAZ": r"a\ b"},
),
( # a value with many parts and some empty vars
r"""FOO=a\\\ b'a\\\ b'"a\\\ b"#"#"'\'' ;'#;\t
BAR=
BAZ= # still empty
QUX=""",
{"FOO": r"a\ ba\\\ ba\ b##\ ;#", "BAR": "", "BAZ": "", "QUX": ""},
),
# export keyword is allowed
(
"""export answer=42
export \t question=undefined
export\tdinner=chicken
""",
{"answer": "42", "question": "undefined", "dinner": "chicken"},
),
]
invalid_examples = [
"foo = bar",
"foo =bar",
"foo= bar",
"foo\t=\tbar",
"foo\t=bar",
"foo=\tbar",
"foo= 'bar",
'foo= "bar"',
"foo",
"foo;",
"8oo=bar",
"foo@=bar",
'"foo@"=bar',
"'foo@'=bar",
r"foo\=bar",
r"foo\==bar",
r"export;foo=bar",
r"export\nfoo=bar",
]
@pytest.mark.parametrize("example", valid_examples)
def test_parse_valid_env_files(example):
assert parse_env_file(example[0]) == example[1]
@pytest.mark.parametrize("example", invalid_examples)
def test_parse_invalid_env_files(example):
with pytest.raises(ValueError):
parse_env_file(example)
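For context, a minimal usage sketch of the function exercised by these tests (assuming poethepoet is installed; the expected result follows directly from the valid examples above):

from poethepoet.envfile import parse_env_file

env = parse_env_file('GREETING="hello world"\nexport ANSWER=42\n')
# -> {'GREETING': 'hello world', 'ANSWER': '42'}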
|
django/django-pyodbc-azure/DjangoWebProject1/__init__.py | garotogordo/azure-sql-database-samples | 674 | 85892 |
"""
Package for DjangoWebProject1.
"""
|
alg/interpole/diag-bias/main-offpoirl.py | DaraOrange/mlforhealthlabpub | 171 | 85898 |
import argparse
import dill
import jax
import numpy as np
import pomdp
pomdp.horizon = 25
parser = argparse.ArgumentParser()
parser.add_argument('--silent', action='store_true')
parser.add_argument('--cont', action='store_true')
parser.add_argument('--bias', action='store_true')
args = parser.parse_args()
key = jax.random.PRNGKey(0)
with open('data/data{}-meta.obj'.format('-bias' if args.bias else ''), 'rb') as f:
data_meta = dill.load(f)
S = data_meta['S']
A = data_meta['A']
Z = data_meta['Z']
with open('data/data{}.obj'.format('-bias' if args.bias else ''), 'rb') as f:
data = dill.load(f)
def log_pi(alp, bet, b):
res = np.zeros(A)
for a in range(A):
if alp[a].size == 0:
res[a] = -1e6
else:
res[a] = bet * (alp[a] @ b).max()
return res - np.log(np.sum(np.exp(res)))
def likelihood(b0, T, O, alp):
res = 0
for traj in data:
b = b0
for a, z in zip(traj['a'], traj['z']):
res += log_pi(alp, 10, b)[a]
b = O[a,:,z] * (T[:,a,:].T @ b)
b /= b.sum()
return res
if args.cont:
# resume from the previously saved results file (the parser above defines no 'output'
# option, so reuse the same results path that is written at the end of the loop)
with open('res/res{}-offpoirl.obj'.format('-bias' if args.bias else ''), 'rb') as f:
res = dill.load(f)
key = res['key']
b0, T, O, R = res['out'][-1]
else:
res = dict()
res['out'] = list()
key, *subkey = jax.random.split(key, 4)
b0 = np.array(jax.random.dirichlet(subkey[0], np.ones(S)))
T = np.array(jax.random.dirichlet(subkey[1], np.ones((S,A,S)), shape=(S,A)))
O = np.array(jax.random.dirichlet(subkey[1], np.ones((A,S,Z)), shape=(A,S)))
###
T = np.array([[[1,0],[1,0],[1,0]],[[0,1],[0,1],[0,1]]])
O[:2,...] = np.array([[[1,0],[1,0]],[[0,1],[0,1]]])
###
key, subkey = jax.random.split(key)
R = np.array([[1,-1.5,0], [-1.5,1,0]]) * .25
R += .001 * np.array(jax.random.normal(subkey, shape=(S,A)))
alp = pomdp.solve(S, A, Z, b0, T, O, R)
like = likelihood(b0, T, O, alp)
rtio = 0
rtio_n = 0
for i in range(len(res['out']), 1000):
_b0, _T, _O, _R = b0, T, O, R
key, subkey = jax.random.split(key)
if jax.random.choice(subkey, [True, False]):
for traj in data:
alp = [None] * (traj['tau']+1)
alp[0] = b0
for t in range(traj['tau']):
alp[t+1] = O[traj['a'][t],:,traj['z'][t]] * (T[:,traj['a'][t],:].T @ alp[t])
alp[t+1] /= alp[t+1].sum()
bet = [None] * (traj['tau']+1)
bet[-1] = np.ones(S)
for t in reversed(range(traj['tau'])):
bet[t] = T[:,traj['a'][t],:] @ (O[traj['a'][t],:,traj['z'][t]] * bet[t+1])
bet[t] /= bet[t].sum()
gmm = [None] * (traj['tau']+1)
for t in range(traj['tau']+1):
gmm[t] = alp[t] * bet[t]
gmm[t] /= gmm[t].sum()
traj['s'] = [None] * (traj['tau']+1)
for t in range(traj['tau']+1):
key, subkey = jax.random.split(key)
traj['s'][t] = jax.random.choice(subkey, range(S), p=gmm[t])
dir_b0 = np.ones(b0.shape)
dir_T = np.ones(T.shape)
dir_O = np.ones(O.shape)
for traj in data:
dir_b0[traj['s'][0]] += 1
for t in range(traj['tau']):
dir_T[traj['s'][t],traj['a'][t],traj['s'][t+1]] += 1
for t in range(traj['tau']):
dir_O[traj['a'][t],traj['s'][t+1],traj['z'][t]] += 1
###
key, subkey = jax.random.split(key)
_b0 = np.array(jax.random.dirichlet(subkey, dir_b0))
if args.bias:
_b0 = np.array([.5,.5])
_T = np.array([[[1,0],[1,0],[1,0]],[[0,1],[0,1],[0,1]]])
_O = np.array([[[1,0],[1,0]],[[0,1],[0,1]],[[.5,.5],[.5,.5]]])
for s in range(S):
key, subkey = jax.random.split(key)
_O[2,s,:] = np.array(jax.random.dirichlet(subkey, dir_O[2,s,:]))
else:
key, subkey = jax.random.split(key)
_R = R + .001 * np.array(jax.random.normal(subkey, shape=(S,A)))
_alp = pomdp.solve(S, A, Z, _b0, _T, _O, _R)
_like = likelihood(_b0, _T, _O, _alp)
key, subkey = jax.random.split(key)
unif = jax.random.uniform(subkey)
if np.log(unif) < _like - like:
b0, T, O, R = _b0, _T, _O, _R
like = _like
rtio += 1 if like == _like else 0
rtio_n += 1
if not args.silent:
print('i = {}, like = {}, {} ({})'.format(i, like, '*' if like == _like else '-', rtio / rtio_n))
res['key'] = key
res['out'].append((b0, T, O, R))
if (i+1) % 100 == 0:
with open('res/res{}-offpoirl.obj'.format('-bias' if args.bias else ''), 'wb') as f:
dill.dump(res, f)
with open('res/res{}-offpoirl.obj'.format('-bias' if args.bias else ''), 'wb') as f:
dill.dump(res, f)
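The inner loop of likelihood() above is the standard POMDP belief filter: after taking action a and observing z, the belief is updated as b' ∝ O[a, :, z] * (T[:, a, :].T @ b) and renormalized. A self-contained sketch with made-up toy numbers (not taken from the data files used by this script):

import numpy as np

# Toy sizes and parameters; T[s, a, s'] and O[a, s', z] are laid out as in the script above.
S, A, Z = 2, 1, 2
T = np.array([[[0.9, 0.1]], [[0.2, 0.8]]])
O = np.array([[[0.7, 0.3], [0.1, 0.9]]])
b = np.array([0.5, 0.5])
a, z = 0, 1
b = O[a, :, z] * (T[:, a, :].T @ b)  # predict with T, weight by observation likelihood
b /= b.sum()                         # renormalize to a proper belief
print(b)                             # ~[0.289, 0.711]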
|
tests/dataframe_protocol_test.py | sethvargo/vaex | 337 | 85922 |
import numpy as np
import pyarrow as pa
import pytest
from typing import Any, Optional, Tuple, Dict, Iterable, Sequence
DataFrameObject = Any
ColumnObject = Any
import vaex
from common import *
from vaex.dataframe_protocol import _from_dataframe_to_vaex, _DtypeKind, _VaexBuffer, _VaexColumn, _VaexDataFrame
def test_float_only(df_factory):
df = df_factory(x=[1.5, 2.5, 3.5], y=[9.2, 10.5, 11.8])
df2 = _from_dataframe_to_vaex(df.__dataframe__())
assert df2.x.tolist() == df.x.tolist()
assert df2.y.tolist() == df.y.tolist()
assert df2.__dataframe__().get_column_by_name("x").null_count == 0
assert df2.__dataframe__().get_column_by_name("y").null_count == 0
assert_dataframe_equal(df.__dataframe__(), df)
def test_mixed_intfloat(df_factory):
df = df_factory(x=[1, 2, 0], y=[9.2, 10.5, 11.8])
df2 = _from_dataframe_to_vaex(df.__dataframe__())
assert df2.x.tolist() == df.x.tolist()
assert df2.y.tolist() == df.y.tolist()
assert df2.__dataframe__().get_column_by_name("x").null_count == 0
assert df2.__dataframe__().get_column_by_name("y").null_count == 0
assert_dataframe_equal(df.__dataframe__(), df)
def test_mixed_intfloatbool(df_factory):
df = df_factory(x=np.array([True, True, False]), y=np.array([1, 2, 0]), z=np.array([9.2, 10.5, 11.8]))
df2 = _from_dataframe_to_vaex(df.__dataframe__())
assert df2.x.tolist() == df.x.tolist()
assert df2.y.tolist() == df.y.tolist()
assert df2.z.tolist() == df.z.tolist()
assert df2.__dataframe__().get_column_by_name("x").null_count == 0
assert df2.__dataframe__().get_column_by_name("y").null_count == 0
assert df2.__dataframe__().get_column_by_name("z").null_count == 0
# Additional tests for _VaexColumn
assert df2.__dataframe__().get_column_by_name("x")._allow_copy == True
assert df2.__dataframe__().get_column_by_name("x").size == 3
assert df2.__dataframe__().get_column_by_name("x").offset == 0
assert df2.__dataframe__().get_column_by_name("z").dtype[0] == 2 # 2: float64
assert df2.__dataframe__().get_column_by_name("z").dtype[1] == 64 # 64: float64
assert df2.__dataframe__().get_column_by_name("z").dtype == (2, 64, "<f8", "=")
with pytest.raises(TypeError):
assert df2.__dataframe__().get_column_by_name("y").describe_categorical
if df2['y'].dtype.is_arrow:
assert df2.__dataframe__().get_column_by_name("y").describe_null == (3, 0)
else:
assert df2.__dataframe__().get_column_by_name("y").describe_null == (0, None)
assert_dataframe_equal(df.__dataframe__(), df)
def test_mixed_missing(df_factory_arrow):
df = df_factory_arrow(x=np.array([True, None, False, None, True]), y=np.array([None, 2, 0, 1, 2]), z=np.array([9.2, 10.5, None, 11.8, None]))
df2 = _from_dataframe_to_vaex(df.__dataframe__())
assert df.__dataframe__().metadata == df2.__dataframe__().metadata
assert df["x"].tolist() == df2["x"].tolist()
assert not df2["x"].is_masked
assert df2.__dataframe__().get_column_by_name("x").null_count == 2
assert df["x"].dtype == df2["x"].dtype
assert df["y"].tolist() == df2["y"].tolist()
assert not df2["y"].is_masked
assert df2.__dataframe__().get_column_by_name("y").null_count == 1
assert df["y"].dtype == df2["y"].dtype
assert df["z"].tolist() == df2["z"].tolist()
assert not df2["z"].is_masked
assert df2.__dataframe__().get_column_by_name("z").null_count == 2
assert df["z"].dtype == df2["z"].dtype
assert_dataframe_equal(df.__dataframe__(), df)
def test_missing_from_masked(df_factory_numpy):
df = df_factory_numpy(
x=np.ma.array([1, 2, 3, 4, 0], mask=[0, 0, 0, 1, 1], dtype=int),
y=np.ma.array([1.5, 2.5, 3.5, 4.5, 0], mask=[False, True, True, True, False], dtype=float),
z=np.ma.array([True, False, True, True, True], mask=[1, 0, 0, 1, 0], dtype=bool),
)
df2 = _from_dataframe_to_vaex(df.__dataframe__())
assert df.__dataframe__().metadata == df2.__dataframe__().metadata
assert df["x"].tolist() == df2["x"].tolist()
assert not df2["x"].is_masked
assert df2.__dataframe__().get_column_by_name("x").null_count == 2
assert df["x"].dtype == df2["x"].dtype
assert df["y"].tolist() == df2["y"].tolist()
assert not df2["y"].is_masked
assert df2.__dataframe__().get_column_by_name("y").null_count == 3
assert df["y"].dtype == df2["y"].dtype
assert df["z"].tolist() == df2["z"].tolist()
assert not df2["z"].is_masked
assert df2.__dataframe__().get_column_by_name("z").null_count == 2
assert df["z"].dtype == df2["z"].dtype
assert_dataframe_equal(df.__dataframe__(), df)
def test_categorical():
df = vaex.from_arrays(year=[2012, 2013, 2015, 2019], weekday=[0, 1, 4, 6])
df = df.categorize("year", min_value=2012, max_value=2019)
df = df.categorize("weekday", labels=["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"])
# Some detailed testing for correctness of dtype and null handling:
col = df.__dataframe__().get_column_by_name("year")
assert col.dtype[0] == _DtypeKind.CATEGORICAL
assert col.describe_categorical == (False, True, {0: 2012, 1: 2013, 2: 2014, 3: 2015, 4: 2016, 5: 2017, 6: 2018, 7: 2019})
assert col.describe_null == (0, None)
assert col.dtype == (23, 64, "u", "=")
col2 = df.__dataframe__().get_column_by_name("weekday")
assert col2.dtype[0] == _DtypeKind.CATEGORICAL
assert col2.describe_categorical == (False, True, {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"})
assert col2.describe_null == (0, None)
assert col2.dtype == (23, 64, "u", "=")
df2 = _from_dataframe_to_vaex(df.__dataframe__())
assert df2["year"].tolist() == [2012, 2013, 2015, 2019]
assert df2["weekday"].tolist() == ["Mon", "Tue", "Fri", "Sun"]
assert_dataframe_equal(df.__dataframe__(), df)
def test_arrow_dictionary():
indices = pa.array([0, 1, 0, 1, 2, 0, 1, 2])
dictionary = pa.array(["foo", "bar", "baz"])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
df = vaex.from_arrays(x=dict_array)
# Some detailed testing for correctness of dtype and null handling:
col = df.__dataframe__().get_column_by_name("x")
assert col.dtype[0] == _DtypeKind.CATEGORICAL
assert col.describe_categorical == (False, True, {0: "foo", 1: "bar", 2: "baz"})
if df['x'].dtype.is_arrow:
assert col.describe_null == (3, 0)
else:
assert col.describe_null == (0, None)
assert col.dtype == (23, 64, "u", "=")
df2 = _from_dataframe_to_vaex(df.__dataframe__())
assert df2.x.tolist() == df.x.tolist()
assert df2.__dataframe__().get_column_by_name("x").null_count == 0
assert_dataframe_equal(df.__dataframe__(), df)
def test_arrow_dictionary_missing():
indices = pa.array([0, 1, 2, 0, 1], mask=np.array([0, 1, 1, 0, 0], dtype=bool))
dictionary = pa.array(["aap", "noot", "mies"])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
df = vaex.from_arrays(x=dict_array)
# Some detailed testing for correctness of dtype and null handling:
col = df.__dataframe__().get_column_by_name("x")
assert col.dtype[0] == _DtypeKind.CATEGORICAL
assert col.describe_categorical == (False, True, {0: "aap", 1: "noot", 2: "mies"})
df2 = _from_dataframe_to_vaex(df.__dataframe__())
assert df2.x.tolist() == df.x.tolist()
assert df2.__dataframe__().get_column_by_name("x").null_count == 2
assert df["x"].dtype.index_type == df2["x"].dtype.index_type
assert_dataframe_equal(df.__dataframe__(), df)
def test_string():
df = vaex.from_dict({"A": ["a", None, "cdef", "", "g"]})
col = df.__dataframe__().get_column_by_name("A")
assert col._col.tolist() == df.A.tolist()
assert col.size == 5
assert col.null_count == 1
assert col.dtype[0] == _DtypeKind.STRING
assert col.describe_null == (3,0)
df2 = _from_dataframe_to_vaex(df.__dataframe__())
assert df2.A.tolist() == df.A.tolist()
assert df2.__dataframe__().get_column_by_name("A").null_count == 1
assert df2.__dataframe__().get_column_by_name("A").describe_null == (3,0)
assert df2.__dataframe__().get_column_by_name("A").dtype[0] == _DtypeKind.STRING
df_sliced = df[1:]
col = df_sliced.__dataframe__().get_column_by_name("A")
assert col.size == 4
assert col.null_count == 1
assert col.dtype[0] == _DtypeKind.STRING
assert col.describe_null == (3,0)
df2 = _from_dataframe_to_vaex(df_sliced.__dataframe__())
assert df2.A.tolist() == df_sliced.A.tolist()
assert df2.__dataframe__().get_column_by_name("A").null_count == 1
assert df2.__dataframe__().get_column_by_name("A").describe_null == (3,0)
assert df2.__dataframe__().get_column_by_name("A").dtype[0] == _DtypeKind.STRING
def test_no_mem_copy():
strings = ["a", "", "cdef", "", "g"]
# data for above string array
dbuf = np.array([ 97, 99, 100, 101, 102, 103], dtype='uint8')
obuf = np.array([0, 1, 1, 5, 5, 6], dtype='int64')
length = 5
buffers = [None, pa.py_buffer(obuf), pa.py_buffer(dbuf)]
s = pa.Array.from_buffers(pa.large_utf8(), length, buffers)
x = np.arange(0, 5)
df = vaex.from_arrays(x=x, s=s)
df2 = _from_dataframe_to_vaex(df.__dataframe__())
# primitive data
x[0] = 999
assert df2.x.tolist() == [999, 1, 2, 3, 4]
# strings
assert df.s.tolist() == strings
assert df2.s.tolist() == strings
# mutate the buffer data (which neither arrow nor vaex actually supports/wants)
strings[0] = "b"
dbuf[0] += 1
assert df.s.tolist() == strings
assert df2.s.tolist() == strings
def test_object():
df = vaex.from_arrays(x=np.array([None, True, False]))
col = df.__dataframe__().get_column_by_name("x")
assert col._col.tolist() == df.x.tolist()
assert col.size == 3
with pytest.raises(ValueError):
assert col.dtype
with pytest.raises(ValueError):
assert col.describe_null
def test_virtual_column():
df = vaex.from_arrays(x=np.array([True, True, False]), y=np.array([1, 2, 0]), z=np.array([9.2, 10.5, 11.8]))
df.add_virtual_column("r", "sqrt(y**2 + z**2)")
df2 = _from_dataframe_to_vaex(df.__dataframe__())
assert df2.r.tolist() == df.r.tolist()
def test_VaexBuffer():
x = np.ndarray(shape=(5,), dtype=float, order="F")
x_buffer = _VaexBuffer(x)
assert x_buffer.bufsize == 5 * x.itemsize
assert x_buffer.ptr == x.__array_interface__["data"][0]
assert x_buffer.__dlpack_device__() == (1, None)
assert x_buffer.__repr__() == f"VaexBuffer({{'bufsize': {5*x.itemsize}, 'ptr': {x.__array_interface__['data'][0]}, 'device': 'CPU'}})"
with pytest.raises(NotImplementedError):
assert x_buffer.__dlpack__()
def test_VaexDataFrame():
df = vaex.from_arrays(x=np.array([True, True, False]), y=np.array([1, 2, 0]), z=np.array([9.2, 10.5, 11.8]))
df2 = df.__dataframe__()
assert df2._allow_copy == True
assert df2.num_columns() == 3
assert df2.num_rows() == 3
assert df2.num_chunks() == 1
assert df2.column_names() == ["x", "y", "z"]
assert df2.get_column(0)._col.tolist() == df.x.tolist()
assert df2.get_column_by_name("y")._col.tolist() == df.y.tolist()
for col in df2.get_columns():
assert col._col.tolist() == df[col._col.expression].tolist()
assert df2.select_columns((0, 2))._df[:, 0].tolist() == df2.select_columns_by_name(("x", "z"))._df[:, 0].tolist()
assert df2.select_columns((0, 2))._df[:, 1].tolist() == df2.select_columns_by_name(("x", "z"))._df[:, 1].tolist()
def test_chunks(df_factory):
x = np.arange(10)
df = df_factory(x=x)
df2 = df.__dataframe__()
chunk_iter = iter(df2.get_chunks(3))
chunk = next(chunk_iter)
assert chunk.num_rows() == 4
chunk = next(chunk_iter)
assert chunk.num_rows() == 4
chunk = next(chunk_iter)
assert chunk.num_rows() == 2
with pytest.raises(StopIteration):
chunk = next(chunk_iter)
def assert_buffer_equal(buffer_dtype: Tuple[_VaexBuffer, Any], vaexcol: vaex.expression.Expression):
buf, dtype = buffer_dtype
pytest.raises(NotImplementedError, buf.__dlpack__)
assert buf.__dlpack_device__() == (1, None)
assert dtype[1] == vaexcol.dtype.index_type.numpy.itemsize * 8
if not isinstance(vaexcol.values, np.ndarray) and isinstance(vaexcol.values.type, pa.DictionaryType):
assert dtype[2] == vaexcol.index_values().dtype.numpy.str
else:
assert dtype[2] == vaexcol.dtype.numpy.str
def assert_column_equal(col: _VaexColumn, vaexcol: vaex.expression.Expression):
assert col.size == vaexcol.df.count("*")
assert col.offset == 0
assert col.null_count == vaexcol.countmissing()
assert_buffer_equal(col._get_data_buffer(), vaexcol)
def assert_dataframe_equal(dfo: DataFrameObject, df: vaex.dataframe.DataFrame):
assert dfo.num_columns() == len(df.columns)
assert dfo.num_rows() == len(df)
assert dfo.column_names() == list(df.get_column_names())
for col in df.get_column_names():
assert_column_equal(dfo.get_column_by_name(col), df[col])
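These tests all exercise the same roundtrip: export a vaex DataFrame through the interchange protocol and rebuild it. A minimal sketch of that pattern, assuming vaex and its dataframe_protocol module are importable as above:

import numpy as np
import vaex
from vaex.dataframe_protocol import _from_dataframe_to_vaex

df = vaex.from_arrays(x=np.array([1, 2, 3]), y=np.array([1.5, 2.5, 3.5]))
df2 = _from_dataframe_to_vaex(df.__dataframe__())  # rebuild from the protocol object
assert df2.x.tolist() == df.x.tolist()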
|
spikeinterface/comparison/groundtruthcomparison.py | khl02007/spikeinterface | 116 | 85936 |
import pandas as pd
import numpy as np
from .basecomparison import BaseTwoSorterComparison
from .comparisontools import (do_score_labels, make_possible_match,
make_best_match, make_hungarian_match, do_confusion_matrix, do_count_score,
compute_performance)
class GroundTruthComparison(BaseTwoSorterComparison):
"""
Compares a sorter to a ground truth.
This class can:
* compute a "match between gt_sorting and tested_sorting
* compute optionally the score label (TP, FN, CL, FP) for each spike
* count by unit of GT the total of each (TP, FN, CL, FP) into a Dataframe
GroundTruthComparison.count
* compute the confusion matrix .get_confusion_matrix()
* compute performance metrics with several strategies based on
the count score by unit
* count well detected units
* count false positive detected units
* count redundant units
* count overmerged units
* summarize all of this
Parameters
----------
gt_sorting: SortingExtractor
The first sorting for the comparison
tested_sorting: SortingExtractor
The second sorting for the comparison
gt_name: str
The name of sorter 1
tested_name: : str
The name of sorter 2
delta_time: float
Number of ms to consider coincident spikes (default 0.4 ms)
match_score: float
Minimum agreement score to match units (default 0.5)
chance_score: float
Minimum agreement score for a possible match (default 0.1)
redundant_score: float
Agreement score above which units are redundant (default 0.2)
overmerged_score: float
Agreement score above which units can be overmerged (default 0.2)
well_detected_score: float
Agreement score above which units are well detected (default 0.8)
exhaustive_gt: bool (default False)
Tell if the ground truth is "exhaustive" or not, i.e. whether the
GT has all possible units. It allows more performance measurements.
For instance, MEArec simulated datasets have exhaustive_gt=True
match_mode: 'hungarian', or 'best'
Which match is used for counting: 'hungarian' or 'best' match.
n_jobs: int
Number of cores to use in parallel. Uses all available if -1
compute_labels: bool
If True, labels are computed at instantiation (default False)
compute_misclassifications: bool
If True, misclassifications are computed at instantiation (default False)
verbose: bool
If True, output is verbose
Returns
-------
sorting_comparison: SortingComparison
The SortingComparison object
"""
def __init__(self, gt_sorting, tested_sorting, gt_name=None, tested_name=None,
delta_time=0.4, sampling_frequency=None, match_score=0.5, well_detected_score=0.8,
redundant_score=0.2, overmerged_score=0.2, chance_score=0.1, exhaustive_gt=False, n_jobs=-1,
match_mode='hungarian', compute_labels=False, compute_misclassifications=False, verbose=False):
if gt_name is None:
gt_name = 'ground truth'
if tested_name is None:
tested_name = 'tested'
BaseTwoSorterComparison.__init__(self, gt_sorting, tested_sorting, sorting1_name=gt_name,
sorting2_name=tested_name, delta_time=delta_time,
match_score=match_score, # sampling_frequency=sampling_frequency,
chance_score=chance_score, n_jobs=n_jobs,
verbose=verbose)
self.exhaustive_gt = exhaustive_gt
self._compute_misclassifications = compute_misclassifications
self.redundant_score = redundant_score
self.overmerged_score = overmerged_score
self.well_detected_score = well_detected_score
assert match_mode in ['hungarian', 'best']
self.match_mode = match_mode
self._compute_labels = compute_labels
self._do_count()
self._labels_st1 = None
self._labels_st2 = None
if self._compute_labels:
self._do_score_labels()
# confusion matrix is computed on demand
self._confusion_matrix = None
def get_labels1(self, unit_id):
if self._labels_st1 is None:
self._do_score_labels()
if unit_id in self.sorting1.get_unit_ids():
return self._labels_st1[unit_id]
else:
raise Exception("Unit_id is not a valid unit")
def get_labels2(self, unit_id):
if self._labels_st1 is None:
self._do_score_labels()
if unit_id in self.sorting2.get_unit_ids():
return self._labels_st2[unit_id]
else:
raise Exception("Unit_id is not a valid unit")
def _do_matching(self):
if self._verbose:
print("Matching...")
self.possible_match_12, self.possible_match_21 = make_possible_match(self.agreement_scores, self.chance_score)
self.best_match_12, self.best_match_21 = make_best_match(self.agreement_scores, self.chance_score)
self.hungarian_match_12, self.hungarian_match_21 = make_hungarian_match(self.agreement_scores,
self.match_score)
def _do_count(self):
"""
Do the raw counts into a dataframe.
Internally uses the hungarian match or the best match.
"""
if self.match_mode == 'hungarian':
match_12 = self.hungarian_match_12
elif self.match_mode == 'best':
match_12 = self.best_match_12
self.count_score = do_count_score(self.event_counts1, self.event_counts2,
match_12, self.match_event_count)
def _do_confusion_matrix(self):
if self._verbose:
print("Computing confusion matrix...")
if self.match_mode == 'hungarian':
match_12 = self.hungarian_match_12
elif self.match_mode == 'best':
match_12 = self.best_match_12
self._confusion_matrix = do_confusion_matrix(self.event_counts1, self.event_counts2, match_12,
self.match_event_count)
def get_confusion_matrix(self):
"""
Computes the confusion matrix.
Returns
-------
confusion_matrix: pandas.DataFrame
The confusion matrix
"""
if self._confusion_matrix is None:
self._do_confusion_matrix()
return self._confusion_matrix
def _do_score_labels(self):
assert self.match_mode == 'hungarian', \
'Labels (TP, FP, FN) can be computed only with hungarian match'
if self._verbose:
print("Adding labels...")
self._labels_st1, self._labels_st2 = do_score_labels(self.sorting1, self.sorting2,
self.delta_frames, self.hungarian_match_12,
self._compute_misclassifications)
def get_performance(self, method='by_unit', output='pandas'):
"""
Get performance rates with one of several methods:
* 'raw_count' : just render the raw count table
* 'by_unit' : render performance as rates, unit by unit over the GT units
* 'pooled_with_average' : compute rates unit by unit and average them
Parameters
----------
method: str
'by_unit', or 'pooled_with_average'
output: str
'pandas' or 'dict'
Returns
-------
perf: pandas dataframe/series (or dict)
dataframe/series (based on 'output') with performance entries
"""
possibles = ('raw_count', 'by_unit', 'pooled_with_average')
if method not in possibles:
raise Exception("'method' can be " + ' or '.join(possibles))
if method == 'raw_count':
perf = self.count_score
elif method == 'by_unit':
perf = compute_performance(self.count_score)
elif method == 'pooled_with_average':
perf = self.get_performance(method='by_unit').mean(axis=0)
if output == 'dict' and isinstance(perf, pd.Series):
perf = perf.to_dict()
return perf
def print_performance(self, method='pooled_with_average'):
"""
Print performance with the selected method
"""
template_txt_performance = _template_txt_performance
if method == 'by_unit':
perf = self.get_performance(method=method, output='pandas')
perf = perf * 100
# ~ print(perf)
d = {k: perf[k].tolist() for k in perf.columns}
txt = template_txt_performance.format(method=method, **d)
print(txt)
elif method == 'pooled_with_average':
perf = self.get_performance(method=method, output='pandas')
perf = perf * 100
txt = template_txt_performance.format(method=method, **perf.to_dict())
print(txt)
def print_summary(self, well_detected_score=None, redundant_score=None, overmerged_score=None):
"""
Print a global performance summary that depends on the context:
* exhaustive_gt = True/False
* how many GT units (one or several)
This summary mixes several performance metrics.
"""
txt = _template_summary_part1
d = dict(
num_gt=len(self.unit1_ids),
num_tested=len(self.unit2_ids),
num_well_detected=self.count_well_detected_units(well_detected_score),
num_redundant=self.count_redundant_units(redundant_score),
num_overmerged=self.count_overmerged_units(overmerged_score),
)
if self.exhaustive_gt:
txt = txt + _template_summary_part2
d['num_false_positive_units'] = self.count_false_positive_units()
d['num_bad'] = self.count_bad_units()
txt = txt.format(**d)
print(txt)
def get_well_detected_units(self, well_detected_score=None):
"""
Return units list of "well detected units" from tested_sorting.
"well detected units" are defined as units in tested that
are well matched to GT units.
Parameters
----------
well_detected_score: float (default 0.8)
The agreement score above which tested units
are counted as "well detected".
"""
if well_detected_score is not None:
self.well_detected_score = well_detected_score
matched_units2 = self.hungarian_match_12
well_detected_ids = []
for u2 in self.unit2_ids:
if u2 in list(matched_units2.values):
u1 = self.hungarian_match_21[u2]
score = self.agreement_scores.at[u1, u2]
if score >= self.well_detected_score:
well_detected_ids.append(u2)
return well_detected_ids
def count_well_detected_units(self, well_detected_score):
"""
Count how many units are well detected.
kwargs are the same as for get_well_detected_units.
"""
return len(self.get_well_detected_units(well_detected_score=well_detected_score))
def get_false_positive_units(self, redundant_score=None):
"""
Return units list of "false positive units" from tested_sorting.
"false positive units" are defined as units in tested that
are not matched at all in GT units.
Need exhaustive_gt=True
Parameters
----------
redundant_score: float (default 0.2)
The agreement score below which tested units
are counted as "false positive"" (and not "redundant").
"""
assert self.exhaustive_gt, 'false_positive_units list is valid only if exhaustive_gt=True'
if redundant_score is not None:
self.redundant_score = redundant_score
matched_units2 = list(self.hungarian_match_12.values)
false_positive_ids = []
for u2 in self.unit2_ids:
if u2 not in matched_units2:
if self.best_match_21[u2] == -1:
false_positive_ids.append(u2)
else:
u1 = self.best_match_21[u2]
score = self.agreement_scores.at[u1, u2]
if score < self.redundant_score:
false_positive_ids.append(u2)
return false_positive_ids
def count_false_positive_units(self, redundant_score=None):
"""
See get_false_positive_units().
"""
return len(self.get_false_positive_units(redundant_score))
def get_redundant_units(self, redundant_score=None):
"""
Return "redundant units"
"redundant units" are defined as units in tested
that match a GT units with a big agreement score
but it is not the best match.
In other world units in GT that detected twice or more.
Parameters
----------
redundant_score: float (default 0.2)
The agreement score above which tested units
are counted as "redundant" (and not "false positive" ).
"""
assert self.exhaustive_gt, 'redundant_units list is valid only if exhaustive_gt=True'
if redundant_score is not None:
self.redundant_score = redundant_score
matched_units2 = list(self.hungarian_match_12.values)
redundant_ids = []
for u2 in self.unit2_ids:
if u2 not in matched_units2 and self.best_match_21[u2] != -1:
u1 = self.best_match_21[u2]
if u2 != self.best_match_12[u1]:
score = self.agreement_scores.at[u1, u2]
if score >= self.redundant_score:
redundant_ids.append(u2)
return redundant_ids
def count_redundant_units(self, redundant_score=None):
"""
See get_redundant_units().
"""
return len(self.get_redundant_units(redundant_score=redundant_score))
def get_overmerged_units(self, overmerged_score=None):
"""
Return "overmerged units"
"overmerged units" are defined as units in tested
that match more than one GT unit with an agreement score larger than overmerged_score.
Parameters
----------
overmerged_score: float (default 0.2)
Tested units with 2 or more agreement scores above 'overmerged_score'
are counted as "overmerged".
"""
assert self.exhaustive_gt, 'overmerged_units list is valid only if exhaustive_gt=True'
if overmerged_score is not None:
self.overmerged_score = overmerged_score
overmerged_ids = []
for u2 in self.unit2_ids:
scores = self.agreement_scores.loc[:, u2]
if len(np.where(scores > self.overmerged_score)[0]) > 1:
overmerged_ids.append(u2)
return overmerged_ids
def count_overmerged_units(self, overmerged_score=None):
"""
See get_overmerged_units().
"""
return len(self.get_overmerged_units(overmerged_score=overmerged_score))
def get_bad_units(self):
"""
Return units list of "bad units".
"bad units" are defined as units in tested that are not
in the best match list of GT units.
So it is the union of "false positive units" + "redundant units".
Need exhaustive_gt=True
"""
assert self.exhaustive_gt, 'bad_units list is valid only if exhaustive_gt=True'
matched_units2 = list(self.hungarian_match_12.values)
bad_ids = []
for u2 in self.unit2_ids:
if u2 not in matched_units2:
bad_ids.append(u2)
return bad_ids
def count_bad_units(self):
"""
See get_bad_units
"""
return len(self.get_bad_units())
# useful also for gathercomparison
_template_txt_performance = """PERFORMANCE ({method})
-----------
ACCURACY: {accuracy}
RECALL: {recall}
PRECISION: {precision}
FALSE DISCOVERY RATE: {false_discovery_rate}
MISS RATE: {miss_rate}
"""
_template_summary_part1 = """SUMMARY
-------
GT num_units: {num_gt}
TESTED num_units: {num_tested}
num_well_detected: {num_well_detected}
num_redundant: {num_redundant}
num_overmerged: {num_overmerged}
"""
_template_summary_part2 = """num_false_positive_units {num_false_positive_units}
num_bad: {num_bad}
"""
def compare_sorter_to_ground_truth(*args, **kwargs):
return GroundTruthComparison(*args, **kwargs)
compare_sorter_to_ground_truth.__doc__ = GroundTruthComparison.__doc__
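A hedged usage sketch of the entry point defined above; gt_sorting and tested_sorting stand in for SortingExtractor objects prepared elsewhere:

from spikeinterface.comparison.groundtruthcomparison import compare_sorter_to_ground_truth

# gt_sorting / tested_sorting: SortingExtractor objects (placeholders here)
cmp = compare_sorter_to_ground_truth(gt_sorting, tested_sorting, exhaustive_gt=True)
print(cmp.get_performance(method='pooled_with_average'))  # accuracy, recall, precision, ...
cmp.print_summary()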
|
src/cutadapt/adapters.py | marcelm/cutadapt | 375 | 85941 |
"""
Adapter finding and trimming classes
The ...Adapter classes are responsible for finding adapters.
The ...Match classes trim the reads.
"""
import logging
from enum import IntFlag
from collections import defaultdict
from typing import Optional, Tuple, Sequence, Dict, Any, List, Union
from abc import ABC, abstractmethod
from .align import EndSkip, Aligner, PrefixComparer, SuffixComparer, edit_environment, hamming_environment
logger = logging.getLogger()
class InvalidCharacter(Exception):
pass
# TODO remove this enum, this should be within each Adapter class
class Where(IntFlag):
"""
Aligner flag combinations for all adapter types.
"REFERENCE" is the adapter sequence, "QUERY" is the read sequence
"""
BACK = EndSkip.QUERY_START | EndSkip.QUERY_STOP | EndSkip.REFERENCE_END
FRONT = EndSkip.QUERY_START | EndSkip.QUERY_STOP | EndSkip.REFERENCE_START
PREFIX = EndSkip.QUERY_STOP
SUFFIX = EndSkip.QUERY_START
# Just like FRONT/BACK, but without internal matches
FRONT_NOT_INTERNAL = EndSkip.REFERENCE_START | EndSkip.QUERY_STOP
BACK_NOT_INTERNAL = EndSkip.QUERY_START | EndSkip.REFERENCE_END
ANYWHERE = EndSkip.SEMIGLOBAL
def returns_defaultdict_int():
# We need this function to make EndStatistics picklable.
# Even a @staticmethod of EndStatistics is not sufficient
# as that is not picklable before Python 3.5.
return defaultdict(int)
class EndStatistics:
"""Statistics about the 5' or 3' end"""
def __init__(self, adapter: "SingleAdapter"):
self.max_error_rate: float = adapter.max_error_rate
self.sequence: str = adapter.sequence
self.effective_length: int = adapter.effective_length
self.has_wildcards: bool = adapter.adapter_wildcards
self.indels: bool = adapter.indels
self.adapter_type: str = adapter.descriptive_identifier()
self.allows_partial_matches: bool = adapter.allows_partial_matches
# self.errors[l][e] == n iff a sequence of length l matching at e errors was removed n times
self.errors: Dict[int, Dict[int, int]] = defaultdict(returns_defaultdict_int)
self.adjacent_bases = {'A': 0, 'C': 0, 'G': 0, 'T': 0, '': 0}
# TODO avoid hard-coding the list of classes
self._remove_prefix = isinstance(adapter, FrontAdapter)
def __repr__(self):
errors = {k: dict(v) for k, v in self.errors.items()}
return "EndStatistics(max_error_rate={}, errors={}, adjacent_bases={})".format(
self.max_error_rate,
errors,
self.adjacent_bases,
)
def __iadd__(self, other: Any):
if not isinstance(other, self.__class__):
raise ValueError("Cannot compare")
if (
self.max_error_rate != other.max_error_rate
or self.sequence != other.sequence
or self.effective_length != other.effective_length
or self.indels != other.indels
):
raise RuntimeError('Incompatible EndStatistics, cannot be added')
for base in ('A', 'C', 'G', 'T', ''):
self.adjacent_bases[base] += other.adjacent_bases[base]
for length, error_dict in other.errors.items():
for errors in error_dict:
self.errors[length][errors] += other.errors[length][errors]
return self
@property
def lengths(self):
d = {length: sum(errors.values()) for length, errors in self.errors.items()}
return d
def random_match_probabilities(self, gc_content: float) -> List[float]:
"""
Estimate probabilities that this adapter end matches a
random sequence. Indels are not taken into account.
Returns a list p, where p[i] is the probability that
i bases of this adapter match a random sequence with
GC content gc_content.
"""
assert 0.0 <= gc_content <= 1.0
seq = self.sequence
# FIXME this is broken for 'anywhere' adapters
if self._remove_prefix:
seq = seq[::-1]
allowed_bases = 'CGRYSKMBDHVN' if self.has_wildcards else 'GC'
p = 1.
probabilities = [p]
for i, c in enumerate(seq):
if c in allowed_bases:
p *= gc_content / 2.
else:
p *= (1. - gc_content) / 2.
probabilities.append(p)
return probabilities
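# Worked example (illustrative note, not part of the original file): with gc_content=0.5 and a
# non-wildcard adapter, each base contributes a factor of 0.5 / 2 = 0.25 regardless of whether it
# is in 'GC', so random_match_probabilities(0.5) for a 3-base end returns
# [1.0, 0.25, 0.0625, 0.015625].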
class AdapterStatistics(ABC):
reverse_complemented: int = 0
name: str
adapter: "Adapter"
@abstractmethod
def __iadd__(self, other):
pass
@abstractmethod
def end_statistics(self) -> Tuple[Optional[EndStatistics], Optional[EndStatistics]]:
pass
@abstractmethod
def add_match(self, match) -> None:
pass
class SingleAdapterStatistics(AdapterStatistics, ABC):
"""
Statistics about a 5' or 3' adapter, where we only need to keep track of sequences
removed from one "end".
"""
def __init__(self, adapter: "SingleAdapter"):
self.name = adapter.name
self.adapter = adapter
self.end = EndStatistics(adapter)
def __repr__(self):
return f"SingleAdapterStatistics(name={self.name}, end={self.end})"
def __iadd__(self, other: "SingleAdapterStatistics"):
if not isinstance(other, self.__class__):
raise ValueError("Cannot iadd")
self.end += other.end
self.reverse_complemented += other.reverse_complemented
return self
class FrontAdapterStatistics(SingleAdapterStatistics):
def add_match(self, match: "RemoveBeforeMatch"):
self.end.errors[match.removed_sequence_length()][match.errors] += 1
def end_statistics(self) -> Tuple[Optional[EndStatistics], Optional[EndStatistics]]:
return self.end, None
class BackAdapterStatistics(SingleAdapterStatistics):
def add_match(self, match: "RemoveAfterMatch"):
adjacent_base = match.adjacent_base()
self.end.errors[match.removed_sequence_length()][match.errors] += 1
try:
self.end.adjacent_bases[adjacent_base] += 1
except KeyError:
self.end.adjacent_bases[""] += 1
def end_statistics(self) -> Tuple[Optional[EndStatistics], Optional[EndStatistics]]:
return None, self.end
class LinkedAdapterStatistics(AdapterStatistics):
"""
Statistics about sequences removed by a linked adapter.
"""
def __init__(
self,
adapter: "LinkedAdapter",
front: "SingleAdapter",
back: "SingleAdapter",
):
self.name = adapter.name
self.adapter = adapter
self.front = EndStatistics(front)
self.back = EndStatistics(back)
self.reverse_complemented = 0
def __repr__(self):
return f"LinkedAdapterStatistics(name={self.name}, front={self.front}, back={self.back})"
def __iadd__(self, other: "LinkedAdapterStatistics"):
if not isinstance(other, self.__class__):
raise ValueError("Cannot iadd")
self.front += other.front
self.back += other.back
self.reverse_complemented += other.reverse_complemented
return self
def add_match(self, match: "LinkedMatch"):
# TODO this is duplicated code
if match.front_match:
self.front.errors[match.front_match.removed_sequence_length()][match.errors] += 1
if match.back_match:
adjacent_base = match.back_match.adjacent_base()
self.back.errors[match.back_match.removed_sequence_length()][match.errors] += 1
try:
self.back.adjacent_bases[adjacent_base] += 1
except KeyError:
self.back.adjacent_bases[""] += 1
def end_statistics(self) -> Tuple[Optional[EndStatistics], Optional[EndStatistics]]:
return self.front, self.back
class AnywhereAdapterStatistics(AdapterStatistics):
"""
Statistics about sequences removed by an anywhere (5'/3') adapter.
"""
def __init__(self, adapter: "AnywhereAdapter"):
self.name = adapter.name
self.adapter = adapter
self.front = EndStatistics(adapter)
self.back = EndStatistics(adapter)
self.reverse_complemented = 0
def __repr__(self):
return f"AnywhereAdapterStatistics(name={self.name}, front={self.front}, back={self.back})"
def __iadd__(self, other: "AnywhereAdapterStatistics"):
if not isinstance(other, AnywhereAdapterStatistics):
raise ValueError("Cannot add")
self.front += other.front
self.back += other.back
self.reverse_complemented += other.reverse_complemented
return self
def add_match(self, match: Union["RemoveBeforeMatch", "RemoveAfterMatch"]) -> None:
# TODO contains duplicated code from the other add_match() methods
if isinstance(match, RemoveBeforeMatch):
self.front.errors[match.removed_sequence_length()][match.errors] += 1
else:
adjacent_base = match.adjacent_base()
self.back.errors[match.removed_sequence_length()][match.errors] += 1
try:
self.back.adjacent_bases[adjacent_base] += 1
except KeyError:
self.back.adjacent_bases[""] += 1
def end_statistics(self) -> Tuple[Optional[EndStatistics], Optional[EndStatistics]]:
return self.front, self.back
class Match(ABC):
adapter: "Adapter"
@abstractmethod
def remainder_interval(self) -> Tuple[int, int]:
pass
@abstractmethod
def retained_adapter_interval(self) -> Tuple[int, int]:
pass
@abstractmethod
def get_info_records(self, read) -> List[List]:
pass
@abstractmethod
def trimmed(self, read):
pass
class SingleMatch(Match, ABC):
"""
Representation of a single adapter matched to a single string
"""
__slots__ = ['astart', 'astop', 'rstart', 'rstop', 'matches', 'errors',
'adapter', 'sequence', 'length', 'adjacent_base']
def __init__(
self,
astart: int,
astop: int,
rstart: int,
rstop: int,
matches: int,
errors: int,
adapter: "SingleAdapter",
sequence: str,
):
self.astart: int = astart
self.astop: int = astop
self.rstart: int = rstart
self.rstop: int = rstop
self.matches: int = matches
self.errors: int = errors
self.adapter: SingleAdapter = adapter
self.sequence = sequence
# Number of aligned characters in the adapter. If there are
# indels, this may be different from the number of characters
# in the read.
self.length: int = astop - astart
def __repr__(self):
return 'SingleMatch(astart={}, astop={}, rstart={}, rstop={}, matches={}, errors={})'.format(
self.astart, self.astop, self.rstart, self.rstop, self.matches, self.errors)
def __eq__(self, other) -> bool:
return (
other.__class__ is self.__class__
and self.astart == other.astart
and self.astop == other.astop
and self.rstart == other.rstart
and self.rstop == other.rstop
and self.matches == other.matches
and self.errors == other.errors
and self.adapter is other.adapter
and self.sequence == other.sequence
)
def wildcards(self, wildcard_char: str = "N") -> str:
"""
Return a string that contains, for each wildcard character,
the character that it matches. For example, if the adapter
ATNGNA matches ATCGTA, then the string 'CT' is returned.
If there are indels, this is not reliable as the full alignment
is not available.
"""
wildcards = [self.sequence[self.rstart + i] for i in range(self.length)
if self.adapter.sequence[self.astart + i] == wildcard_char and
self.rstart + i < len(self.sequence)]
return ''.join(wildcards)
def get_info_records(self, read) -> List[List]:
seq = read.sequence
qualities = read.qualities
info = [
"",
self.errors,
self.rstart,
self.rstop,
seq[0:self.rstart],
seq[self.rstart:self.rstop],
seq[self.rstop:],
self.adapter.name,
]
if qualities:
info += [
qualities[0:self.rstart],
qualities[self.rstart:self.rstop],
qualities[self.rstop:],
]
else:
info += ["", "", ""]
return [info]
@abstractmethod
def removed_sequence_length(self) -> int:
pass
class RemoveBeforeMatch(SingleMatch):
"""A match that removes sequence before the match"""
def __repr__(self):
return 'RemoveBeforeMatch(astart={}, astop={}, rstart={}, rstop={}, matches={}, errors={})'.format(
self.astart, self.astop, self.rstart, self.rstop, self.matches, self.errors)
def rest(self) -> str:
"""
Return the part of the read before this match if this is a
'front' (5') adapter,
return the part after the match if this is not a 'front' adapter (3').
This can be an empty string.
"""
return self.sequence[:self.rstart]
def remainder_interval(self) -> Tuple[int, int]:
"""
Return an interval (start, stop) that describes the part of the read that would
remain after trimming
"""
return self.rstop, len(self.sequence)
def retained_adapter_interval(self) -> Tuple[int, int]:
return self.rstart, len(self.sequence)
def trim_slice(self):
# Same as remainder_interval, but as a slice() object
return slice(self.rstop, None)
def trimmed(self, read):
return read[self.rstop:]
def removed_sequence_length(self) -> int:
return self.rstop
class RemoveAfterMatch(SingleMatch):
"""A match that removes sequence after the match"""
def __repr__(self):
return "RemoveAfterMatch(astart={}, astop={}, rstart={}, rstop={}, matches={}, errors={})".format(
self.astart, self.astop, self.rstart, self.rstop, self.matches, self.errors)
def rest(self) -> str:
"""
Return the part of the read before this match if this is a
'front' (5') adapter,
return the part after the match if this is not a 'front' adapter (3').
This can be an empty string.
"""
return self.sequence[self.rstop:]
def remainder_interval(self) -> Tuple[int, int]:
"""
Return an interval (start, stop) that describes the part of the read that would
remain after trimming
"""
return 0, self.rstart
def retained_adapter_interval(self) -> Tuple[int, int]:
return 0, self.rstop
def trim_slice(self):
# Same as remainder_interval, but as a slice() object
return slice(None, self.rstart)
def trimmed(self, read):
return read[:self.rstart]
def adjacent_base(self) -> str:
return self.sequence[self.rstart - 1:self.rstart]
def removed_sequence_length(self) -> int:
return len(self.sequence) - self.rstart
def _generate_adapter_name(_start=[1]) -> str:
name = str(_start[0])
_start[0] += 1
return name
class Matchable(ABC):
"""Something that has a match_to() method."""
def __init__(self, name: str, *args, **kwargs):
self.name = name
@abstractmethod
def enable_debug(self):
pass
@abstractmethod
def match_to(self, sequence: str):
pass
class Adapter(Matchable, ABC):
description = "adapter with one component" # this is overriden in subclasses
@abstractmethod
def spec(self) -> str:
"""Return string representation of this adapter"""
@abstractmethod
def create_statistics(self) -> AdapterStatistics:
pass
@abstractmethod
def descriptive_identifier(self) -> str:
pass
class SingleAdapter(Adapter, ABC):
"""
This class can find a single adapter characterized by sequence, error rate,
type etc. within reads.
where -- A Where enum value. This influences where the adapter is allowed to appear within the
read.
sequence -- The adapter sequence as string. Will be converted to uppercase.
Also, Us will be converted to Ts.
max_errors -- Maximum allowed errors (non-negative float). If the value is less than 1, this is
interpreted as a rate directly and passed to the aligner. If it is 1 or greater, the value
is converted to a rate by dividing it by the length of the sequence.
The error rate is the number of errors in the alignment divided by the length
of the part of the alignment that matches the adapter.
min_overlap -- Minimum length of the part of the alignment
that matches the adapter.
read_wildcards -- Whether IUPAC wildcards in the read are allowed.
adapter_wildcards -- Whether IUPAC wildcards in the adapter are
allowed.
name -- optional name of the adapter. If not provided, the name is set to a
unique number.
"""
allows_partial_matches: bool = True
def __init__(
self,
sequence: str,
max_errors: float = 0.1,
min_overlap: int = 3,
read_wildcards: bool = False,
adapter_wildcards: bool = True,
name: Optional[str] = None,
indels: bool = True,
):
self.name: str = _generate_adapter_name() if name is None else name
super().__init__(self.name)
self._debug: bool = False
self.sequence: str = sequence.upper().replace("U", "T")
if not self.sequence:
raise ValueError("Adapter sequence is empty")
if max_errors >= 1:
max_errors /= len(self.sequence)
self.max_error_rate: float = max_errors
self.min_overlap: int = min(min_overlap, len(self.sequence))
iupac = frozenset('ABCDGHKMNRSTUVWXY')
if adapter_wildcards and not set(self.sequence) <= iupac:
for c in self.sequence:
if c not in iupac:
if c == "I":
extra = "For inosine, consider using N instead and please comment " \
"on <https://github.com/marcelm/cutadapt/issues/546>."
else:
extra = "Use only characters 'ABCDGHKMNRSTUVWXY'."
raise InvalidCharacter(
f"Character '{c}' in adapter sequence '{self.sequence}' is "
f"not a valid IUPAC code. {extra}"
)
# Optimization: Use non-wildcard matching if only ACGT is used
self.adapter_wildcards: bool = adapter_wildcards and not set(self.sequence) <= set("ACGT")
self.read_wildcards: bool = read_wildcards
self.indels: bool = indels
self.aligner = self._aligner()
def _make_aligner(self, flags: int) -> Aligner:
# TODO
# Indels are suppressed by setting their cost very high, but a different algorithm
# should be used instead.
indel_cost = 1 if self.indels else 100000
return Aligner(
self.sequence,
self.max_error_rate,
flags=flags,
wildcard_ref=self.adapter_wildcards,
wildcard_query=self.read_wildcards,
indel_cost=indel_cost,
min_overlap=self.min_overlap,
)
def __repr__(self):
return '<{cls}(name={name!r}, sequence={sequence!r}, '\
'max_error_rate={max_error_rate}, min_overlap={min_overlap}, '\
'read_wildcards={read_wildcards}, '\
'adapter_wildcards={adapter_wildcards}, '\
'indels={indels})>'.format(cls=self.__class__.__name__, **vars(self))
@property
def effective_length(self) -> int:
return self.aligner.effective_length
def enable_debug(self) -> None:
"""
Print out the dynamic programming matrix after matching a read to an
adapter.
"""
self._debug = True
self.aligner.enable_debug()
@abstractmethod
def _aligner(self):
pass
@abstractmethod
def match_to(self, sequence: str):
"""
Attempt to match this adapter to the given string.
Return a Match instance if a match was found;
return None if no match was found given the matching criteria (minimum
overlap length, maximum error rate).
"""
def __len__(self) -> int:
return len(self.sequence)
class FrontAdapter(SingleAdapter):
"""A 5' adapter"""
description = "regular 5'"
def __init__(self, *args, **kwargs):
self._force_anywhere = kwargs.pop("force_anywhere", False)
super().__init__(*args, **kwargs)
def descriptive_identifier(self) -> str:
return "regular_five_prime"
def _aligner(self) -> Aligner:
return self._make_aligner(Where.ANYWHERE.value if self._force_anywhere else Where.FRONT.value)
def match_to(self, sequence: str):
"""
Attempt to match this adapter to the given read.
Return a Match instance if a match was found;
return None if no match was found given the matching criteria (minimum
overlap length, maximum error rate).
"""
alignment: Optional[Tuple[int, int, int, int, int, int]] = self.aligner.locate(sequence)
if self._debug:
print(self.aligner.dpmatrix)
if alignment is None:
return None
return RemoveBeforeMatch(*alignment, adapter=self, sequence=sequence)
def spec(self) -> str:
return f"{self.sequence}..."
def create_statistics(self) -> FrontAdapterStatistics:
return FrontAdapterStatistics(self)
class BackAdapter(SingleAdapter):
"""A 3' adapter"""
description = "regular 3'"
def __init__(self, *args, **kwargs):
self._force_anywhere = kwargs.pop("force_anywhere", False)
super().__init__(*args, **kwargs)
def descriptive_identifier(self) -> str:
return "regular_three_prime"
def _aligner(self):
return self._make_aligner(Where.ANYWHERE.value if self._force_anywhere else Where.BACK.value)
def match_to(self, sequence: str):
"""
Attempt to match this adapter to the given read.
Return a Match instance if a match was found;
return None if no match was found given the matching criteria (minimum
overlap length, maximum error rate).
"""
alignment: Optional[Tuple[int, int, int, int, int, int]] = self.aligner.locate(sequence)
if self._debug:
print(self.aligner.dpmatrix) # pragma: no cover
if alignment is None:
return None
return RemoveAfterMatch(*alignment, adapter=self, sequence=sequence)
def spec(self) -> str:
return f"{self.sequence}"
def create_statistics(self) -> BackAdapterStatistics:
return BackAdapterStatistics(self)
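# Usage sketch (illustrative, not part of the original file): a 3' adapter removes the match and
# everything after it, e.g.
#     adapter = BackAdapter("ACGTACGT", max_errors=0.1)
#     match = adapter.match_to("TTTTACGTACGTGGG")
#     if match is not None:
#         match.trimmed("TTTTACGTACGTGGG")  # -> "TTTT"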
class AnywhereAdapter(SingleAdapter):
"""
An adapter that can be 5' or 3'. If a match involves the first base of
the read, it is assumed to be a 5' adapter and a 3' otherwise.
"""
description = "variable 5'/3'"
def descriptive_identifier(self) -> str:
return "anywhere"
def _aligner(self):
return self._make_aligner(Where.ANYWHERE.value)
def match_to(self, sequence: str):
"""
Attempt to match this adapter to the given string.
Return a Match instance if a match was found;
return None if no match was found given the matching criteria (minimum
overlap length, maximum error rate).
"""
alignment = self.aligner.locate(sequence.upper())
if self._debug:
print(self.aligner.dpmatrix)
if alignment is None:
return None
# guess: if alignment starts at pos 0, it’s a 5' adapter
if alignment[2] == 0: # index 2 is rstart
match = RemoveBeforeMatch(*alignment, adapter=self, sequence=sequence) # type: ignore
else:
match = RemoveAfterMatch(*alignment, adapter=self, sequence=sequence) # type: ignore
return match
def spec(self) -> str:
return f"...{self.sequence}..."
def create_statistics(self) -> AnywhereAdapterStatistics:
return AnywhereAdapterStatistics(self)
class NonInternalFrontAdapter(FrontAdapter):
"""A non-internal 5' adapter"""
description = "non-internal 5'"
def descriptive_identifier(self) -> str:
return "noninternal_five_prime"
def _aligner(self):
return self._make_aligner(Where.FRONT_NOT_INTERNAL.value)
def match_to(self, sequence: str):
# The locate function takes care of uppercasing the sequence
alignment = self.aligner.locate(sequence)
if self._debug:
try:
print(self.aligner.dpmatrix)
except AttributeError:
pass
if alignment is None:
return None
return RemoveBeforeMatch(*alignment, adapter=self, sequence=sequence) # type: ignore
def spec(self) -> str:
return f"X{self.sequence}..."
class NonInternalBackAdapter(BackAdapter):
"""A non-internal 3' adapter"""
description = "non-internal 3'"
def descriptive_identifier(self) -> str:
return "noninternal_three_prime"
def _aligner(self):
return self._make_aligner(Where.BACK_NOT_INTERNAL.value)
def match_to(self, sequence: str):
# The locate function takes care of uppercasing the sequence
alignment = self.aligner.locate(sequence)
if self._debug:
try:
print(self.aligner.dpmatrix) # pragma: no cover
except AttributeError:
pass
if alignment is None:
return None
return RemoveAfterMatch(*alignment, adapter=self, sequence=sequence) # type: ignore
def spec(self) -> str:
return f"{self.sequence}X"
class PrefixAdapter(NonInternalFrontAdapter):
"""An anchored 5' adapter"""
description = "anchored 5'"
allows_partial_matches = False
def __init__(self, sequence: str, *args, **kwargs):
kwargs["min_overlap"] = len(sequence)
super().__init__(sequence, *args, **kwargs)
def descriptive_identifier(self) -> str:
return "anchored_five_prime"
def _aligner(self):
if not self.indels: # TODO or if error rate allows 0 errors anyway
return PrefixComparer(
self.sequence,
self.max_error_rate,
wildcard_ref=self.adapter_wildcards,
wildcard_query=self.read_wildcards,
min_overlap=self.min_overlap
)
else:
return self._make_aligner(Where.PREFIX.value)
def spec(self) -> str:
return f"^{self.sequence}..."
class SuffixAdapter(NonInternalBackAdapter):
"""An anchored 3' adapter"""
description = "anchored 3'"
allows_partial_matches = False
def __init__(self, sequence: str, *args, **kwargs):
kwargs["min_overlap"] = len(sequence)
super().__init__(sequence, *args, **kwargs)
def descriptive_identifier(self) -> str:
return "anchored_three_prime"
def _aligner(self):
if not self.indels: # TODO or if error rate allows 0 errors anyway
return SuffixComparer(
self.sequence,
self.max_error_rate,
wildcard_ref=self.adapter_wildcards,
wildcard_query=self.read_wildcards,
min_overlap=self.min_overlap
)
else:
return self._make_aligner(Where.SUFFIX.value)
def spec(self) -> str:
return f"{self.sequence}$"
class LinkedMatch(Match):
"""
Represent a match of a LinkedAdapter
"""
def __init__(self, front_match: RemoveBeforeMatch, back_match: RemoveAfterMatch, adapter: "LinkedAdapter"):
assert front_match is not None or back_match is not None
self.front_match: RemoveBeforeMatch = front_match
self.back_match: RemoveAfterMatch = back_match
self.adapter: LinkedAdapter = adapter
def __repr__(self):
return '<LinkedMatch(front_match={!r}, back_match={}, adapter={})>'.format(
self.front_match, self.back_match, self.adapter)
@property
def matches(self):
"""Number of matching bases"""
m = 0
if self.front_match is not None:
m += self.front_match.matches
if self.back_match is not None:
m += self.back_match.matches
return m
@property
def errors(self):
e = 0
if self.front_match is not None:
e += self.front_match.errors
if self.back_match is not None:
e += self.back_match.errors
return e
def trimmed(self, read):
if self.front_match:
read = self.front_match.trimmed(read)
if self.back_match:
read = self.back_match.trimmed(read)
return read
def remainder_interval(self) -> Tuple[int, int]:
matches = [match for match in [self.front_match, self.back_match] if match is not None]
return remainder(matches)
def retained_adapter_interval(self) -> Tuple[int, int]:
if self.front_match:
start = self.front_match.rstart
offset = self.front_match.rstop
else:
start = offset = 0
if self.back_match:
end = self.back_match.rstop + offset
else:
end = len(self.front_match.sequence)
return start, end
def get_info_records(self, read) -> List[List]:
records = []
for match, namesuffix in [
(self.front_match, ";1"),
(self.back_match, ";2"),
]:
if match is None:
continue
record = match.get_info_records(read)[0]
record[7] = ("none" if self.adapter.name is None else self.adapter.name) + namesuffix
records.append(record)
read = match.trimmed(read)
return records
class LinkedAdapter(Adapter):
"""A 5' adapter combined with a 3' adapter"""
description = "linked"
def __init__(
self,
front_adapter: SingleAdapter,
back_adapter: SingleAdapter,
front_required: bool,
back_required: bool,
name: str,
):
super().__init__(name)
self.front_required = front_required
self.back_required = back_required
# The following attributes are needed for the report
self.where = "linked"
self.name = _generate_adapter_name() if name is None else name
self.front_adapter = front_adapter
self.front_adapter.name = self.name
self.back_adapter = back_adapter
def descriptive_identifier(self) -> str:
return "linked"
def enable_debug(self):
self.front_adapter.enable_debug()
self.back_adapter.enable_debug()
def match_to(self, sequence: str) -> Optional[LinkedMatch]:
"""
Match the two linked adapters against a string
"""
front_match = self.front_adapter.match_to(sequence)
if self.front_required and front_match is None:
return None
if front_match is not None:
sequence = sequence[front_match.trim_slice()]
back_match = self.back_adapter.match_to(sequence)
if back_match is None and (self.back_required or front_match is None):
return None
return LinkedMatch(front_match, back_match, self)
def create_statistics(self) -> LinkedAdapterStatistics:
return LinkedAdapterStatistics(self, front=self.front_adapter, back=self.back_adapter)
@property
def sequence(self):
return self.front_adapter.sequence + "..." + self.back_adapter.sequence
@property
def remove(self):
return None
def spec(self) -> str:
return f"{self.front_adapter.spec()}...{self.back_adapter.spec()}"
class MultipleAdapters(Matchable):
"""
Represent multiple adapters at once
"""
def __init__(self, adapters: Sequence[Matchable]):
super().__init__(name="multiple_adapters")
self._adapters = adapters
def enable_debug(self):
for a in self._adapters:
a.enable_debug()
def __getitem__(self, item):
return self._adapters[item]
def __len__(self):
return len(self._adapters)
def match_to(self, sequence: str) -> Optional[SingleMatch]:
"""
Find the adapter that best matches the sequence.
Return either a Match instance or None if there are no matches.
"""
best_match = None
for adapter in self._adapters:
match = adapter.match_to(sequence)
if match is None:
continue
# the no. of matches determines which adapter fits best
if best_match is None or match.matches > best_match.matches or (
match.matches == best_match.matches and match.errors < best_match.errors
):
best_match = match
return best_match
class IndexedAdapters(Matchable, ABC):
"""
Represent multiple adapters of the same type at once and use an index data structure
to speed up matching. This acts like a "normal" Adapter as it provides a match_to
method, but is faster with lots of adapters.
There are quite a few restrictions:
- the error rate allows at most 2 mismatches
- wildcards in the adapter are not allowed
- wildcards in the read are not allowed
Use the is_acceptable() method to check individual adapters.
"""
AdapterIndex = Dict[str, Tuple[SingleAdapter, int, int]]
def __init__(self, adapters):
"""All given adapters must be of the same type"""
super().__init__(name="indexed_adapters")
if not adapters:
raise ValueError("Adapter list is empty")
for adapter in adapters:
self._accept(adapter)
self._adapters = adapters
self._multiple_adapters = MultipleAdapters(adapters)
self._lengths, self._index = self._make_index()
logger.debug("String lengths in the index: %s", sorted(self._lengths, reverse=True))
if len(self._lengths) == 1:
self._length = self._lengths[0]
self.match_to = self._match_to_one_length
else:
self.match_to = self._match_to_multiple_lengths
self._make_affix = self._get_make_affix()
def __repr__(self):
return f"{self.__class__.__name__}(adapters={self._adapters!r})"
def match_to(self, sequence: str):
"""Never called because it gets overwritten in __init__"""
@abstractmethod
def _get_make_affix(self):
pass
@abstractmethod
def _make_match(self, adapter, length, matches, errors, sequence) -> SingleMatch:
pass
@classmethod
def _accept(cls, adapter):
"""Raise a ValueError if the adapter is not acceptable"""
if adapter.read_wildcards:
raise ValueError("Wildcards in the read not supported")
if adapter.adapter_wildcards:
raise ValueError("Wildcards in the adapter not supported")
k = int(len(adapter) * adapter.max_error_rate)
if k > 2:
raise ValueError("Error rate too high")
@classmethod
def is_acceptable(cls, adapter):
"""
Return whether this adapter is acceptable for being used in an index
Adapters are not acceptable if they allow wildcards, allow too many errors,
or would lead to a very large index.
"""
try:
cls._accept(adapter)
except ValueError:
return False
return True
def _make_index(self) -> Tuple[List[int], "AdapterIndex"]:
logger.info('Building index of %s adapters ...', len(self._adapters))
index: Dict[str, Tuple[SingleAdapter, int, int]] = dict()
lengths = set()
has_warned = False
for adapter in self._adapters:
sequence = adapter.sequence
k = int(adapter.max_error_rate * len(sequence))
environment = edit_environment if adapter.indels else hamming_environment
for s, errors, matches in environment(sequence, k):
if s in index:
other_adapter, other_errors, other_matches = index[s]
if matches < other_matches:
continue
if other_matches == matches and not has_warned:
logger.warning(
"Adapters %s %r and %s %r are very similar. At %s allowed errors, "
"the sequence %r cannot be assigned uniquely because the number of "
"matches is %s compared to both adapters.",
other_adapter.name, other_adapter.sequence, adapter.name,
adapter.sequence, k, s, matches
)
has_warned = True
else:
index[s] = (adapter, errors, matches)
lengths.add(len(s))
logger.info('Built an index containing %s strings.', len(index))
return sorted(lengths, reverse=True), index
def _match_to_one_length(self, sequence: str):
"""
Match the adapters against a string and return a Match that represents
the best match or None if no match was found
"""
affix = self._make_affix(sequence.upper(), self._length)
if "N" in affix:
# Fall back to non-indexed matching
return self._multiple_adapters.match_to(sequence)
try:
adapter, e, m = self._index[affix]
except KeyError:
return None
return self._make_match(adapter, self._length, m, e, sequence)
def _match_to_multiple_lengths(self, sequence: str):
"""
Match the adapters against a string and return a Match that represents
the best match or None if no match was found
"""
affix = sequence.upper()
# Check all the prefixes or suffixes (affixes) that could match
best_adapter: Optional[SingleAdapter] = None
best_length = 0
best_m = -1
best_e = 1000
check_n = True
for length in self._lengths:
if length < best_m:
# No chance of getting the same or a higher number of matches, so we can stop early
break
affix = self._make_affix(affix, length)
if check_n:
if "N" in affix:
return self._multiple_adapters.match_to(sequence)
check_n = False
try:
adapter, e, m = self._index[affix]
except KeyError:
continue
if m > best_m or (m == best_m and e < best_e):
# TODO this could be made to work:
# assert best_m == -1
best_adapter = adapter
best_e = e
best_m = m
best_length = length
if best_m == -1:
return None
else:
return self._make_match(best_adapter, best_length, best_m, best_e, sequence)
def enable_debug(self):
pass
class IndexedPrefixAdapters(IndexedAdapters):
@classmethod
def _accept(cls, adapter):
if not isinstance(adapter, PrefixAdapter):
raise ValueError("Only 5' anchored adapters are allowed")
return super()._accept(adapter)
def _make_match(self, adapter, length, matches, errors, sequence):
return RemoveBeforeMatch(
astart=0,
astop=len(adapter.sequence),
rstart=0,
rstop=length,
matches=matches,
errors=errors,
adapter=adapter,
sequence=sequence,
)
def _get_make_affix(self):
return self._make_prefix
@staticmethod
def _make_prefix(s, n):
return s[:n]
class IndexedSuffixAdapters(IndexedAdapters):
@classmethod
def _accept(cls, adapter):
if not isinstance(adapter, SuffixAdapter):
raise ValueError("Only anchored 3' adapters are allowed")
return super()._accept(adapter)
def _make_match(self, adapter, length, matches, errors, sequence):
return RemoveAfterMatch(
astart=0,
astop=len(adapter.sequence),
rstart=len(sequence) - length,
rstop=len(sequence),
matches=matches,
errors=errors,
adapter=adapter,
sequence=sequence,
)
def _get_make_affix(self):
return self._make_suffix
@staticmethod
def _make_suffix(s, n):
return s[-n:]
def warn_duplicate_adapters(adapters):
d = dict()
for adapter in adapters:
key = (adapter.__class__, adapter.sequence)
if key in d:
logger.warning("Adapter %r (%s) was specified multiple times! "
"Please make sure that this is what you want.",
adapter.sequence, adapter.description)
d[key] = adapter.name
def remainder(matches: Sequence[Match]) -> Tuple[int, int]:
"""
Determine which section of the read would not be trimmed. Return a tuple (start, stop)
that gives the interval of the untrimmed part relative to the original read.
matches must be non-empty
"""
if not matches:
raise ValueError("matches must not be empty")
start = 0
for match in matches:
match_start, match_stop = match.remainder_interval()
start += match_start
length = match_stop - match_start
return (start, start + length)
|
tests/previs/test_track.py
|
MehmetErer/anima
| 101 |
85958
|
<filename>tests/previs/test_track.py
# -*- coding: utf-8 -*-
import unittest
from anima.edit import Track, Clip, File
class TrackTestCase(unittest.TestCase):
"""tests the anima.previs.Track class
"""
def test_to_xml_method_is_working_properly(self):
"""testing if the to xml method is working properly
"""
t = Track()
t.enabled = True
t.locked = False
# clip 1
f = File()
f.duration = 34
f.name = 'shot2'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
c = Clip()
c.id = 'shot2'
c.start = 1
c.end = 35
c.name = 'shot2'
c.enabled = True
c.duration = 34
c.in_ = 0
c.out = 34
c.file = f
t.clips.append(c)
# clip 2
f = File()
f.duration = 30
f.name = 'shot'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov'
c = Clip()
c.id = 'shot'
c.start = 35
c.end = 65
c.name = 'shot'
c.enabled = True
c.duration = 30
c.in_ = 0
c.out = 30
c.file = f
t.clips.append(c)
# clip 3
f = File()
f.duration = 45
f.name = 'shot1'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'
c = Clip()
c.id = 'shot1'
c.start = 65
c.end = 110
c.name = 'shot1'
c.enabled = True
c.duration = 45
c.in_ = 0
c.out = 45
c.file = f
t.clips.append(c)
expected_xml = \
"""<track>
<locked>FALSE</locked>
<enabled>TRUE</enabled>
<clipitem id="shot2">
<end>35</end>
<name>shot2</name>
<enabled>True</enabled>
<start>1</start>
<in>0</in>
<duration>34</duration>
<out>34</out>
<file id="shot2.mov">
<duration>34</duration>
<name>shot2</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov</pathurl>
</file>
</clipitem>
<clipitem id="shot">
<end>65</end>
<name>shot</name>
<enabled>True</enabled>
<start>35</start>
<in>0</in>
<duration>30</duration>
<out>30</out>
<file id="shot.mov">
<duration>30</duration>
<name>shot</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov</pathurl>
</file>
</clipitem>
<clipitem id="shot1">
<end>110</end>
<name>shot1</name>
<enabled>True</enabled>
<start>65</start>
<in>0</in>
<duration>45</duration>
<out>45</out>
<file id="shot1.mov">
<duration>45</duration>
<name>shot1</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov</pathurl>
</file>
</clipitem>
</track>"""
self.assertEqual(
expected_xml,
t.to_xml()
)
def test_from_xml_method_is_working_properly(self):
"""testing if the from_xml method will fill object attributes from the
given xml node
"""
from xml.etree import ElementTree
track_node = ElementTree.Element('track')
locked_node = ElementTree.SubElement(track_node, 'locked')
locked_node.text = 'FALSE'
enabled_node = ElementTree.SubElement(track_node, 'enabled')
enabled_node.text = 'TRUE'
# clip1
clip_node = ElementTree.SubElement(track_node, 'clipitem',
attrib={'id': 'shot2'})
end_node = ElementTree.SubElement(clip_node, 'end')
end_node.text = '35'
name_node = ElementTree.SubElement(clip_node, 'name')
name_node.text = 'shot2'
enabled_node = ElementTree.SubElement(clip_node, 'enabled')
enabled_node.text = 'True'
start_node = ElementTree.SubElement(clip_node, 'start')
start_node.text = '1'
in_node = ElementTree.SubElement(clip_node, 'in')
in_node.text = '0'
duration_node = ElementTree.SubElement(clip_node, 'duration')
duration_node.text = '34'
out_node = ElementTree.SubElement(clip_node, 'out')
out_node.text = '34'
file_node = ElementTree.SubElement(clip_node, 'file')
duration_node = ElementTree.SubElement(file_node, 'duration')
duration_node.text = '34'
name_node = ElementTree.SubElement(file_node, 'name')
name_node.text = 'shot2'
pathurl_node = ElementTree.SubElement(file_node, 'pathurl')
pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
pathurl_node.text = pathurl
# clip2
clip_node = ElementTree.SubElement(track_node, 'clipitem',
attrib={'id': 'shot'})
end_node = ElementTree.SubElement(clip_node, 'end')
end_node.text = '65'
name_node = ElementTree.SubElement(clip_node, 'name')
name_node.text = 'shot'
enabled_node = ElementTree.SubElement(clip_node, 'enabled')
enabled_node.text = 'True'
start_node = ElementTree.SubElement(clip_node, 'start')
start_node.text = '35'
in_node = ElementTree.SubElement(clip_node, 'in')
in_node.text = '0'
duration_node = ElementTree.SubElement(clip_node, 'duration')
duration_node.text = '30'
out_node = ElementTree.SubElement(clip_node, 'out')
out_node.text = '30'
file_node = ElementTree.SubElement(clip_node, 'file')
duration_node = ElementTree.SubElement(file_node, 'duration')
duration_node.text = '30'
name_node = ElementTree.SubElement(file_node, 'name')
name_node.text = 'shot'
pathurl_node = ElementTree.SubElement(file_node, 'pathurl')
pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov'
pathurl_node.text = pathurl
# clip3
clip_node = ElementTree.SubElement(track_node, 'clipitem',
attrib={'id': 'shot1'})
end_node = ElementTree.SubElement(clip_node, 'end')
end_node.text = '110'
name_node = ElementTree.SubElement(clip_node, 'name')
name_node.text = 'shot1'
enabled_node = ElementTree.SubElement(clip_node, 'enabled')
enabled_node.text = 'True'
start_node = ElementTree.SubElement(clip_node, 'start')
start_node.text = '65'
in_node = ElementTree.SubElement(clip_node, 'in')
in_node.text = '0'
duration_node = ElementTree.SubElement(clip_node, 'duration')
duration_node.text = '45'
out_node = ElementTree.SubElement(clip_node, 'out')
out_node.text = '45'
file_node = ElementTree.SubElement(clip_node, 'file')
duration_node = ElementTree.SubElement(file_node, 'duration')
duration_node.text = '45'
name_node = ElementTree.SubElement(file_node, 'name')
name_node.text = 'shot1'
pathurl_node = ElementTree.SubElement(file_node, 'pathurl')
pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'
pathurl_node.text = pathurl
t = Track()
t.from_xml(track_node)
self.assertEqual(False, t.locked)
self.assertEqual(True, t.enabled)
# clip1
c = t.clips[0]
self.assertEqual(35, c.end)
self.assertEqual('shot2', c.name)
self.assertEqual(True, c.enabled)
self.assertEqual(1, c.start)
self.assertEqual(0, c.in_)
self.assertEqual(34, c.duration)
self.assertEqual(34, c.out)
f = c.file
self.assertEqual(34, f.duration)
self.assertEqual('shot2', f.name)
self.assertEqual(
'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov',
f.pathurl
)
# clip2
c = t.clips[1]
self.assertEqual(65, c.end)
self.assertEqual('shot', c.name)
self.assertEqual(True, c.enabled)
self.assertEqual(35, c.start)
self.assertEqual(0, c.in_)
self.assertEqual(30, c.duration)
self.assertEqual(30, c.out)
f = c.file
self.assertEqual(30, f.duration)
self.assertEqual('shot', f.name)
self.assertEqual(
'file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov',
f.pathurl
)
# clip3
c = t.clips[2]
self.assertEqual(110, c.end)
self.assertEqual('shot1', c.name)
self.assertEqual(True, c.enabled)
self.assertEqual(65, c.start)
self.assertEqual(0, c.in_)
self.assertEqual(45, c.duration)
self.assertEqual(45, c.out)
f = c.file
self.assertEqual(45, f.duration)
self.assertEqual('shot1', f.name)
self.assertEqual(
'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov',
f.pathurl
)
def test_optimize_clips_is_working_properly(self):
"""testing if the optimize_clips method will optimize the clips to use
the same file node if the file pathurls are same
"""
t = Track()
t.enabled = True
t.locked = False
# clip 1
f = File()
f.duration = 34
f.name = 'shot2'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
c = Clip()
c.id = 'shot2'
c.start = 1
c.end = 35
c.name = 'shot2'
c.enabled = True
c.duration = 34
c.in_ = 0
c.out = 34
c.file = f
t.clips.append(c)
# clip 2
f = File()
f.duration = 30
f.name = 'shot'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
c = Clip()
c.id = 'shot'
c.start = 35
c.end = 65
c.name = 'shot'
c.enabled = True
c.duration = 30
c.in_ = 0
c.out = 30
c.file = f
t.clips.append(c)
# clip 3
f = File()
f.duration = 45
f.name = 'shot1'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'
c = Clip()
c.id = 'shot1'
c.start = 65
c.end = 110
c.name = 'shot1'
c.enabled = True
c.duration = 45
c.in_ = 0
c.out = 45
c.file = f
t.clips.append(c)
# check if the file objects are different
self.assertNotEqual(t.clips[0].file, t.clips[1].file)
self.assertNotEqual(t.clips[0].file, t.clips[2].file)
self.assertNotEqual(t.clips[1].file, t.clips[2].file)
# now optimize the clips
t.optimize_clips()
# check if the file[0] and file[1] is the same file node
# and the file[2] is different than the others
self.assertEqual(t.clips[0].file, t.clips[1].file)
self.assertNotEqual(t.clips[0].file, t.clips[2].file)
self.assertNotEqual(t.clips[1].file, t.clips[2].file)
def test_to_xml_method_with_optimized_clips_is_working_properly(self):
"""testing if the to xml method is working properly with the clips are
optimized
"""
t = Track()
t.enabled = True
t.locked = False
# clip 1
f = File()
f.duration = 34
f.name = 'shot2'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
c = Clip()
c.id = 'shot2'
c.start = 1
c.end = 35
c.name = 'shot2'
c.enabled = True
c.duration = 34
c.in_ = 0
c.out = 34
c.file = f
t.clips.append(c)
# clip 2
f = File()
f.duration = 30
f.name = 'shot'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
c = Clip()
c.id = 'shot2'
c.start = 35
c.end = 65
c.name = 'shot2'
c.enabled = True
c.duration = 30
c.in_ = 0
c.out = 30
c.file = f
t.clips.append(c)
# clip 3
f = File()
f.duration = 45
f.name = 'shot1'
f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'
c = Clip()
c.id = 'shot1'
c.start = 65
c.end = 110
c.name = 'shot1'
c.enabled = True
c.duration = 45
c.in_ = 0
c.out = 45
c.file = f
t.clips.append(c)
expected_xml = \
"""<track>
<locked>FALSE</locked>
<enabled>TRUE</enabled>
<clipitem id="shot2">
<end>35</end>
<name>shot2</name>
<enabled>True</enabled>
<start>1</start>
<in>0</in>
<duration>34</duration>
<out>34</out>
<file id="shot2.mov">
<duration>34</duration>
<name>shot2</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov</pathurl>
</file>
</clipitem>
<clipitem id="shot2 2">
<end>65</end>
<name>shot2</name>
<enabled>True</enabled>
<start>35</start>
<in>0</in>
<duration>30</duration>
<out>30</out>
<file id="shot2.mov"/>
</clipitem>
<clipitem id="shot1">
<end>110</end>
<name>shot1</name>
<enabled>True</enabled>
<start>65</start>
<in>0</in>
<duration>45</duration>
<out>45</out>
<file id="shot1.mov">
<duration>45</duration>
<name>shot1</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov</pathurl>
</file>
</clipitem>
</track>"""
t.optimize_clips()
self.assertEqual(
expected_xml,
t.to_xml()
)
|
pydpm/_model/_basic_model.py
|
HalcyonBravado/Pydpm
| 112 |
85975
|
# Author: <NAME> <<EMAIL>>; <NAME> <<EMAIL>>; <NAME> <<EMAIL>>
# License: BSD-3-Clause
class Params(object):
def __init__(self):
"""
The basic class for storing the parameters in the probabilistic model
"""
super(Params, self).__init__()
class Basic_Model(object):
def __init__(self, *args, **kwargs):
"""
The basic model for all probabilistic models in this package
Attributes:
@public:
global_params : [Params] the global parameters of the probabilistic model
local_params : [Params] the local parameters of the probabilistic model
@private:
_model_setting : [Params] the model settings of the probabilistic model
_hyper_params : [Params] the hyper parameters of the probabilistic model
"""
super(Basic_Model, self).__init__()
setattr(self, 'global_params', Params())
setattr(self, 'local_params', Params())
setattr(self, '_model_setting', Params())
setattr(self, '_hyper_params', Params())
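

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): the attribute
    # names Phi and K below are illustrative placeholders, not pydpm API.
    model = Basic_Model()
    model.global_params.Phi = None   # e.g. a global factor-loading matrix
    model._model_setting.K = 100     # e.g. number of topics
    print(isinstance(model.global_params, Params))  # True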
|
pyfakefs/tests/fixtures/module_with_attributes.py
|
kmerenkov/pyfakefs
| 422 |
86003
|
<reponame>kmerenkov/pyfakefs
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is for testing pyfakefs
:py:class:`fake_filesystem_unittest.Patcher`. It defines attributes that have
the same names as file modules, such as `io` and `path`. Since these are not
modules, :py:class:`fake_filesystem_unittest.Patcher` should not patch them.
Whenever a new module is added to
:py:meth:`fake_filesystem_unittest.Patcher._findModules`, the corresponding
attribute should be added here and in the test
:py:class:`fake_filesystem_unittest_test.TestAttributesWithFakeModuleNames`.
"""
os = 'os attribute value'
path = 'path attribute value'
pathlib = 'pathlib attribute value'
shutil = 'shutil attribute value'
io = 'io attribute value'
|
Geometry/RPCGeometry/test/rpcgeo.py
|
ckamtsikis/cmssw
| 852 |
86034
|
<filename>Geometry/RPCGeometry/test/rpcgeo.py
import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("Configuration.Geometry.GeometryExtended2015Reco_cff")
process.load("Geometry.RPCGeometry.rpcGeometry_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
process.MessageLogger = cms.Service("MessageLogger")
process.demo = cms.EDAnalyzer("RPCGEO")
process.p = cms.Path(process.demo)
|
recipes/Python/148291_Behavior_like_list_object_carried_out/recipe-148291.py
|
tdiprima/code
| 2,023 |
86066
|
class Listlike:
def __init__(self):
self.list = [1, 2, 3, 4, 5]
def __getitem__(self, index):
return self.list[index]
def __len__(self):
return len(self.list)
if __name__ == '__main__':
    listlike = Listlike()
    print(list(listlike))  # [1, 2, 3, 4, 5]
|
ptstat/dist/uniform.py
|
timmyzhao/ptstat
| 116 |
86080
|
import torch
from torch.autograd import Variable
from ptstat.core import RandomVariable, _to_v
# TODO: Implement Uniform(a, b) constructor.
class Uniform(RandomVariable):
"""
Uniform(0, 1) iid rv.
"""
def __init__(self, size, cuda=False):
super(Uniform, self).__init__()
assert len(size) == 2, str(size)
self._cuda = cuda
self._p_size = size
def _size(self):
return self._p_size
def _log_pdf(self, x):
return self._entropy()
def _sample(self):
# TODO: Use CUDA random_ when implemented.
y = Variable(torch.FloatTensor(*self._p_size).uniform_())
if self._cuda:
y = y.cuda()
return y
def _entropy(self):
return _to_v(0, self._p_size[0], self._cuda)
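

if __name__ == '__main__':
    # Rough usage sketch (an addition, not from ptstat). It calls the private
    # hooks defined above directly because the public wrapper names provided by
    # RandomVariable are not shown in this file.
    u = Uniform(size=[4, 2])
    sample = u._sample()
    print(sample.size())       # torch.Size([4, 2])
    print(u._log_pdf(sample))  # zeros: log-density of U(0, 1) is 0 on its support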
|
recipes/Python/303061_Remove_whitespaceonly_text_nodes_XML/recipe-303061.py
|
tdiprima/code
| 2,023 |
86106
|
import xml.dom as dom  # provides dom.Node.TEXT_NODE used below


def remove_whitespace_nodes(node, unlink=False):
    """Removes all of the whitespace-only text descendants of a DOM node.
    When creating a DOM from an XML source, XML parsers are required to
    consider several conditions when deciding whether to include
    whitespace-only text nodes. This function ignores all of those
    conditions and removes all whitespace-only text descendants of the
    specified node. If the unlink flag is specified, the removed text
    nodes are unlinked so that their storage can be reclaimed. If the
    specified node is a whitespace-only text node then it is left
    unmodified."""
remove_list = []
for child in node.childNodes:
if child.nodeType == dom.Node.TEXT_NODE and \
not child.data.strip():
remove_list.append(child)
elif child.hasChildNodes():
            remove_whitespace_nodes(child, unlink)
for node in remove_list:
node.parentNode.removeChild(node)
if unlink:
node.unlink()
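

if __name__ == '__main__':
    # Small demonstration (added for illustration) on an in-memory document.
    from xml.dom.minidom import parseString
    doc = parseString('<root>\n  <child>text</child>\n</root>')
    remove_whitespace_nodes(doc.documentElement, unlink=True)
    print(doc.documentElement.toxml())  # <root><child>text</child></root>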
|
mindspore_serving/server/start_worker.py
|
mindspore-ai/serving
| 157 |
86119
|
<gh_stars>100-1000
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Start worker process with single core servable"""
import os
import time
import threading
import signal
import argparse
import psutil
import mindspore_serving.log as logger
from mindspore_serving.server import worker
from mindspore_serving.server.common import check_type
from mindspore_serving._mindspore_serving import ExitSignalHandle_
from mindspore_serving._mindspore_serving import Worker_
_main_thread_exited = False
def start_listening_parent_thread(servable_name, device_id):
"""listening to parent process status"""
def worker_listening_parent_thread():
parent_process = psutil.Process(os.getppid())
while parent_process.is_running() and not ExitSignalHandle_.has_stopped():
time.sleep(0.1)
logger.warning(f"Worker {servable_name} device_id {device_id}, detect parent "
f"pid={parent_process.pid} has exited or receive Ctrl+C message, worker begin to exit"
f", parent running {parent_process.is_running()}, exit status {ExitSignalHandle_.has_stopped()}")
worker.stop()
cur_process = psutil.Process(os.getpid())
for _ in range(100): # 100x0.1=10s
try:
children = cur_process.children(recursive=True)
if not children and _main_thread_exited:
logger.info(f"All current children processes have exited")
break
for child in children:
os.kill(child.pid, signal.SIGTERM)
time.sleep(0.1)
# pylint: disable=broad-except
except Exception as e:
logger.warning(f"Kill children catch exception {e}")
thread = threading.Thread(target=worker_listening_parent_thread)
thread.start()
def start_worker(servable_directory, servable_name, version_number,
device_type, device_id, master_address, dec_key, dec_mode, listening_master=False):
"""Start worker process with single core servable"""
signal.signal(signal.SIGCHLD, signal.SIG_DFL) # for ccec compiler
check_type.check_str('servable_directory', servable_directory)
check_type.check_str('servable_name', servable_name)
check_type.check_int('version_number', version_number, 0)
check_type.check_str('device_type', device_type)
check_type.check_int('device_id', device_id, 0)
check_type.check_str('master_address', master_address)
check_type.check_bool('listening_master', listening_master)
ExitSignalHandle_.start() # Set flag to running and receive Ctrl+C message
if listening_master:
start_listening_parent_thread(servable_name, device_id)
# for servable_config.py to get device id of current worker.
os.environ["SERVING_DEVICE_ID"] = str(device_id)
worker_pid = os.getpid()
unix_socket_dir = "unix_socket_files"
try:
os.mkdir(unix_socket_dir)
except FileExistsError:
pass
worker_address = f"unix:{unix_socket_dir}/serving_worker_{servable_name}_device{device_id}_{worker_pid}"
if len(worker_address) > 107: # maximum unix domain socket address length
worker_address = worker_address[:50] + "___" + worker_address[-50:]
try:
worker.start_servable(servable_directory=servable_directory, servable_name=servable_name,
version_number=version_number, device_type=device_type, device_id=device_id,
master_address=master_address, worker_address=worker_address,
dec_key=dec_key, dec_mode=dec_mode)
except Exception as ex:
Worker_.notify_failed(master_address,
f"{{servable name:{servable_name}, device id:{device_id}, <{ex}>}}")
raise
def parse_args_and_start():
"""Parse args and start distributed worker"""
parser = argparse.ArgumentParser(description="Serving start extra worker")
parser.add_argument('--servable_directory', type=str, required=True, help="servable directory")
parser.add_argument('--servable_name', type=str, required=True, help="servable name")
parser.add_argument('--version_number', type=int, required=True, help="version numbers")
parser.add_argument('--device_type', type=str, required=True, help="device type")
parser.add_argument('--device_id', type=str, required=True, help="device id")
parser.add_argument('--master_address', type=str, required=True, help="master address")
parser.add_argument('--dec_key_pipe_file', type=str, required=True, help="dec key pipe file")
parser.add_argument('--dec_mode', type=str, required=True, help="dec mode")
parser.add_argument('--listening_master', type=str, required=True, help="whether listening master")
args = parser.parse_args()
servable_directory = args.servable_directory
servable_name = args.servable_name
version_number = int(args.version_number)
device_type = args.device_type
device_id = int(args.device_id)
master_address = args.master_address
dec_key_pipe = args.dec_key_pipe_file
if dec_key_pipe != "None":
with open(dec_key_pipe, "rb") as fp:
dec_key = fp.read()
prefix = "serving_temp_dec_"
if dec_key_pipe[:len(prefix)] == prefix:
os.remove(dec_key_pipe)
else:
dec_key = None
dec_mode = args.dec_mode
# pylint: disable=simplifiable-if-expression
listening_master = True if args.listening_master.lower() == "true" else False
try:
start_worker(servable_directory, servable_name, version_number, device_type, device_id, master_address,
dec_key, dec_mode, listening_master)
finally:
global _main_thread_exited
_main_thread_exited = True
if __name__ == '__main__':
parse_args_and_start()
|
pymtl3/stdlib/test_utils/valrdy_test_srcs.py
|
jbrzozo24/pymtl3
| 152 |
86157
|
"""
========================================================================
Test sources
========================================================================
Test sources with CL or RTL interfaces.
Author : <NAME>
Date : Mar 11, 2019
"""
from collections import deque
from copy import deepcopy
from pymtl3 import *
from pymtl3.stdlib.ifcs import OutValRdyIfc
class TestSrcRTL( Component ):
def construct( s, Type, msgs, initial_delay=0, interval_delay=0 ):
# Interface
s.out = OutValRdyIfc( Type )
# Data
s.msgs = deepcopy(msgs)
# TODO: use wires and ROM to make it translatable
s.idx = 0
s.num_msgs = len(s.msgs)
s.count = 0
@update_ff
def up_src():
if s.reset:
s.idx = 0
s.count = initial_delay
s.out.val <<= 0
else:
if s.out.val & s.out.rdy:
s.idx += 1
s.count = interval_delay
if s.count > 0:
s.count -= 1
s.out.val <<= 0
else: # s.count == 0
if s.idx < s.num_msgs:
s.out.val <<= 1
s.out.msg <<= s.msgs[s.idx]
else:
s.out.val <<= 0
def done( s ):
return s.idx >= s.num_msgs
# Line trace
def line_trace( s ):
return f"{s.out}"
|
cctbx/miller/tst_map_to_asu_isym.py
|
dperl-sol/cctbx_project
| 155 |
86160
|
from __future__ import absolute_import, division, print_function
from six.moves import range
def intify(a):
return tuple([int(round(val)) for val in a])
def reference_map(sg, mi):
from cctbx import sgtbx
asu = sgtbx.reciprocal_space_asu(sg.type())
isym_ = []
mi_ = []
for hkl in mi:
found = False
for i_inv in range(sg.f_inv()):
for i_smx in range(sg.n_smx()):
rt_mx = sg(0, i_inv, i_smx)
hkl_ = intify(hkl * rt_mx.r())
if asu.is_inside(hkl_):
mi_.append(hkl_)
if i_inv:
isym_.append(- i_smx)
else:
isym_.append(i_smx)
found = True
break
if found:
continue
else:
assert(not sg.is_centric())
for i_inv in range(sg.f_inv()):
for i_smx in range(sg.n_smx()):
rt_mx = sg(0, i_inv, i_smx)
_hkl = [-h for h in hkl]
mhkl_ = intify(_hkl * rt_mx.r())
if asu.is_inside(mhkl_):
mi_.append(mhkl_)
isym_.append(- i_smx)
found = True
break
return mi_, isym_
def tst_map_to_asu_isym(anomalous_flag):
from cctbx import sgtbx
from cctbx.miller import map_to_asu_isym
from cctbx.array_family import flex
mi = flex.miller_index()
i = flex.int()
import random
nhkl = 1000
for j in range(nhkl):
hkl = [random.randint(-10, 10) for j in range(3)]
mi.append(hkl)
i.append(0)
spacegroup = sgtbx.space_group_symbols(195).hall()
sg = sgtbx.space_group(spacegroup)
mi_, isym_ = reference_map(sg, mi)
map_to_asu_isym(sg.type(), anomalous_flag, mi, i)
for j in range(nhkl):
assert(i[j] == isym_[j])
if __name__ == '__main__':
tst_map_to_asu_isym(True)
tst_map_to_asu_isym(False)
print('OK')
|
articles/making-chrome-headless-undetectable/inject.py
|
sangaline/intoli-article-materials
| 248 |
86202
|
<reponame>sangaline/intoli-article-materials<filename>articles/making-chrome-headless-undetectable/inject.py
from bs4 import BeautifulSoup
from mitmproxy import ctx
# load in the javascript to inject
with open('injected-test-bypasses.js', 'r') as f:
content_js = f.read()
def response(flow):
# only process 200 responses of html content
    if not flow.response.headers.get('content-type', '').startswith('text/html'):
        return
if not flow.response.status_code == 200:
return
# inject the script tag
html = BeautifulSoup(flow.response.text, 'lxml')
container = html.head or html.body
if container:
script = html.new_tag('script', type='text/javascript')
script.string = content_js
container.insert(0, script)
flow.response.text = str(html)
ctx.log.info('Successfully injected the injected-test-bypasses.js script.')
|
screen.py
|
nicholas-zww/ActualVim
| 849 |
86224
|
class Cell:
def __init__(self, c=' '):
self.c = c
self.highlight = {}
def __mul__(self, n):
return [Cell(self.c) for i in range(n)]
def __str__(self):
return self.c
class Highlight:
def __init__(self, line, highlight):
self.line = line
self.highlight = highlight
self.start = 0
self.end = 0
def s(self):
return (self.line, self.start, self.end, tuple(self.highlight.items()))
def __eq__(self, h):
return self.s() == h.s()
def __hash__(self):
return hash((self.line, self.start, self.end, tuple(self.highlight.items())))
class Screen:
def __init__(self):
self.x = 0
self.y = 0
self.resize(1, 1)
self.highlight = {}
self.changes = 0
def resize(self, w, h):
self.w = w
self.h = h
# TODO: should resize clear?
self.screen = [Cell() * w for i in range(h)]
self.scroll_region = [0, self.h, 0, self.w]
# clamp cursor
self.x = min(self.x, w - 1)
self.y = min(self.y, h - 1)
def clear(self):
self.resize(self.w, self.h)
def scroll(self, dy):
ya, yb = self.scroll_region[0:2]
xa, xb = self.scroll_region[2:4]
yi = (ya, yb)
if dy < 0:
yi = (yb, ya - 1)
for y in range(yi[0], yi[1], int(dy / abs(dy))):
if ya <= y + dy < yb:
self.screen[y][xa:xb] = self.screen[y + dy][xa:xb]
else:
self.screen[y][xa:xb] = Cell() * (xb - xa)
def redraw(self, updates):
blacklist = [
'mode_change',
'bell', 'mouse_on', 'highlight_set',
            'update_fg', 'update_bg', 'update_sp', 'clear',
]
changed = False
for cmd in updates:
if not cmd:
continue
name, args = cmd[0], cmd[1:]
if name == 'cursor_goto':
self.y, self.x = args[0]
elif name == 'eol_clear':
changed = True
self.screen[self.y][self.x:] = Cell() * (self.w - self.x)
elif name == 'put':
changed = True
for cs in args:
for c in cs:
cell = self.screen[self.y][self.x]
cell.c = c
cell.highlight = self.highlight
self.x += 1
# TODO: line wrap is not specified, neither is wrapping off the end. semi-sane defaults.
if self.x >= self.w:
self.x = 0
self.y += 1
if self.y >= self.h:
self.y = 0
elif name == 'resize':
changed = True
self.resize(*args[0])
elif name == 'highlight_set':
self.highlight = args[0][0]
elif name == 'set_scroll_region':
self.scroll_region = args[0]
elif name == 'scroll':
changed = True
self.scroll(args[0][0])
elif name in blacklist:
pass
# else:
# print('unknown update cmd', name)
if changed:
self.changes += 1
def highlights(self):
hlset = []
for y, line in enumerate(self.screen):
cur = {}
h = None
for x, cell in enumerate(line):
if h and cur and cell.highlight == cur:
h.end = x + 1
else:
cur = cell.highlight
if cur:
h = Highlight(y, cur)
h.start = x
h.end = x + 1
hlset.append(h)
return hlset
def p(self):
print('-' * self.w)
print(str(self))
print('-' * self.w)
def __setitem__(self, xy, c):
x, y = xy
try:
cell = self.screen[y][x]
cell.c = c
cell.highlight = self.highlight
except IndexError:
pass
def __getitem__(self, y):
if isinstance(y, tuple):
return self.screen[y[1]][y[0]]
return ''.join(str(c) for c in self.screen[y])
def __str__(self):
return '\n'.join([self[y] for y in range(self.h)])
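

if __name__ == '__main__':
    # Illustrative sketch (not part of ActualVim): feed a few nvim-style redraw
    # events into Screen; the exact payload shapes are assumptions based on the
    # handlers above.
    s = Screen()
    s.redraw([['resize', (8, 2)], ['cursor_goto', (0, 0)], ['put', 'hi']])
    s.p()  # prints an 8x2 grid with "hi" in the top-left corner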
|
data_processing/create_voxel_off.py
|
vedanthpadigelwar/if-net
| 209 |
86245
|
<filename>data_processing/create_voxel_off.py
from voxels import VoxelGrid
import numpy as np
import multiprocessing as mp
from multiprocessing import Pool
import glob
import os
import argparse
def create_voxel_off(path):
voxel_path = path + '/voxelization_{}.npy'.format( res)
off_path = path + '/voxelization_{}.off'.format( res)
if unpackbits:
occ = np.unpackbits(np.load(voxel_path))
voxels = np.reshape(occ, (res,)*3)
else:
voxels = np.reshape(np.load(voxel_path)['occupancies'], (res,)*3)
loc = ((min+max)/2, )*3
scale = max - min
VoxelGrid(voxels, loc, scale).to_mesh().export(off_path)
print('Finished: {}'.format(path))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Run voxalization to off'
)
parser.add_argument('-res', type=int)
args = parser.parse_args()
ROOT = 'shapenet/data'
unpackbits = True
res = args.res
min = -0.5
max = 0.5
p = Pool(mp.cpu_count())
p.map(create_voxel_off, glob.glob( ROOT + '/*/*/'))
|
pygorithm/geometry/rect_broad_phase.py
|
bharadwaj-9/pygorithm
| 4,736 |
86269
|
"""
Author: <NAME>
Created On: 23 August 2017
"""
# To test whether two axis-aligned rectangles intersect, it is enough to check
# that their projections overlap on all of the coordinate axes
import inspect
class Coord:
"""Coord
Class to initialize Coordinate of one point
"""
def __init__(self, x, y):
self.x = x
self.y = y
class SimpleRectangle:
"""SimpleRectangle
    Axis-aligned rectangle defined by its minimum and maximum corner coordinates
"""
def __init__(self, coord1, coord2):
"""
:type coord1: object of class Coord
:type coord2: object of class Coord
"""
self.min_x = coord1.x
self.min_y = coord1.y
self.max_x = coord2.x
self.max_y = coord2.y
def broad_phase(simpleRect1, simpleRect2):
"""
:type simpleRect1: object
:type simpleRect2: object
"""
d1x = simpleRect2.min_x - simpleRect1.max_x
d1y = simpleRect2.min_y - simpleRect1.max_y
d2x = simpleRect1.min_x - simpleRect2.max_x
d2y = simpleRect1.min_y - simpleRect2.max_y
if d1x > 0 or d1y > 0:
return False
if d2x > 0 or d2y > 0:
return False
return True
def get_code():
"""
returns the code for the broad phase function
"""
return inspect.getsource(broad_phase)
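

if __name__ == '__main__':
    # Quick sanity check of the broad-phase test (coordinates are illustrative).
    a = SimpleRectangle(Coord(0, 0), Coord(4, 4))
    b = SimpleRectangle(Coord(3, 3), Coord(6, 6))   # overlaps a
    c = SimpleRectangle(Coord(5, 5), Coord(8, 8))   # disjoint from a
    print(broad_phase(a, b))  # True
    print(broad_phase(a, c))  # False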
|
examples/python_api_example/src/example.py
|
zmughal-contrib/SourcetrailDB
| 274 |
86314
|
import argparse
import os
import sourcetraildb as srctrl
def main():
parser = argparse.ArgumentParser(description="SourcetrailDB Python API Example")
parser.add_argument("--database-file-path", help="path to the generated Sourcetrail database file",
type=str, required=True)
parser.add_argument("--source-file-path", help="path to the source file to index",
type=str, required=True)
parser.add_argument("--database-version", help="database version of the invoking Sourcetrail binary",
type=int, required=False, default=0)
args = parser.parse_args()
databaseFilePath = args.database_file_path
sourceFilePath = args.source_file_path.replace("\\", "/")
dbVersion = args.database_version
print("SourcetrailDB Python API Example")
print("Supported database version: " + str(srctrl.getSupportedDatabaseVersion()))
if dbVersion > 0 and dbVersion != srctrl.getSupportedDatabaseVersion():
print("ERROR: Only supports database version: " + str(srctrl.getSupportedDatabaseVersion()) +
". Requested version: " + str(dbVersion))
return 1
if not srctrl.open(databaseFilePath):
print("ERROR: " + srctrl.getLastError())
return 1
print("Clearing loaded database now...")
srctrl.clear()
print("start indexing")
srctrl.beginTransaction()
fileId = srctrl.recordFile(sourceFilePath)
srctrl.recordFileLanguage(fileId, "python")
if len(srctrl.getLastError()) > 0:
print("ERROR: " + srctrl.getLastError())
return 1
symbolId = srctrl.recordSymbol(
'{ "name_delimiter": ".", "name_elements": [ '
'{ "prefix": "", "name": "MyType", "postfix": "" } '
'] }')
srctrl.recordSymbolDefinitionKind(symbolId, srctrl.DEFINITION_EXPLICIT)
srctrl.recordSymbolKind(symbolId, srctrl.SYMBOL_CLASS)
srctrl.recordSymbolLocation(symbolId, fileId, 2, 7, 2, 12)
srctrl.recordSymbolScopeLocation(symbolId, fileId, 2, 1, 7, 1)
memberId = srctrl.recordSymbol(
'{ "name_delimiter": ".", "name_elements": [ '
'{ "prefix": "", "name": "MyType", "postfix": "" }, '
'{ "prefix": "", "name": "my_member", "postfix": "" } '
'] }')
srctrl.recordSymbolDefinitionKind(memberId, srctrl.DEFINITION_EXPLICIT)
srctrl.recordSymbolKind(memberId, srctrl.SYMBOL_FIELD)
srctrl.recordSymbolLocation(memberId, fileId, 4, 2, 4, 10)
methodId = srctrl.recordSymbol(
'{ "name_delimiter": ".", "name_elements": [ '
'{ "prefix": "", "name": "MyType", "postfix": "" }, '
'{ "prefix": "", "name": "my_method", "postfix": "" } '
'] }')
srctrl.recordSymbolDefinitionKind(methodId, srctrl.DEFINITION_EXPLICIT)
srctrl.recordSymbolKind(methodId, srctrl.SYMBOL_METHOD)
srctrl.recordSymbolLocation(methodId, fileId, 6, 6, 6, 14)
srctrl.recordSymbolScopeLocation(methodId, fileId, 6, 1, 7, 1)
useageId = srctrl.recordReference(methodId, memberId, srctrl.REFERENCE_USAGE)
srctrl.recordReferenceLocation(useageId, fileId, 7, 10, 7, 18)
srctrl.commitTransaction()
if len(srctrl.getLastError()) > 0:
print("ERROR: " + srctrl.getLastError())
return 1
if not srctrl.close():
print("ERROR: " + srctrl.getLastError())
return 1
print("done")
return 0
main()
|
15Flask/day04/cart/__init__.py
|
HaoZhang95/PythonAndMachineLearning
| 937 |
86321
|
from flask import Blueprint
# Each module blueprint can have its own static folder; the default app already
# configures the application-level static folder, but a blueprint has to register
# the path to its static files itself.
# To distinguish whether static files come from the main application or from this
# module, a URL prefix is added when the blueprint is created.
# With the prefix in place, every route on this blueprint automatically gets the
# prefix, and images are then loaded as /cart/static/xxx.img.
# If the module templates and the main application templates contain an HTML file
# with the same name, the main application's template takes precedence.
cart_blue = Blueprint('cart', __name__,
static_folder='static',
template_folder='templates',
url_prefix='/cart')
# views can only be imported after the blueprint has been created, because
# .views uses the blueprint.
from .views import *
|
codewars_Error_correction_1__Hamming_Code.py
|
DazEB2/SimplePyScripts
| 117 |
86355
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://www.codewars.com/kata/5ef9ca8b76be6d001d5e1c3e/train/python
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i: i + n]
# Task 1: Encode function
#
# Implement the encode function, using the following steps:
# - convert every letter of the text to its ASCII value;
# - convert ASCII values to 8-bit binary;
# - triple every bit;
# - concatenate the result;
#
# For example:
# input: "hey"
# --> 104, 101, 121 // ASCII values
# --> 01101000, 01100101, 01111001 // binary
# --> 000111111000111000000000 000111111000000111000111 000111111111111000000111 // tripled
# --> "000111111000111000000000000111111000000111000111000111111111111000000111" // concatenated
def encode(text: str) -> str:
return ''.join(
''.join(b * 3 for b in f"{ord(c):08b}") # Tripled bits
for c in text
)
# Task 2: Decode function:
# Check if any errors happened and correct them. Errors will be only bit flips, and not a loss of bits:
# - 111 --> 101 : this can and will happen
# - 111 --> 11 : this cannot happen
#
# Note: the length of the input string is also always divisible by 24 so that you can convert it to an ASCII value.
#
# Steps:
# - Split the input into groups of three characters;
# - Check if an error occurred: replace each group with the character that occurs most often,
# e.g. 010 --> 0, 110 --> 1, etc;
# - Take each group of 8 characters and convert that binary number;
# - Convert the binary values to decimal (ASCII);
# - Convert the ASCII values to characters and concatenate the result
#
# For example:
# input: "100111111000111001000010000111111000000111001111000111110110111000010111"
# --> 100, 111, 111, 000, 111, 001, ... // triples
# --> 0, 1, 1, 0, 1, 0, ... // corrected bits
# --> 01101000, 01100101, 01111001 // bytes
# --> 104, 101, 121 // ASCII values
# --> "hey"
def decode(bits: str) -> str:
bit_items = []
for tripled_bits in chunks(bits, 3):
sums = sum(map(int, tripled_bits))
        # Majority vote over each tripled bit, e.g. 110 or 111 -> 1, 000 or 001 -> 0
bit_items.append('1' if sums == 2 or sums == 3 else '0')
binary = ''.join(bit_items)
items = []
for byte in chunks(binary, 8):
items.append(chr(int(byte, 2)))
return ''.join(items)
if __name__ == '__main__':
text = 'hey'
encoded = encode(text)
print(encoded)
assert encoded == '000111111000111000000000000111111000000111000111000111111111111000000111'
decoded = decode(encoded)
print(decoded)
assert text == decoded
invalid_encoded = '100111111000111001000010000111111000000111001111000111110110111000010111'
decoded = decode(invalid_encoded)
print(decoded)
assert text == decoded
|
intro/summary-exercises/examples/plot_optimize_lidar_data_fit.py
|
zmoon/scipy-lecture-notes
| 2,538 |
86361
|
"""
The lidar system, data and fit (1 of 2 datasets)
================================================
Generate a chart of the data fitted by Gaussian curve
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
def model(t, coeffs):
return coeffs[0] + coeffs[1] * np.exp(- ((t-coeffs[2])/coeffs[3])**2)
def residuals(coeffs, y, t):
return y - model(t, coeffs)
waveform_1 = np.load('waveform_1.npy')
t = np.arange(len(waveform_1))
x0 = np.array([3, 30, 15, 1], dtype=float)
x, flag = leastsq(residuals, x0, args=(waveform_1, t))
print(x)
fig, ax = plt.subplots(figsize=(8, 6))
plt.plot(t, waveform_1, t, model(t, x))
plt.xlabel('Time [ns]')
plt.ylabel('Amplitude [bins]')
plt.legend(['Waveform', 'Model'])
plt.show()
|
venv/Lib/site-packages/statsmodels/tsa/arima/datasets/brockwell_davis_2002/data/dowj.py
|
EkremBayar/bayar
| 6,931 |
86363
|
<gh_stars>1000+
"""
Dow-Jones Utilities Index, Aug.28--Dec.18, 1972.
Dataset described in [1]_ and included as a part of the ITSM2000 software [2]_.
Downloaded on April 22, 2019 from:
http://www.springer.com/cda/content/document/cda_downloaddocument/ITSM2000.zip
See also https://finance.yahoo.com/quote/%5EDJU/history?period1=83822400&period2=93502800&interval=1d&filter=history&frequency=1d
TODO: Add the correct business days index for this data (freq='B' does not work)
References
----------
.. [1] Brockwell, <NAME>., and <NAME>. 2016.
Introduction to Time Series and Forecasting. Springer.
.. [2] Brockwell, <NAME>., and <NAME>. n.d. ITSM2000.
""" # noqa:E501
import pandas as pd
dowj = pd.Series([
110.94, 110.69, 110.43, 110.56, 110.75, 110.84, 110.46, 110.56, 110.46,
110.05, 109.6, 109.31, 109.31, 109.25, 109.02, 108.54, 108.77, 109.02,
109.44, 109.38, 109.53, 109.89, 110.56, 110.56, 110.72, 111.23, 111.48,
111.58, 111.9, 112.19, 112.06, 111.96, 111.68, 111.36, 111.42, 112,
112.22, 112.7, 113.15, 114.36, 114.65, 115.06, 115.86, 116.4, 116.44,
116.88, 118.07, 118.51, 119.28, 119.79, 119.7, 119.28, 119.66, 120.14,
120.97, 121.13, 121.55, 121.96, 122.26, 123.79, 124.11, 124.14, 123.37,
123.02, 122.86, 123.02, 123.11, 123.05, 123.05, 122.83, 123.18, 122.67,
122.73, 122.86, 122.67, 122.09, 122, 121.23])
|
mmedit/models/backbones/sr_backbones/ttsr_net.py
|
Jian137/mmediting-1
| 1,884 |
86373
|
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_conv_layer
from mmcv.runner import load_checkpoint
from mmedit.models.common import (PixelShufflePack, ResidualBlockNoBN,
make_layer)
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
# Use partial to specify some default arguments
_conv3x3_layer = partial(
build_conv_layer, dict(type='Conv2d'), kernel_size=3, padding=1)
_conv1x1_layer = partial(
build_conv_layer, dict(type='Conv2d'), kernel_size=1, padding=0)
class SFE(nn.Module):
"""Structural Feature Encoder
Backbone of Texture Transformer Network for Image Super-Resolution.
Args:
in_channels (int): Number of channels in the input image
mid_channels (int): Channel number of intermediate features
num_blocks (int): Block number in the trunk network
res_scale (float): Used to scale the residual in residual block.
Default: 1.
"""
def __init__(self, in_channels, mid_channels, num_blocks, res_scale):
super().__init__()
self.num_blocks = num_blocks
self.conv_first = _conv3x3_layer(in_channels, mid_channels)
self.body = make_layer(
ResidualBlockNoBN,
num_blocks,
mid_channels=mid_channels,
res_scale=res_scale)
self.conv_last = _conv3x3_layer(mid_channels, mid_channels)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
x1 = x = F.relu(self.conv_first(x))
x = self.body(x)
x = self.conv_last(x)
x = x + x1
return x
class CSFI2(nn.Module):
"""Cross-Scale Feature Integration between 1x and 2x features.
Cross-Scale Feature Integration in Texture Transformer Network for
Image Super-Resolution.
It is cross-scale feature integration between 1x and 2x features.
For example, `conv2to1` means conv layer from 2x feature to 1x
feature. Down-sampling is achieved by conv layer with stride=2,
and up-sampling is achieved by bicubic interpolate and conv layer.
Args:
mid_channels (int): Channel number of intermediate features
"""
def __init__(self, mid_channels):
super().__init__()
self.conv1to2 = _conv1x1_layer(mid_channels, mid_channels)
self.conv2to1 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
self.conv_merge1 = _conv3x3_layer(mid_channels * 2, mid_channels)
self.conv_merge2 = _conv3x3_layer(mid_channels * 2, mid_channels)
def forward(self, x1, x2):
"""Forward function.
Args:
x1 (Tensor): Input tensor with shape (n, c, h, w).
x2 (Tensor): Input tensor with shape (n, c, 2h, 2w).
Returns:
x1 (Tensor): Output tensor with shape (n, c, h, w).
x2 (Tensor): Output tensor with shape (n, c, 2h, 2w).
"""
x12 = F.interpolate(
x1, scale_factor=2, mode='bicubic', align_corners=False)
x12 = F.relu(self.conv1to2(x12))
x21 = F.relu(self.conv2to1(x2))
x1 = F.relu(self.conv_merge1(torch.cat((x1, x21), dim=1)))
x2 = F.relu(self.conv_merge2(torch.cat((x2, x12), dim=1)))
return x1, x2
class CSFI3(nn.Module):
"""Cross-Scale Feature Integration between 1x, 2x, and 4x features.
Cross-Scale Feature Integration in Texture Transformer Network for
Image Super-Resolution.
    It is cross-scale feature integration between 1x, 2x and 4x features.
For example, `conv2to1` means conv layer from 2x feature to 1x
feature. Down-sampling is achieved by conv layer with stride=2,
and up-sampling is achieved by bicubic interpolate and conv layer.
Args:
mid_channels (int): Channel number of intermediate features
"""
def __init__(self, mid_channels):
super().__init__()
self.conv1to2 = _conv1x1_layer(mid_channels, mid_channels)
self.conv1to4 = _conv1x1_layer(mid_channels, mid_channels)
self.conv2to1 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
self.conv2to4 = _conv1x1_layer(mid_channels, mid_channels)
self.conv4to1_1 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
self.conv4to1_2 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
self.conv4to2 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
self.conv_merge1 = _conv3x3_layer(mid_channels * 3, mid_channels)
self.conv_merge2 = _conv3x3_layer(mid_channels * 3, mid_channels)
self.conv_merge4 = _conv3x3_layer(mid_channels * 3, mid_channels)
def forward(self, x1, x2, x4):
"""Forward function.
Args:
x1 (Tensor): Input tensor with shape (n, c, h, w).
x2 (Tensor): Input tensor with shape (n, c, 2h, 2w).
x4 (Tensor): Input tensor with shape (n, c, 4h, 4w).
Returns:
x1 (Tensor): Output tensor with shape (n, c, h, w).
x2 (Tensor): Output tensor with shape (n, c, 2h, 2w).
x4 (Tensor): Output tensor with shape (n, c, 4h, 4w).
"""
x12 = F.interpolate(
x1, scale_factor=2, mode='bicubic', align_corners=False)
x12 = F.relu(self.conv1to2(x12))
x14 = F.interpolate(
x1, scale_factor=4, mode='bicubic', align_corners=False)
x14 = F.relu(self.conv1to4(x14))
x21 = F.relu(self.conv2to1(x2))
x24 = F.interpolate(
x2, scale_factor=2, mode='bicubic', align_corners=False)
x24 = F.relu(self.conv2to4(x24))
x41 = F.relu(self.conv4to1_1(x4))
x41 = F.relu(self.conv4to1_2(x41))
x42 = F.relu(self.conv4to2(x4))
x1 = F.relu(self.conv_merge1(torch.cat((x1, x21, x41), dim=1)))
x2 = F.relu(self.conv_merge2(torch.cat((x2, x12, x42), dim=1)))
x4 = F.relu(self.conv_merge4(torch.cat((x4, x14, x24), dim=1)))
return x1, x2, x4
class MergeFeatures(nn.Module):
"""Merge Features. Merge 1x, 2x, and 4x features.
Final module of Texture Transformer Network for Image Super-Resolution.
Args:
mid_channels (int): Channel number of intermediate features
out_channels (int): Number of channels in the output image
"""
def __init__(self, mid_channels, out_channels):
super().__init__()
self.conv1to4 = _conv1x1_layer(mid_channels, mid_channels)
self.conv2to4 = _conv1x1_layer(mid_channels, mid_channels)
self.conv_merge = _conv3x3_layer(mid_channels * 3, mid_channels)
self.conv_last1 = _conv3x3_layer(mid_channels, mid_channels // 2)
self.conv_last2 = _conv1x1_layer(mid_channels // 2, out_channels)
def forward(self, x1, x2, x4):
"""Forward function.
Args:
x1 (Tensor): Input tensor with shape (n, c, h, w).
x2 (Tensor): Input tensor with shape (n, c, 2h, 2w).
x4 (Tensor): Input tensor with shape (n, c, 4h, 4w).
Returns:
x (Tensor): Output tensor with shape (n, c_out, 4h, 4w).
"""
x14 = F.interpolate(
x1, scale_factor=4, mode='bicubic', align_corners=False)
x14 = F.relu(self.conv1to4(x14))
x24 = F.interpolate(
x2, scale_factor=2, mode='bicubic', align_corners=False)
x24 = F.relu(self.conv2to4(x24))
x = F.relu(self.conv_merge(torch.cat((x4, x14, x24), dim=1)))
x = self.conv_last1(x)
x = self.conv_last2(x)
x = torch.clamp(x, -1, 1)
return x
@BACKBONES.register_module()
class TTSRNet(nn.Module):
"""TTSR network structure (main-net) for reference-based super-resolution.
Paper: Learning Texture Transformer Network for Image Super-Resolution
Adapted from 'https://github.com/researchmm/TTSR.git'
'https://github.com/researchmm/TTSR'
Copyright permission at 'https://github.com/researchmm/TTSR/issues/38'.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels in the output image
mid_channels (int): Channel number of intermediate features.
Default: 64
num_blocks (tuple[int]): Block numbers in the trunk network.
Default: (16, 16, 8, 4)
res_scale (float): Used to scale the residual in residual block.
Default: 1.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels=64,
texture_channels=64,
num_blocks=(16, 16, 8, 4),
res_scale=1.0):
super().__init__()
self.texture_channels = texture_channels
self.sfe = SFE(in_channels, mid_channels, num_blocks[0], res_scale)
# stage 1
self.conv_first1 = _conv3x3_layer(4 * texture_channels + mid_channels,
mid_channels)
self.res_block1 = make_layer(
ResidualBlockNoBN,
num_blocks[1],
mid_channels=mid_channels,
res_scale=res_scale)
self.conv_last1 = _conv3x3_layer(mid_channels, mid_channels)
# up-sampling 1 -> 2
self.up1 = PixelShufflePack(
in_channels=mid_channels,
out_channels=mid_channels,
scale_factor=2,
upsample_kernel=3)
# stage 2
self.conv_first2 = _conv3x3_layer(2 * texture_channels + mid_channels,
mid_channels)
self.csfi2 = CSFI2(mid_channels)
self.res_block2_1 = make_layer(
ResidualBlockNoBN,
num_blocks[2],
mid_channels=mid_channels,
res_scale=res_scale)
self.res_block2_2 = make_layer(
ResidualBlockNoBN,
num_blocks[2],
mid_channels=mid_channels,
res_scale=res_scale)
self.conv_last2_1 = _conv3x3_layer(mid_channels, mid_channels)
self.conv_last2_2 = _conv3x3_layer(mid_channels, mid_channels)
# up-sampling 2 -> 3
self.up2 = PixelShufflePack(
in_channels=mid_channels,
out_channels=mid_channels,
scale_factor=2,
upsample_kernel=3)
# stage 3
self.conv_first3 = _conv3x3_layer(texture_channels + mid_channels,
mid_channels)
self.csfi3 = CSFI3(mid_channels)
self.res_block3_1 = make_layer(
ResidualBlockNoBN,
num_blocks[3],
mid_channels=mid_channels,
res_scale=res_scale)
self.res_block3_2 = make_layer(
ResidualBlockNoBN,
num_blocks[3],
mid_channels=mid_channels,
res_scale=res_scale)
self.res_block3_3 = make_layer(
ResidualBlockNoBN,
num_blocks[3],
mid_channels=mid_channels,
res_scale=res_scale)
self.conv_last3_1 = _conv3x3_layer(mid_channels, mid_channels)
self.conv_last3_2 = _conv3x3_layer(mid_channels, mid_channels)
self.conv_last3_3 = _conv3x3_layer(mid_channels, mid_channels)
# end, merge features
self.merge_features = MergeFeatures(mid_channels, out_channels)
def forward(self, x, soft_attention, textures):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
soft_attention (Tensor): Soft-Attention tensor with shape
(n, 1, h, w).
textures (Tuple[Tensor]): Transferred HR texture tensors.
[(N, C, H, W), (N, C/2, 2H, 2W), ...]
Returns:
Tensor: Forward results.
"""
assert textures[-1].shape[1] == self.texture_channels
x1 = self.sfe(x)
# stage 1
x1_res = torch.cat((x1, textures[0]), dim=1)
x1_res = self.conv_first1(x1_res)
# soft-attention
x1 = x1 + x1_res * soft_attention
x1_res = self.res_block1(x1)
x1_res = self.conv_last1(x1_res)
x1 = x1 + x1_res
# stage 2
x21 = x1
x22 = self.up1(x1)
x22 = F.relu(x22)
x22_res = torch.cat((x22, textures[1]), dim=1)
x22_res = self.conv_first2(x22_res)
# soft-attention
x22_res = x22_res * F.interpolate(
soft_attention,
scale_factor=2,
mode='bicubic',
align_corners=False)
x22 = x22 + x22_res
x21_res, x22_res = self.csfi2(x21, x22)
x21_res = self.res_block2_1(x21_res)
x22_res = self.res_block2_2(x22_res)
x21_res = self.conv_last2_1(x21_res)
x22_res = self.conv_last2_2(x22_res)
x21 = x21 + x21_res
x22 = x22 + x22_res
# stage 3
x31 = x21
x32 = x22
x33 = self.up2(x22)
x33 = F.relu(x33)
x33_res = torch.cat((x33, textures[2]), dim=1)
x33_res = self.conv_first3(x33_res)
# soft-attention
x33_res = x33_res * F.interpolate(
soft_attention,
scale_factor=4,
mode='bicubic',
align_corners=False)
x33 = x33 + x33_res
x31_res, x32_res, x33_res = self.csfi3(x31, x32, x33)
x31_res = self.res_block3_1(x31_res)
x32_res = self.res_block3_2(x32_res)
x33_res = self.res_block3_3(x33_res)
x31_res = self.conv_last3_1(x31_res)
x32_res = self.conv_last3_2(x32_res)
x33_res = self.conv_last3_3(x33_res)
x31 = x31 + x31_res
x32 = x32 + x32_res
x33 = x33 + x33_res
x = self.merge_features(x31, x32, x33)
return x
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained model.
                Defaults to True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is None:
pass # use default initialization
else:
raise TypeError('"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
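# Hedged usage sketch (editorial note, illustrative only; shapes assume the defaults
# in_channels=3, out_channels=3, mid_channels=64, texture_channels=64):
# net = TTSRNet(in_channels=3, out_channels=3)
# x = torch.randn(1, 3, 24, 24)               # LR input
# soft_attention = torch.rand(1, 1, 24, 24)   # relevance map from the texture transformer
# textures = [torch.randn(1, 256, 24, 24),    # 4 * texture_channels at 1x
#             torch.randn(1, 128, 48, 48),    # 2 * texture_channels at 2x
#             torch.randn(1, 64, 96, 96)]     # texture_channels at 4x
# out = net(x, soft_attention, textures)      # -> (1, 3, 96, 96)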
|
src/c3nav/routing/utils/draw.py
|
johnjohndoe/c3nav
| 132 |
86384
|
from django.conf import settings
def _ellipse_bbox(x, y, height):
x *= settings.RENDER_SCALE
y *= settings.RENDER_SCALE
y = height-y
return ((x - 2, y - 2), (x + 2, y + 2))
def _line_coords(from_point, to_point, height):
return (from_point.x * settings.RENDER_SCALE, height - (from_point.y * settings.RENDER_SCALE),
to_point.x * settings.RENDER_SCALE, height - (to_point.y * settings.RENDER_SCALE))
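# Hedged usage sketch (editorial note, illustrative only): with settings.RENDER_SCALE = 10
# and an 800 px high canvas, a map point at (3, 5) becomes pixel (30, 750), so
# _ellipse_bbox(3, 5, 800) returns ((28, 748), (32, 752)), a 4 px box with the y axis
# flipped from map coordinates to image coordinates.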
|
draft_kings/response/schema/draft_group.py
|
jaebradley/draftkings_client
| 111 |
86398
|
# pylint: disable=unused-argument, no-self-use
from marshmallow import Schema, fields, EXCLUDE, post_load
from draft_kings.response.objects.draft_group import ContestType, League, Game, DraftGroup, DraftGroupResponse
class ContestTypeSchema(Schema):
class Meta:
unknown = EXCLUDE
contestTypeId = fields.Int(attribute="contest_type_id", missing=None)
gameType = fields.Str(attribute="game_type", missing=None)
@post_load
def make_contest_type(self, data, **kwargs):
return ContestType(**data)
class LeagueSchema(Schema):
class Meta:
unknown = EXCLUDE
leagueAbbreviation = fields.Str(attribute="league_abbreviation", missing=None)
leagueId = fields.Int(attribute="league_id", missing=None)
leagueName = fields.Str(attribute="league_name", missing=None)
@post_load
def make_league(self, data, **kwargs):
return League(**data)
class GameSchema(Schema):
class Meta:
unknown = EXCLUDE
awayTeamId = fields.Int(attribute="away_team_id", missing=None)
description = fields.Str(attribute="description", missing=None)
gameId = fields.Int(attribute="game_id", missing=None)
homeTeamId = fields.Int(attribute="home_team_id", missing=None)
location = fields.Str(attribute="location", missing=None)
name = fields.Str(attribute="name", missing=None)
startDate = fields.AwareDateTime(attribute="start_date", missing=None)
status = fields.Str(attribute="status", missing=None)
@post_load
def make_game(self, data, **kwargs):
return Game(**data)
class DraftGroupSchema(Schema):
class Meta:
unknown = EXCLUDE
contestType = fields.Nested(ContestTypeSchema, attribute="contest_type", required=True)
draftGroupId = fields.Int(attribute="draft_group_id", missing=None)
draftGroupState = fields.Str(attribute="draft_group_state", missing=None)
games = fields.List(fields.Nested(GameSchema, required=True), attribute="games", missing=[])
leagues = fields.List(fields.Nested(LeagueSchema, required=True), attribute="leagues", missing=[])
maxStartTime = fields.AwareDateTime(attribute="max_start_time", missing=None)
minStartTime = fields.AwareDateTime(attribute="min_start_time", missing=None)
sportId = fields.Int(attribute="sport_id", missing=None)
startTimeType = fields.Str(attribute="start_time_type", missing=None)
@post_load
def make_draft_group(self, data, **kwargs):
return DraftGroup(**data)
class DraftGroupResponseSchema(Schema):
class Meta:
unknown = EXCLUDE
draftGroup = fields.Nested(DraftGroupSchema, attribute="draft_group", required=True)
@post_load
def make_draft_group_response(self, data, **kwargs):
return DraftGroupResponse(**data)
# pylint: enable=unused-argument, no-self-use
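# Hedged usage sketch (editorial note, illustrative only; assumes the response objects
# expose the snake_case attributes configured above):
# payload = {"draftGroup": {"draftGroupId": 1, "sportId": 2,
#                           "contestType": {"contestTypeId": 5, "gameType": "Classic"}}}
# response = DraftGroupResponseSchema().load(payload)
# response.draft_group.contest_type.game_type  # -> "Classic"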
|
etl/parsers/etw/Microsoft_Windows_WMPNSS_PublicAPI.py
|
IMULMUL/etl-parser
| 104 |
86404
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-WMPNSS-PublicAPI
GUID : 614696c9-85af-4e64-b389-d2c0db4ff87b
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=100, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_100_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=101, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_101_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=102, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_102_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=103, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_103_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=104, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_104_0(Etw):
pattern = Struct(
"LibraryName" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=105, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_105_0(Etw):
pattern = Struct(
"LibraryName" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=106, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_106_0(Etw):
pattern = Struct(
"LibraryName" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=107, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_107_0(Etw):
pattern = Struct(
"LibraryName" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=108, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_108_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=109, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_109_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=110, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_110_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=111, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_111_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=112, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_112_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=113, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_113_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=114, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_114_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=115, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_115_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=116, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_116_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=117, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_117_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=118, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_118_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=119, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_119_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=120, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_120_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=121, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_121_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=122, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_122_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=123, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_123_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=124, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_124_0(Etw):
pattern = Struct(
"MACAddress" / WString,
"FriendlyName" / WString,
"Authorize" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=125, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_125_0(Etw):
pattern = Struct(
"MACAddress" / WString,
"FriendlyName" / WString,
"Authorize" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=126, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_126_0(Etw):
pattern = Struct(
"MACAddress" / WString,
"Authorize" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=127, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_127_0(Etw):
pattern = Struct(
"MACAddress" / WString,
"Authorize" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=128, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_128_0(Etw):
pattern = Struct(
"Devices" / Int64ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=129, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_129_0(Etw):
pattern = Struct(
"Devices" / Int64ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=130, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_130_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=131, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_131_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=132, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_132_0(Etw):
pattern = Struct(
"DeviceID" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=133, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_133_0(Etw):
pattern = Struct(
"DeviceID" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=134, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_134_0(Etw):
pattern = Struct(
"SecurityGroup" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=135, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_135_0(Etw):
pattern = Struct(
"SecurityGroup" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=136, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_136_0(Etw):
pattern = Struct(
"SecurityGroup" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=137, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_137_0(Etw):
pattern = Struct(
"SecurityGroup" / WString,
"HResult" / Int32ul
)
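# Editorial note (hedged): every class above is registered through @declare with the
# provider GUID 614696c9-85af-4e64-b389-d2c0db4ff87b and an event id, so the parser can
# dispatch raw Microsoft-Windows-WMPNSS-PublicAPI records to the matching construct
# Struct pattern when reading a trace; most events carry only an Enable flag and an HResult.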
|
mc-cnn/rgs.py
|
datvuthanh/SPIMNET
| 746 |
86438
|
#!/usr/bin/env python3
import sys
dataset, action = sys.argv[1:]
assert(dataset in {'mb', 'kitti'})
assert(action in {'test_te', 'train_tr'})
workers = [
('localhost', '-gpu 1'),
('localhost', '-gpu 2'),
('localhost', '-gpu 3'),
('localhost', '-gpu 4'),
]
if dataset == 'kitti' and action == 'train_tr':
params = [
('l1', [3, 4, 5]),
('fm_s', [1, 2, 3, 4, 5, 6, 7]),
('fm_t', [4, 5, 6, 7, 8, 9, 10]),
('l2', [3, 4, 5]),
('nh2', [200, 300, 400]),
('lr', [0.001, 0.003, 0.01]),
('true1', [0.5, 1, 1.5]),
('false1', [2, 3, 4, 5]),
('false2', [4, 6, 8, 10, 12]),
]
def valid(ps):
if ps['fm_s'] > ps['fm_t']: return False
if ps['true1'] > ps['false1']: return False
return True
if dataset == 'mb' and action == 'train_tr':
params = [
('l1', [3, 4, 5]),
('fm_s', [1, 2, 3, 4, 5, 6]),
('fm_t', [1, 2, 3, 4, 5, 6]),
('l2', [3, 4, 5]),
('nh2', [100, 150, 200]),
('lr', [0.001, 0.003, 0.01]),
('true1', [0.5, 1, 1.5]),
('false1', [1, 1.5, 2, 2.5, 3]),
('false2', [4, 6, 8, 10, 12]),
]
def valid(ps):
if ps['fm_s'] > ps['fm_t']: return False
if ps['true1'] > ps['false1']: return False
return True
if dataset == 'mb' and action == 'test_te':
params = [
# ('L1', range(0, 10)),
# ('cbca_i1', [0, 2, 4, 6, 8]),
# ('cbca_i2', [0, 2, 4, 6, 8]),
('tau1', [2**(i/2.) for i in range(-13,-4)]),
# ('pi1', [2**i for i in range(-3, 4)]),
# ('pi2', [2**i for i in range(2, 9)]),
# ('sgm_q1', [3, 3.5, 4, 4.5, 5]),
# ('sgm_q2', [2, 2.5, 3, 3.5, 4, 4.5]),
# ('alpha1', [1 + i/4. for i in range(0, 8)]),
('tau_so', [2**(i/2.) for i in range(-10,0)]),
# ('blur_sigma', [2**(i/2.) for i in range(0, 8)]),
# ('blur_t', range(1, 8)),
]
def valid(ps):
# if ps['pi1'] > ps['pi2']: return False
return True
###
import random
import threading
import multiprocessing
import subprocess
import sys
def start_job(ps, level):
worker = multiprocessing.current_process()._identity[0] - 1
host, args = workers[worker]
ps_str = ' '.join('-%s %r' % (name, vals[i]) for name, vals, i in ps)
if action == 'test_te':
ps_str += ' -use_cache'
cmd = "ssh %s 'cd devel/mc-cnn;TERM=xterm ./main.lua %s -a %s %s %s'" % (host, dataset, action, args, ps_str)
try:
o = subprocess.check_output(cmd, shell=True)
return float(o.split()[-1]), ps_str, ps, level
except:
print('Exception!')
return 1, ps_str, ps, level
def stop_job(res):
results.append(res)
#print(min(results)[:2])
for r in sorted(results, reverse=True)[-50:]:
print(r[:2])
print(res[:2])
print('--')
sem.release()
for worker in set(w[0] for w in workers):
subprocess.call("ssh {} 'pkill luajit'".format(worker), shell=True)
pool = multiprocessing.Pool(len(workers))
sem = threading.Semaphore(len(workers))
results = []
visited = set()
while True:
# get level
level = random.randint(0, max([r[3] for r in results])) if results else 0
if level == 0:
ps = tuple((name, tuple(vals), random.randint(0, len(vals) - 1)) for name, vals in params)
else:
ps_min = min([r for r in results if r[3] == level])[2]
ps = []
for name, vals, i in ps_min:
xs = [i]
if i - 1 >= 0:
xs.append(i - 1)
if i + 1 < len(vals):
xs.append(i + 1)
ps.append((name, vals, random.choice(xs)))
ps = tuple(ps)
if not valid({name: vals[i] for name, vals, i in ps}):
continue
if ps in visited:
continue
visited.add(ps)
sem.acquire()
pool.apply_async(start_job, (ps, level + 1), callback=stop_job)
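# Hedged usage sketch (editorial note, illustrative only): the script is driven from the
# command line, e.g.
#   ./rgs.py kitti train_tr   # random search over training hyper-parameters
#   ./rgs.py mb test_te       # random search over post-processing parameters
# Each sampled configuration is dispatched over SSH to one of the workers listed above.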
|
tests/unit/tasks/test_check_queue_size.py
|
guvenbz/amazon-s3-find-and-forget
| 165 |
86474
|
from types import SimpleNamespace
import pytest
from mock import patch, MagicMock
from backend.lambdas.tasks.check_queue_size import handler
pytestmark = [pytest.mark.unit, pytest.mark.task]
@patch("backend.lambdas.tasks.check_queue_size.sqs")
def test_it_returns_correct_queue_size(mock_resource):
mock_queue = MagicMock()
mock_resource.Queue.return_value = mock_queue
mock_queue.attributes = {
"ApproximateNumberOfMessages": "4",
"ApproximateNumberOfMessagesNotVisible": "2",
}
event = {"QueueUrl": "queue_url"}
resp = handler(event, SimpleNamespace())
assert {"Visible": 4, "NotVisible": 2, "Total": 6} == resp
|
tests/tensortrade/unit/feed/api/float/test_operations.py
|
nicomon24/tensortrade
| 3,081 |
86476
|
<gh_stars>1000+
import pandas as pd
from tensortrade.feed import Stream
from tests.utils.ops import assert_op
def test_add():
# (left, right) : (Stream, Stream)
s1 = Stream.source([3, -4, 6, -7, 2, -6], dtype="float")
s2 = Stream.source([-3, 4, -6, 7, -2, 6], dtype="float")
w1 = s1.add(s2).rename("w1")
w2 = (s1 + s2).rename("w2")
assert_op([w1, w2], 6*[0])
# (left, right) : (Stream, float)
s1 = Stream.source([1, 2, 3, 4, 5, 6], dtype="float")
s2 = 1
w1 = s1.add(s2).rename("w1")
w2 = (s1 + s2).rename("w2")
assert_op([w1, w2], [2, 3, 4, 5, 6, 7])
def test_radd():
# (left, right) : (float, Stream)
s1 = 1
s2 = Stream.source([1, 2, 3, 4, 5, 6], dtype="float")
w = (s1 + s2).rename("w")
assert_op([w], [2, 3, 4, 5, 6, 7])
def test_sub():
expected = [0, 1, 2, 3, 4, 5]
# (left, right) : (Stream, Stream)
s1 = Stream.source([1, 2, 3, 4, 5, 6], dtype="float")
s2 = Stream.source([1, 1, 1, 1, 1, 1], dtype="float")
w1 = s1.sub(s2).rename("w1")
w2 = (s1 - s2).rename("w2")
assert_op([w1, w2], expected)
# (left, right) : (Stream, float)
w1 = s1.sub(1).rename("w1")
w2 = (s1 - 1).rename("w2")
assert_op([w1, w2], expected)
def test_rsub():
# (left, right) : (float, Stream)
s1 = 6
s2 = Stream.source([1, 2, 3, 4, 5, 6], dtype="float")
w = (s1 - s2).rename("w")
assert_op([w], [5, 4, 3, 2, 1, 0])
def test_mul():
expected = [2, 4, 6, 8, 10, 12]
# (left, right) : (Stream, Stream)
s1 = Stream.source([1, 2, 3, 4, 5, 6], dtype="float")
s2 = Stream.source([2, 2, 2, 2, 2, 2], dtype="float")
w1 = s1.mul(s2).rename("w1")
w2 = (s1 * s2).rename("w2")
assert_op([w1, w2], expected)
# (left, right) : (Stream, float)
w1 = s1.mul(2).rename("w1")
w2 = (s1 * 2).rename("w2")
assert_op([w1, w2], expected)
def test_rmul():
expected = [2, 4, 6, 8, 10, 12]
# (left, right) : (Stream, Stream)
s = Stream.source([1, 2, 3, 4, 5, 6], dtype="float")
# (left, right) : (Stream, float)
w = (2 * s).rename("w")
assert_op([w], expected)
def test_div():
expected = [1, 2, 3, 4, 5, 6]
# (left, right) : (Stream, Stream)
s1 = Stream.source([2, 4, 6, 8, 10, 12], dtype="float")
s2 = Stream.source([2, 2, 2, 2, 2, 2], dtype="float")
w1 = s1.div(s2).rename("w1")
w2 = (s1 / s2).rename("w2")
assert_op([w1, w2], expected)
# (left, right) : (Stream, float)
w1 = s1.div(2).rename("w1")
w2 = (s1 / 2).rename("w2")
assert_op([w1, w2], expected)
def test_rdiv():
expected = [6, 3, 2, 3/2, 6/5, 1]
# (left, right) : (Stream, Stream)
s = Stream.source([2, 4, 6, 8, 10, 12], dtype="float")
# (left, right) : (Stream, float)
w = (12 / s).rename("w")
assert_op([w], expected)
def test_abs():
s = Stream.source([3, -4, 6, -7, 2, -6], dtype="float")
s1 = s.abs().rename("s1")
s2 = abs(s).rename("s2")
assert_op([s1, s2], [3, 4, 6, 7, 2, 6])
def test_neg():
s = Stream.source([3, -4, 6, -7, 2, -6], dtype="float")
s1 = s.neg().rename("s1")
s2 = (-s).rename("s2")
assert_op([s1, s2], [-3, 4, -6, 7, -2, 6])
def test_pow():
array = [1, -2, 3, -4, 5, -6]
s = Stream.source(array, dtype="float")
s1 = s.pow(3).rename("s1")
s2 = (s**3).rename("s2")
expected = list(pd.Series(array)**3)
assert_op([s1, s2], expected)
|
net/util.py
|
juandesant/astrometry.net
| 460 |
86479
|
<gh_stars>100-1000
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django import forms
from django.utils.safestring import mark_safe
from django.forms import widgets
class HorizontalRadioSelect(widgets.RadioSelect):
input_type = 'radio'
template_name = 'radio-horizontal.html'
option_template_name = 'django/forms/widgets/radio_option.html'
class NoBulletsRadioSelect(widgets.RadioSelect):
input_type = 'radio'
template_name = 'radio-nobullets.html'
option_template_name = 'django/forms/widgets/radio_option.html'
# class HorizontalRenderer(forms.RadioSelect.renderer):
# def render(self):
# return mark_safe(u'\n'.join([u'%s' % w for w in self]))
#
# class NoBulletsRenderer(forms.RadioSelect.renderer):
# def render(self):
# return mark_safe(u'<br />\n'.join([u'%s' % w for w in self]))
def store_session_form(session, form_class, data):
session[form_class.__name__] = data
def get_session_form(session, form_class, **kwargs):
if session.get(form_class.__name__):
form = form_class(session[form_class.__name__], **kwargs)
form.is_valid()
del session[form_class.__name__]
else:
form = form_class(**kwargs)
return form
def dict_pack(struct_tuple, data_tuple):
pack = []
for data in data_tuple:
index = 0
packed_data = {}
for key in struct_tuple:
packed_data.update({key:data[index]})
index += 1
pack += [packed_data]
return tuple(pack)
def choicify(choice_dict_list, database_value, human_readable_value):
choice_list = []
for d in choice_dict_list:
choice_list.append((d[database_value],d[human_readable_value]))
return tuple(choice_list)
def get_page(object_list, page_size, page_number):
paginator = Paginator(object_list, page_size)
try:
page = paginator.page(page_number)
except PageNotAnInteger:
page = paginator.page(1)
except EmptyPage:
page = paginator.page(paginator.num_pages)
return page
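# Hedged usage sketch (editorial note, illustrative only):
# page = get_page(list(range(100)), page_size=10, page_number=3)
# page.object_list                                  # -> [20, 21, ..., 29]
# get_page(list(range(100)), 10, 'oops').number     # -> 1 (non-integer pages fall back to page 1)
# get_page(list(range(100)), 10, 999).number        # -> 10 (out-of-range pages fall back to the last page)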
|
src/sensing/drivers/radar/umrr_driver/src/smartmicro/Helper/basicThreadHelper/threadHelper.py
|
P4nos/Aslan
| 227 |
86492
|
import threading
import logging
import time
class ThreadHelperException(Exception):
pass
class ThreadHelper(threading.Thread):
"""
The class provides a frame for the threads used in the framework.
"""
# ---------------------------------------------------------------------------------------------------------------- #
# function: initialization #
# ---------------------------------------------------------------------------------------------------------------- #
def __init__(self):
"""
The function initializes all necessary variables and instances to deal with threads.
"""
# init parent class
threading.Thread.__init__(self)
# init additional instances and variables
self.mutex = threading.Lock()
self.eventWakeup = threading.Event()
self.running = True
self.shutDownBool = False
# bind thread to main process
self.setDaemon(True)
# ---------------------------------------------------------------------------------------------------------------- #
# function: suspend #
# ---------------------------------------------------------------------------------------------------------------- #
def suspend(self):
"""
Suspends the thread.
"""
with self.mutex:
self.running = False
# ---------------------------------------------------------------------------------------------------------------- #
# function: resume #
# ---------------------------------------------------------------------------------------------------------------- #
def resume(self):
"""
Resumes the thread.
"""
with self.mutex:
if self.running is not True:
self.running = True
self.eventWakeup.set()
# ---------------------------------------------------------------------------------------------------------------- #
# function: shutDown #
# ---------------------------------------------------------------------------------------------------------------- #
def shutDown(self):
"""
Shut down the thread.
"""
with self.mutex:
self.shutDownBool = True
if self.running is not True:
self.resume()
# ---------------------------------------------------------------------------------------------------------------- #
# function: run #
# ---------------------------------------------------------------------------------------------------------------- #
def run(self):
"""
The default run function.
"""
logging.debug("Start the default thread")
while self.shutDownBool is not True:
if self.running:
# raise ThreadHelperException("The thread use the default run function. Implement a run function in the"
# " derived class")
logging.debug("Default thread executed")
time.sleep(0.05)
else:
logging.debug('Default thread wait')
self.eventWakeup.wait()
logging.debug('Default thread resumed')
logging.debug("Shut down the default thread")
|
pyGPs/Optimization/scg.py
|
Corentin-LF/pyGPs
| 196 |
86519
|
from __future__ import division
from past.utils import old_div
#===============================================================================
# SCG Scaled conjugate gradient optimization.
#
# Copyright (c) <NAME> (1996-2001)
# updates by <NAME> 2013
#
# Permission is granted for anyone to copy, use, or modify these
# programs and accompanying documents for purposes of research or
# education, provided this copyright notice is retained, and note is
# made of any changes that have been made.
#
# These programs and documents are distributed without any warranty,
# express or implied. As the programs were written for research
# purposes only, they have not been tested to the degree that would be
# advisable in any important application. All use of these programs is
# entirely at the user's own risk."
#===============================================================================
from math import sqrt
import numpy as np
import logging
def run(f, x, args=(), niters = 100, gradcheck = False, display = 0, flog = False, pointlog = False, scalelog = False, tolX = 1.0e-8, tolO = 1.0e-8, eval = None):
'''Scaled conjugate gradient optimization. '''
if display:
logging.getLogger(__name__).info('***** starting optimization (SCG) *****')
nparams = len(x);
# Check gradients
if gradcheck:
pass
eps = 1.0e-4
sigma0 = 1.0e-4
result = f(x, *args)
fold = result[0] # Initial function value.
fnow = fold
funcCount = 1 # Increment function evaluation counter.
gradnew = result[1] # Initial gradient.
gradold = gradnew
gradCount = 1 # Increment gradient evaluation counter.
d = -gradnew # Initial search direction.
success = 1 # Force calculation of directional derivs.
nsuccess = 0 # nsuccess counts number of successes.
beta = 1.0 # Initial scale parameter.
betamin = 1.0e-15 # Lower bound on scale.
betamax = 1.0e50 # Upper bound on scale.
j = 1 # j counts number of iterations.
if flog:
pass
#flog(j, :) = fold;
if pointlog:
pass
#pointlog(j, :) = x;
# Main optimization loop.
listF = [fold]
if eval is not None:
evalue, timevalue = eval(x, *args)
evalList = [evalue]
time = [timevalue]
while (j <= niters):
# Calculate first and second directional derivatives.
if (success == 1):
mu = np.dot(d, gradnew)
if (mu >= 0):
d = - gradnew
mu = np.dot(d, gradnew)
kappa = np.dot(d, d)
if (kappa < eps):
logging.getLogger(__name__).info("FNEW: " + str(fnow))
#options(8) = fnow
if eval is not None:
return x, listF, evalList, time
else:
return x, listF
sigma = old_div(sigma0,sqrt(kappa))
xplus = x + sigma*d
gplus = f(xplus, *args)[1]
gradCount += 1
theta = old_div((np.dot(d, (gplus - gradnew))),sigma);
# Increase effective curvature and evaluate step size alpha.
delta = theta + beta*kappa
if (delta <= 0):
delta = beta*kappa
beta = beta - old_div(theta,kappa)
alpha = old_div(- mu,delta)
# Calculate the comparison ratio.
xnew = x + alpha*d
fnew = f(xnew, *args)[0]
funcCount += 1;
Delta = 2*(fnew - fold)/(alpha*mu)
if (Delta >= 0):
success = 1;
nsuccess += 1;
x = xnew;
fnow = fnew;
listF.append(fnow)
if eval is not None:
evalue, timevalue = eval(x, *args)
evalList.append(evalue)
time.append(timevalue)
else:
success = 0;
fnow = fold;
if flog:
# Store relevant variables
#flog(j) = fnow; # Current function value
pass
if pointlog:
#pointlog(j,:) = x; # Current position
pass
if scalelog:
#scalelog(j) = beta; # Current scale parameter
pass
if display > 0:
logging.getLogger(__name__).info('***** Cycle %4d Error %11.6f Scale %e', j, fnow, beta)
if (success == 1):
# Test for termination
# print type (alpha), type(d), type(tolX), type(fnew), type(fold)
if ((max(abs(alpha*d)) < tolX) & (abs(fnew-fold) < tolO)):
# options(8) = fnew;
# print "FNEW: " , fnew
if eval is not None:
return x, listF, evalList, time
else:
return x, listF
else:
# Update variables for new position
fold = fnew
gradold = gradnew
gradnew = f(x, *args)[1]
gradCount += 1
# If the gradient is zero then we are done.
if (np.dot(gradnew, gradnew) == 0):
# print "FNEW: " , fnew
# options(8) = fnew;
if eval is not None:
return x, listF, evalList, time
else:
return x, listF
# Adjust beta according to comparison ratio.
if (Delta < 0.25):
beta = min(4.0*beta, betamax);
if (Delta > 0.75):
beta = max(0.5*beta, betamin);
# Update search direction using Polak-Ribiere formula, or re-start
# in direction of negative gradient after nparams steps.
if (nsuccess == nparams):
d = -gradnew;
nsuccess = 0;
else:
if (success == 1):
gamma = old_div(np.dot((gradold - gradnew), gradnew),(mu))
d = gamma*d - gradnew;
j += 1
# If we get here, then we haven't terminated in the given number of
# iterations.
# options(8) = fold;
if (display):
logging.getLogger(__name__).info("maximum number of iterations reached")
if eval is not None:
return x, listF, evalList, time
else:
return x, listF
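# Hedged usage sketch (editorial note, illustrative only): the objective passed to run()
# must return the function value first and its gradient second, e.g. a simple quadratic:
# def quadratic(x):
#     return np.dot(x, x), 2 * x
# x_opt, history = run(quadratic, np.array([3.0, -2.0]), niters=50)
# x_opt approaches the zero vector; history records the value after each accepted step.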
|
tests/integration/test_hive_query/test.py
|
DevTeamBK/ClickHouse
| 8,629 |
86540
|
import logging
import os
import time
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance(
"h0_0_0",
main_configs=["configs/config.xml"],
extra_configs=["configs/hdfs-site.xml", "data/prepare_hive_data.sh"],
with_hive=True,
)
logging.info("Starting cluster ...")
cluster.start()
cluster.copy_file_to_container(
"roottesthivequery_hdfs1_1",
"/ClickHouse/tests/integration/test_hive_query/data/prepare_hive_data.sh",
"/prepare_hive_data.sh",
)
cluster.exec_in_container(
"roottesthivequery_hdfs1_1", ["bash", "-c", "bash /prepare_hive_data.sh"]
)
yield cluster
finally:
cluster.shutdown()
def test_create_parquet_table(started_cluster):
logging.info("Start testing creating hive table ...")
node = started_cluster.instances["h0_0_0"]
test_passed = False
for i in range(10):
node.query("set input_format_parquet_allow_missing_columns = true")
result = node.query(
"""
DROP TABLE IF EXISTS default.demo_parquet;
CREATE TABLE default.demo_parquet (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo') PARTITION BY(day)
"""
)
logging.info("create result {}".format(result))
if result.strip() == "":
test_passed = True
break
time.sleep(60)
assert test_passed
def test_create_parquet_table_1(started_cluster):
logging.info("Start testing creating hive table ...")
node = started_cluster.instances["h0_0_0"]
for i in range(10):
node.query("set input_format_parquet_allow_missing_columns = true")
result = node.query(
"""
DROP TABLE IF EXISTS default.demo_parquet_parts;
CREATE TABLE default.demo_parquet_parts (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String), `hour` String) ENGINE = Hive('thrift://hivetest:9083', 'test', 'parquet_demo') PARTITION BY(day, hour);
"""
)
logging.info("create result {}".format(result))
if result.strip() == "":
test_passed = True
break
time.sleep(60)
assert test_passed
def test_create_orc_table(started_cluster):
logging.info("Start testing creating hive table ...")
node = started_cluster.instances["h0_0_0"]
test_passed = False
for i in range(10):
result = node.query(
"""
DROP TABLE IF EXISTS default.demo_orc;
CREATE TABLE default.demo_orc (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo_orc') PARTITION BY(day)
"""
)
logging.info("create result {}".format(result))
if result.strip() == "":
test_passed = True
break
time.sleep(60)
assert test_passed
def test_create_text_table(started_cluster):
logging.info("Start testing creating hive table ...")
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
DROP TABLE IF EXISTS default.demo_text;
CREATE TABLE default.demo_text (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo_text') PARTITION BY (tuple())
"""
)
logging.info("create result {}".format(result))
assert result.strip() == ""
def test_parquet_groupby(started_cluster):
logging.info("Start testing groupby ...")
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
SELECT day, count(*) FROM default.demo_parquet group by day order by day
"""
)
expected_result = """2021-11-01 1
2021-11-05 2
2021-11-11 1
2021-11-16 2
"""
assert result == expected_result
def test_parquet_in_filter(started_cluster):
logging.info("Start testing groupby ...")
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
SELECT count(*) FROM default.demo_parquet_parts where day = '2021-11-05' and hour in ('00')
"""
)
expected_result = """2
"""
logging.info("query result:{}".format(result))
assert result == expected_result
def test_orc_groupby(started_cluster):
logging.info("Start testing groupby ...")
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
SELECT day, count(*) FROM default.demo_orc group by day order by day
"""
)
expected_result = """2021-11-01 1
2021-11-05 2
2021-11-11 1
2021-11-16 2
"""
assert result == expected_result
@pytest.mark.parametrize(
"table,use_local_cache_for_remote_storage,enable_orc_file_minmax_index,enable_orc_stripe_minmax_index",
[
pytest.param(
"demo_orc_no_cache_no_index",
"false",
"false",
"false",
id="demo_orc_no_cache_no_index",
),
pytest.param(
"demo_orc_with_cache_no_index",
"true",
"false",
"false",
id="demo_orc_with_cache_no_index",
),
pytest.param(
"demo_orc_no_cache_file_index",
"false",
"true",
"false",
id="demo_orc_no_cache_file_index",
),
pytest.param(
"demo_orc_with_cache_file_index",
"true",
"true",
"false",
id="demo_orc_with_cache_file_index",
),
pytest.param(
"demo_orc_no_cache_stripe_index",
"false",
"true",
"true",
id="demo_orc_no_cache_stripe_index",
),
pytest.param(
"demo_orc_with_cache_stripe_index",
"true",
"true",
"true",
id="demo_orc_with_cache_stripe_index",
),
],
)
def test_orc_minmax_index(
started_cluster,
table,
use_local_cache_for_remote_storage,
enable_orc_file_minmax_index,
enable_orc_stripe_minmax_index,
):
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
DROP TABLE IF EXISTS default.{table};
CREATE TABLE default.{table} (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo_orc') PARTITION BY(day)
SETTINGS enable_orc_file_minmax_index = {enable_orc_file_minmax_index}, enable_orc_stripe_minmax_index = {enable_orc_stripe_minmax_index};
""".format(
table=table,
enable_orc_file_minmax_index=enable_orc_file_minmax_index,
enable_orc_stripe_minmax_index=enable_orc_stripe_minmax_index,
)
)
assert result.strip() == ""
for i in range(2):
result = node.query(
"""
SELECT day, id, score FROM default.{table} where day >= '2021-11-05' and day <= '2021-11-16' and score >= 15 and score <= 30 order by day, id
SETTINGS use_local_cache_for_remote_storage = {use_local_cache_for_remote_storage}
""".format(
table=table,
use_local_cache_for_remote_storage=use_local_cache_for_remote_storage,
)
)
assert (
result
== """2021-11-05 abd 15
2021-11-16 aaa 22
"""
)
@pytest.mark.parametrize(
"table,use_local_cache_for_remote_storage,enable_parquet_rowgroup_minmax_index",
[
pytest.param(
"demo_parquet_no_cache_no_index",
"false",
"false",
id="demo_parquet_no_cache_no_index",
),
pytest.param(
"demo_parquet_with_cache_no_index",
"true",
"false",
id="demo_parquet_with_cache_no_index",
),
pytest.param(
"demo_parquet_no_cache_rowgroup_index",
"false",
"true",
id="demo_parquet_no_cache_rowgroup_index",
),
pytest.param(
"demo_parquet_with_cache_rowgroup_index",
"true",
"true",
id="demo_parquet_with_cache_rowgroup_index",
),
],
)
def test_parquet_minmax_index(
started_cluster,
table,
use_local_cache_for_remote_storage,
enable_parquet_rowgroup_minmax_index,
):
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
DROP TABLE IF EXISTS default.{table};
CREATE TABLE default.{table} (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo') PARTITION BY(day)
SETTINGS enable_parquet_rowgroup_minmax_index = {enable_parquet_rowgroup_minmax_index}
""".format(
table=table,
enable_parquet_rowgroup_minmax_index=enable_parquet_rowgroup_minmax_index,
)
)
assert result.strip() == ""
for i in range(2):
result = node.query(
"""
SELECT day, id, score FROM default.{table} where day >= '2021-11-05' and day <= '2021-11-16' and score >= 15 and score <= 30 order by day, id
SETTINGS use_local_cache_for_remote_storage = {use_local_cache_for_remote_storage}
""".format(
table=table,
use_local_cache_for_remote_storage=use_local_cache_for_remote_storage,
)
)
assert (
result
== """2021-11-05 abd 15
2021-11-16 aaa 22
"""
)
def test_hive_columns_prunning(started_cluster):
logging.info("Start testing groupby ...")
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
SELECT count(*) FROM default.demo_parquet_parts where day = '2021-11-05'
"""
)
expected_result = """4
"""
logging.info("query result:{}".format(result))
assert result == expected_result
def test_text_count(started_cluster):
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
SELECT day, count(*) FROM default.demo_orc group by day order by day SETTINGS format_csv_delimiter = '\x01'
"""
)
expected_result = """2021-11-01 1
2021-11-05 2
2021-11-11 1
2021-11-16 2
"""
assert result == expected_result
def test_parquet_groupby_with_cache(started_cluster):
logging.info("Start testing groupby ...")
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
SELECT day, count(*) FROM default.demo_parquet group by day order by day
"""
)
expected_result = """2021-11-01 1
2021-11-05 2
2021-11-11 1
2021-11-16 2
"""
assert result == expected_result
def test_parquet_groupby_by_hive_function(started_cluster):
logging.info("Start testing groupby ...")
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
SELECT day, count(*) FROM hive('thrift://hivetest:9083', 'test', 'demo', '`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)', 'day') group by day order by day
"""
)
expected_result = """2021-11-01 1
2021-11-05 2
2021-11-11 1
2021-11-16 2
"""
assert result == expected_result
def test_cache_read_bytes(started_cluster):
node = started_cluster.instances["h0_0_0"]
result = node.query(
"""
CREATE TABLE IF NOT EXISTS default.demo_parquet_1 (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo') PARTITION BY(day)
"""
)
test_passed = False
for i in range(10):
result = node.query(
"""
SELECT * FROM default.demo_parquet_1 settings input_format_parquet_allow_missing_columns = true
"""
)
node.query("system flush logs")
result = node.query(
"select sum(ProfileEvent_ExternalDataSourceLocalCacheReadBytes) from system.metric_log where ProfileEvent_ExternalDataSourceLocalCacheReadBytes > 0"
)
if result.strip() == "0":
logging.info("ProfileEvent_ExternalDataSourceLocalCacheReadBytes == 0")
time.sleep(10)
continue
test_passed = True
break
assert test_passed
def test_cache_dir_use(started_cluster):
node = started_cluster.instances["h0_0_0"]
result0 = node.exec_in_container(
["bash", "-c", "ls /tmp/clickhouse_local_cache | wc -l"]
)
result1 = node.exec_in_container(
["bash", "-c", "ls /tmp/clickhouse_local_cache1 | wc -l"]
)
assert result0 != "0" and result1 != "0"
|
src/tests/t_general.py
|
tizenorg/platform.upstream.krb5
| 372 |
86559
|
#!/usr/bin/python
from k5test import *
for realm in multipass_realms(create_host=False):
# Create a policy and see if it survives a dump/load.
realm.run_kadminl('addpol fred')
dumpfile = os.path.join(realm.testdir, 'dump')
realm.run_as_master([kdb5_util, 'dump', dumpfile])
realm.run_as_master([kdb5_util, 'load', dumpfile])
output = realm.run_kadminl('getpols')
if 'fred\n' not in output:
fail('Policy not preserved across dump/load.')
# Check that kinit fails appropriately with the wrong password.
output = realm.run_as_client([kinit, realm.user_princ], input='wrong\n',
expected_code=1)
if 'Password incorrect while getting initial credentials' not in output:
fail('Expected error message not seen in kinit output')
# Check that we can kinit as a different principal.
realm.kinit(realm.admin_princ, password('<PASSWORD>'))
realm.klist(realm.admin_princ)
# Test FAST kinit.
fastpw = password('<PASSWORD>')
realm.run_kadminl('ank -pw %s +requires_preauth user/fast' % fastpw)
realm.kinit('user/fast', fastpw)
realm.kinit('user/fast', fastpw, flags=['-T', realm.ccache])
realm.klist('user/fast@%s' % realm.realm)
# Test kinit against kdb keytab
realm.run_as_master([kinit, "-k", "-t",
"KDB:", realm.user_princ])
# Test kdestroy and klist of a non-existent ccache.
realm.run_as_client([kdestroy])
output = realm.run_as_client([klist], expected_code=1)
if 'No credentials cache found' not in output:
fail('Expected error message not seen in klist output')
# Test handling of kvno values beyond 255.
princ = 'foo/bar@%s' % realm.realm
realm.addprinc(princ)
realm.run_kadminl('modprinc -kvno 252 %s' % princ)
for kvno in range(253, 259):
realm.run_kadminl('ktadd -k %s %s' % (realm.keytab, princ))
realm.klist_keytab(princ)
output = realm.run_kadminl('getprinc %s' % princ)
if 'Key: vno 258,' not in output:
fail('Expected vno not seen in kadmin.local output')
success('Dump/load, FAST kinit, kdestroy, kvno wrapping.')
|
djangoerp/core/templatetags/avatar.py
|
xarala221/django-erp
| 345 |
86580
|
#!/usr/bin/env python
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.5'
from hashlib import md5
from django import template
from django.utils.html import format_html, mark_safe, escape
register = template.Library()
@register.simple_tag
def avatar(email, size=32, default="mm", css_class="avatar image"):
"""Returns the gravatar image associated to the given email.
More info: http://www.gravatar.com
Example tag usage: {% avatar email_address 80 "http://.../my_default_image.jpg" [css_class] %}
"""
# Creates and returns the URL.
h = ""
if email:
h = md5(email.encode('utf-8')).hexdigest()
url = 'http://www.gravatar.com/avatar/%s?s=%s&r=g' % (h, escape(size))
# Adds a default image URL (if present).
if default:
url += "&d=%s" % escape(default)
url = mark_safe(url)
return format_html('<img class="{}" width="{}" height="{}" src="{}" />', css_class, size, size, url)
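# Hedged usage sketch (editorial note, illustrative only): {% avatar user.email 64 %}
# renders roughly
# <img class="avatar image" width="64" height="64"
#      src="http://www.gravatar.com/avatar/<md5-of-email>?s=64&r=g&d=mm" />
# where <md5-of-email> is the md5 hex digest of the e-mail address.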
|
DQM/SiStripMonitorClient/python/RecoForDQM_Cosmic_cff.py
|
ckamtsikis/cmssw
| 852 |
86593
|
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
# Digitiser ####
# SiStrip
from EventFilter.SiStripRawToDigi.SiStripDigis_cfi import *
siStripDigis.ProductLabel = 'source'
# SiPixel
from EventFilter.SiPixelRawToDigi.SiPixelRawToDigi_cfi import *
siPixelDigis.InputLabel = 'source'
# Local Reco Cosmic ####
from RecoLocalTracker.Configuration.RecoLocalTracker_Cosmics_cff import *
#DefaultClusterizer.ConditionsLabel = '' #not needed to specify it is used as default
# Track Reconstruction Cosmic ########
from RecoTracker.Configuration.RecoTrackerP5_cff import *
# Beam Spot ########
from RecoVertex.BeamSpotProducer.BeamSpot_cff import *
# Reconstruction Sequence
RecoForDQMCosmic = cms.Sequence(siPixelDigis*siStripDigis*offlineBeamSpot*trackerlocalreco*ctftracksP5)
|
twilio/rest/api/v2010/account/call/event.py
|
BrimmingDev/twilio-python
| 1,362 |
86615
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class EventList(ListResource):
def __init__(self, version, account_sid, call_sid):
"""
Initialize the EventList
:param Version version: Version that contains the resource
:param account_sid: The SID of the Account that created this resource
:param call_sid: The unique string that identifies this resource
:returns: twilio.rest.api.v2010.account.call.event.EventList
:rtype: twilio.rest.api.v2010.account.call.event.EventList
"""
super(EventList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, 'call_sid': call_sid, }
self._uri = '/Accounts/{account_sid}/Calls/{call_sid}/Events.json'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams EventInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.call.event.EventInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists EventInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.call.event.EventInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of EventInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of EventInstance
:rtype: twilio.rest.api.v2010.account.call.event.EventPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return EventPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of EventInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of EventInstance
:rtype: twilio.rest.api.v2010.account.call.event.EventPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return EventPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.EventList>'
class EventPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the EventPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The SID of the Account that created this resource
:param call_sid: The unique string that identifies this resource
:returns: twilio.rest.api.v2010.account.call.event.EventPage
:rtype: twilio.rest.api.v2010.account.call.event.EventPage
"""
super(EventPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of EventInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.call.event.EventInstance
:rtype: twilio.rest.api.v2010.account.call.event.EventInstance
"""
return EventInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.EventPage>'
class EventInstance(InstanceResource):
def __init__(self, version, payload, account_sid, call_sid):
"""
Initialize the EventInstance
:returns: twilio.rest.api.v2010.account.call.event.EventInstance
:rtype: twilio.rest.api.v2010.account.call.event.EventInstance
"""
super(EventInstance, self).__init__(version)
# Marshaled Properties
self._properties = {'request': payload.get('request'), 'response': payload.get('response'), }
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'call_sid': call_sid, }
@property
def request(self):
"""
:returns: Call Request.
:rtype: dict
"""
return self._properties['request']
@property
def response(self):
"""
:returns: Call Response with Events.
:rtype: dict
"""
return self._properties['response']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.EventInstance>'
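# Hedged usage sketch (editorial note, illustrative only; assumes an initialized
# `version` object obtained from a Twilio client):
# events = EventList(version, account_sid='ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
#                    call_sid='CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
# for event in events.stream(limit=20):
#     print(event.request, event.response)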
|
monk/pytorch_tests.py
|
take2rohit/monk_v1
| 542 |
86634
|
<reponame>take2rohit/monk_v1<gh_stars>100-1000
import os
import sys
import time
from monk.pip_unit_tests.pytorch.test_optimizer_sgd import test_optimizer_sgd
from monk.pip_unit_tests.pytorch.test_optimizer_nesterov_sgd import test_optimizer_nesterov_sgd
from monk.pip_unit_tests.pytorch.test_optimizer_rmsprop import test_optimizer_rmsprop
from monk.pip_unit_tests.pytorch.test_optimizer_momentum_rmsprop import test_optimizer_momentum_rmsprop
from monk.pip_unit_tests.pytorch.test_optimizer_adam import test_optimizer_adam
from monk.pip_unit_tests.pytorch.test_optimizer_adamax import test_optimizer_adamax
from monk.pip_unit_tests.pytorch.test_optimizer_adamw import test_optimizer_adamw
from monk.pip_unit_tests.pytorch.test_optimizer_adadelta import test_optimizer_adadelta
from monk.pip_unit_tests.pytorch.test_optimizer_adagrad import test_optimizer_adagrad
from monk.pip_unit_tests.pytorch.test_loss_l1 import test_loss_l1
from monk.pip_unit_tests.pytorch.test_loss_l2 import test_loss_l2
from monk.pip_unit_tests.pytorch.test_loss_l2 import test_loss_l2
from monk.pip_unit_tests.pytorch.test_loss_softmax_crossentropy import test_loss_softmax_crossentropy
from monk.pip_unit_tests.pytorch.test_loss_crossentropy import test_loss_crossentropy
from monk.pip_unit_tests.pytorch.test_loss_sigmoid_binary_crossentropy import test_loss_sigmoid_binary_crossentropy
from monk.pip_unit_tests.pytorch.test_loss_binary_crossentropy import test_loss_binary_crossentropy
from monk.pip_unit_tests.pytorch.test_loss_kldiv import test_loss_kldiv
from monk.pip_unit_tests.pytorch.test_loss_poisson_nll import test_loss_poisson_nll
from monk.pip_unit_tests.pytorch.test_loss_huber import test_loss_huber
from monk.pip_unit_tests.pytorch.test_loss_hinge import test_loss_hinge
from monk.pip_unit_tests.pytorch.test_loss_squared_hinge import test_loss_squared_hinge
from monk.pip_unit_tests.pytorch.test_loss_multimargin import test_loss_multimargin
from monk.pip_unit_tests.pytorch.test_loss_squared_multimargin import test_loss_squared_multimargin
from monk.pip_unit_tests.pytorch.test_loss_multilabelmargin import test_loss_multilabelmargin
from monk.pip_unit_tests.pytorch.test_loss_multilabelsoftmargin import test_loss_multilabelsoftmargin
from monk.pip_unit_tests.pytorch.test_layer_convolution1d import test_layer_convolution1d
from monk.pip_unit_tests.pytorch.test_layer_convolution2d import test_layer_convolution2d
from monk.pip_unit_tests.pytorch.test_layer_convolution3d import test_layer_convolution3d
from monk.pip_unit_tests.pytorch.test_layer_transposed_convolution1d import test_layer_transposed_convolution1d
from monk.pip_unit_tests.pytorch.test_layer_transposed_convolution2d import test_layer_transposed_convolution2d
from monk.pip_unit_tests.pytorch.test_layer_transposed_convolution3d import test_layer_transposed_convolution3d
from monk.pip_unit_tests.pytorch.test_layer_max_pooling1d import test_layer_max_pooling1d
from monk.pip_unit_tests.pytorch.test_layer_max_pooling2d import test_layer_max_pooling2d
from monk.pip_unit_tests.pytorch.test_layer_max_pooling3d import test_layer_max_pooling3d
from monk.pip_unit_tests.pytorch.test_layer_average_pooling1d import test_layer_average_pooling1d
from monk.pip_unit_tests.pytorch.test_layer_average_pooling2d import test_layer_average_pooling2d
from monk.pip_unit_tests.pytorch.test_layer_average_pooling3d import test_layer_average_pooling3d
from monk.pip_unit_tests.pytorch.test_layer_global_max_pooling1d import test_layer_global_max_pooling1d
from monk.pip_unit_tests.pytorch.test_layer_global_max_pooling2d import test_layer_global_max_pooling2d
from monk.pip_unit_tests.pytorch.test_layer_global_max_pooling3d import test_layer_global_max_pooling3d
from monk.pip_unit_tests.pytorch.test_layer_global_average_pooling1d import test_layer_global_average_pooling1d
from monk.pip_unit_tests.pytorch.test_layer_global_average_pooling2d import test_layer_global_average_pooling2d
from monk.pip_unit_tests.pytorch.test_layer_global_average_pooling3d import test_layer_global_average_pooling3d
from monk.pip_unit_tests.pytorch.test_layer_batch_normalization import test_layer_batch_normalization
from monk.pip_unit_tests.pytorch.test_layer_instance_normalization import test_layer_instance_normalization
from monk.pip_unit_tests.pytorch.test_layer_layer_normalization import test_layer_layer_normalization
from monk.pip_unit_tests.pytorch.test_layer_identity import test_layer_identity
from monk.pip_unit_tests.pytorch.test_layer_fully_connected import test_layer_fully_connected
from monk.pip_unit_tests.pytorch.test_layer_dropout import test_layer_dropout
from monk.pip_unit_tests.pytorch.test_layer_flatten import test_layer_flatten
from monk.pip_unit_tests.pytorch.test_activation_relu import test_activation_relu
from monk.pip_unit_tests.pytorch.test_activation_sigmoid import test_activation_sigmoid
from monk.pip_unit_tests.pytorch.test_activation_tanh import test_activation_tanh
from monk.pip_unit_tests.pytorch.test_activation_softplus import test_activation_softplus
from monk.pip_unit_tests.pytorch.test_activation_softsign import test_activation_softsign
from monk.pip_unit_tests.pytorch.test_activation_elu import test_activation_elu
from monk.pip_unit_tests.pytorch.test_activation_leaky_relu import test_activation_leaky_relu
from monk.pip_unit_tests.pytorch.test_activation_prelu import test_activation_prelu
from monk.pip_unit_tests.pytorch.test_activation_selu import test_activation_selu
from monk.pip_unit_tests.pytorch.test_activation_hardshrink import test_activation_hardshrink
from monk.pip_unit_tests.pytorch.test_activation_hardtanh import test_activation_hardtanh
from monk.pip_unit_tests.pytorch.test_activation_logsigmoid import test_activation_logsigmoid
from monk.pip_unit_tests.pytorch.test_activation_relu6 import test_activation_relu6
from monk.pip_unit_tests.pytorch.test_activation_rrelu import test_activation_rrelu
from monk.pip_unit_tests.pytorch.test_activation_celu import test_activation_celu
from monk.pip_unit_tests.pytorch.test_activation_softshrink import test_activation_softshrink
from monk.pip_unit_tests.pytorch.test_activation_tanhshrink import test_activation_tanhshrink
from monk.pip_unit_tests.pytorch.test_activation_threshold import test_activation_threshold
from monk.pip_unit_tests.pytorch.test_activation_softmin import test_activation_softmin
from monk.pip_unit_tests.pytorch.test_activation_softmax import test_activation_softmax
from monk.pip_unit_tests.pytorch.test_activation_logsoftmax import test_activation_logsoftmax
from monk.pip_unit_tests.pytorch.test_layer_concatenate import test_layer_concatenate
from monk.pip_unit_tests.pytorch.test_layer_add import test_layer_add
from monk.pip_unit_tests.pytorch.test_block_resnet_v1 import test_block_resnet_v1
from monk.pip_unit_tests.pytorch.test_block_resnet_v2 import test_block_resnet_v2
from monk.pip_unit_tests.pytorch.test_block_resnet_v1_bottleneck import test_block_resnet_v1_bottleneck
from monk.pip_unit_tests.pytorch.test_block_resnet_v2_bottleneck import test_block_resnet_v2_bottleneck
from monk.pip_unit_tests.pytorch.test_block_resnext import test_block_resnext
from monk.pip_unit_tests.pytorch.test_block_mobilenet_v2_linear_bottleneck import test_block_mobilenet_v2_linear_bottleneck
from monk.pip_unit_tests.pytorch.test_block_mobilenet_v2_inverted_linear_bottleneck import test_block_mobilenet_v2_inverted_linear_bottleneck
from monk.pip_unit_tests.pytorch.test_block_squeezenet_fire import test_block_squeezenet_fire
from monk.pip_unit_tests.pytorch.test_block_densenet import test_block_densenet
from monk.pip_unit_tests.pytorch.test_block_conv_bn_relu import test_block_conv_bn_relu
from monk.pip_unit_tests.pytorch.test_block_inception_a import test_block_inception_a
from monk.pip_unit_tests.pytorch.test_block_inception_b import test_block_inception_b
from monk.pip_unit_tests.pytorch.test_block_inception_c import test_block_inception_c
from monk.pip_unit_tests.pytorch.test_block_inception_d import test_block_inception_d
from monk.pip_unit_tests.pytorch.test_block_inception_e import test_block_inception_e
from monk.pip_functionality_tests.pytorch.test_default_train import test_default_train
from monk.pip_functionality_tests.pytorch.test_default_eval_infer import test_default_eval_infer
from monk.pip_functionality_tests.pytorch.test_update_copy_from import test_update_copy_from
from monk.pip_functionality_tests.pytorch.test_update_normal import test_update_normal
from monk.pip_functionality_tests.pytorch.test_update_eval_infer import test_update_eval_infer
from monk.pip_functionality_tests.pytorch.test_expert_train import test_expert_train
from monk.pip_functionality_tests.pytorch.test_expert_eval_infer import test_expert_eval_infer
from monk.pip_functionality_tests.pytorch.test_switch_default import test_switch_default
from monk.pip_functionality_tests.pytorch.test_switch_expert import test_switch_expert
from monk.pip_functionality_tests.pytorch.test_compare import test_compare
from monk.pip_functionality_tests.pytorch.test_analyse import test_analyse
def run_functionality_tests():
origstdout = sys.stdout
print("Running Tests...");
sys.stdout = open("test_logs.txt", 'w');
system_dict = {};
system_dict["total_tests"] = 0;
system_dict["successful_tests"] = 0;
system_dict["failed_tests_lists"] = [];
system_dict["failed_tests_exceptions"] = [];
system_dict["skipped_tests_lists"] = [];
start = time.time()
    # Run each functionality test in order, appending its output to test_logs.txt
    # and echoing progress to the console. (The original code repeated this block
    # once per test; driving it from a list keeps the behaviour identical while
    # fixing the "Succesful" typo in the progress messages.)
    functionality_tests = [
        test_default_train,
        test_default_eval_infer,
        test_update_copy_from,
        test_update_normal,
        test_update_eval_infer,
        test_expert_train,
        test_expert_eval_infer,
        test_switch_default,
        test_switch_expert,
        test_compare,
        test_analyse,
    ];

    for test_num, test in enumerate(functionality_tests, start=1):
        print("Running {}/{}".format(test_num, len(functionality_tests)));
        sys.stdout = open("test_logs.txt", 'a');
        system_dict = test(system_dict)
        sys.stdout = origstdout;
        print("Tests Completed - {}".format(system_dict["total_tests"]));
        print("Tests Successful - {}".format(system_dict["successful_tests"]));
        print("")
sys.stdout = open("test_logs.txt", 'a');
end = time.time();
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("");
for i in range(len(system_dict["failed_tests_lists"])):
print("{}. Failed Test:".format(i+1));
print("Name - {}".format(system_dict["failed_tests_lists"][i]));
print("Error - {}".format(system_dict["failed_tests_exceptions"][i]));
print("");
print("Skipped Tests List - {}".format(system_dict["skipped_tests_lists"]));
print("");
sys.stdout = origstdout;
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("See test_logs.txt for errors");
print("");
os.system("rm -r workspace");
def run_unit_tests():
origstdout = sys.stdout
print("Running Tests...");
sys.stdout = open("test_logs.txt", 'w');
system_dict = {};
system_dict["total_tests"] = 0;
system_dict["successful_tests"] = 0;
system_dict["failed_tests_lists"] = [];
system_dict["failed_tests_exceptions"] = [];
system_dict["skipped_tests_lists"] = [];
start = time.time()
    # Run each unit test in order. The original code repeated this block once per
    # test and printed a literal "<num>" placeholder for the total count; driving
    # the same calls from a list fixes the count, redirects every test's output to
    # test_logs.txt before printing the console summary, and corrects the
    # "Succesful" typo in the progress messages.
    unit_tests = [
        test_optimizer_sgd,
        test_optimizer_nesterov_sgd,
        test_optimizer_rmsprop,
        test_optimizer_adam,
        test_optimizer_adamax,
        test_optimizer_adamw,
        test_optimizer_adadelta,
        test_optimizer_adagrad,
        test_loss_l1,
        test_loss_l2,
        test_loss_softmax_crossentropy,
        test_loss_crossentropy,
        test_loss_sigmoid_binary_crossentropy,
        test_loss_binary_crossentropy,
        test_loss_kldiv,
        test_loss_poisson_nll,
        test_loss_huber,
        test_loss_hinge,
        test_loss_squared_hinge,
        test_loss_multimargin,
        test_loss_squared_multimargin,
        test_loss_multilabelmargin,
        test_loss_multilabelsoftmargin,
        test_layer_convolution1d,
        test_layer_convolution2d,
        test_layer_convolution3d,
        test_layer_transposed_convolution1d,
        test_layer_transposed_convolution2d,
        test_layer_transposed_convolution3d,
        test_layer_max_pooling1d,
        test_layer_max_pooling2d,
        test_layer_max_pooling3d,
        test_layer_average_pooling1d,
        test_layer_average_pooling2d,
        test_layer_average_pooling3d,
        test_layer_global_max_pooling1d,
        test_layer_global_max_pooling2d,
        test_layer_global_max_pooling3d,
        test_layer_global_average_pooling1d,
        test_layer_global_average_pooling2d,
        test_layer_global_average_pooling3d,
        test_layer_batch_normalization,
        test_layer_instance_normalization,
        test_layer_layer_normalization,
        test_layer_identity,
        test_layer_fully_connected,
        test_layer_dropout,
        test_layer_flatten,
        test_activation_relu,
        test_activation_sigmoid,
        test_activation_tanh,
        test_activation_softplus,
        test_activation_softsign,
        test_activation_elu,
        test_activation_leaky_relu,
        test_activation_prelu,
        test_activation_selu,
        test_activation_hardshrink,
        test_activation_hardtanh,
        test_activation_logsigmoid,
        test_activation_relu6,
        test_activation_rrelu,
        test_activation_celu,
        test_activation_softshrink,
        test_activation_tanhshrink,
        test_activation_threshold,
        test_activation_softmin,
        test_activation_softmax,
        test_activation_logsoftmax,
        test_layer_concatenate,
        test_layer_add,
        test_block_resnet_v1,
        test_block_resnet_v2,
        test_block_resnet_v1_bottleneck,
        test_block_resnet_v2_bottleneck,
        test_block_resnext,
        test_block_mobilenet_v2_linear_bottleneck,
        test_block_mobilenet_v2_inverted_linear_bottleneck,
        test_block_squeezenet_fire,
        test_block_densenet,
        test_block_conv_bn_relu,
        test_block_inception_a,
        test_block_inception_b,
        test_block_inception_c,
        test_block_inception_d,
        test_block_inception_e,
    ];

    for exp_num, test in enumerate(unit_tests, start=1):
        print("Running {}/{}".format(exp_num, len(unit_tests)));
        sys.stdout = open("test_logs.txt", 'a');
        system_dict = test(system_dict)
        sys.stdout = origstdout;
        print("Tests Completed - {}".format(system_dict["total_tests"]));
        print("Tests Successful - {}".format(system_dict["successful_tests"]));
        print("")
sys.stdout = open("test_logs.txt", 'a');
end = time.time();
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("");
for i in range(len(system_dict["failed_tests_lists"])):
print("{}. Failed Test:".format(i+1));
print("Name - {}".format(system_dict["failed_tests_lists"][i]));
print("Error - {}".format(system_dict["failed_tests_exceptions"][i]));
print("");
print("Skipped Tests List - {}".format(system_dict["skipped_tests_lists"]));
print("");
sys.stdout = origstdout;
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("See test_logs.txt for errors");
print("");
os.system("rm -r workspace");
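# ------------------------------------------------------------------------------
# Hedged convenience entry point (not part of the original module): a minimal
# sketch for launching either suite when this file is executed directly. The
# "functionality"/"unit" argument names are illustrative choices, not an
# established CLI of the monk package.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Run the monk pip test suites");
    parser.add_argument("suite", choices=["functionality", "unit"], help="which test suite to run");
    cli_args = parser.parse_args();
    if cli_args.suite == "functionality":
        run_functionality_tests();
    else:
        run_unit_tests();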
|
python/GafferSceneUI/BoundQueryUI.py
|
ddesmond/gaffer
| 561 |
86659
|
##########################################################################
#
# Copyright (c) 2021, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferScene
Gaffer.Metadata.registerNode(
GafferScene.BoundQuery,
"description",
"""
Queries a particular location in a scene and outputs the bound.
""",
"layout:activator:spaceIsRelative", lambda node : node["space"].getValue() == GafferScene.BoundQuery.Space.Relative,
plugs = {
"scene" : [
"description",
"""
The scene to query the bounds for.
"""
],
"location" : [
"description",
"""
The location within the scene to query the bound at.
> Note : If the location does not exist then the query will not be
> performed and all outputs will be set to their default values.
""",
"plugValueWidget:type", "GafferSceneUI.ScenePathPlugValueWidget",
"scenePathPlugValueWidget:scene", "scene",
"nodule:type", ""
],
"space" : [
"description",
"""
The space to query the bound in.
""",
"preset:Local", GafferScene.BoundQuery.Space.Local,
"preset:World", GafferScene.BoundQuery.Space.World,
"preset:Relative", GafferScene.BoundQuery.Space.Relative,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
"nodule:type", ""
],
"relativeLocation" : [
"description",
"""
The location within the scene to use for relative space mode.
> Note : If the location does not exist then the query will not be
> performed and all outputs will be set to their default values.
""",
"plugValueWidget:type", "GafferSceneUI.ScenePathPlugValueWidget",
"scenePathPlugValueWidget:scene", "scene",
"layout:activator", "spaceIsRelative",
"nodule:type", ""
],
"bound" : [
"description",
"""
Bounding box at specified location in specified space.
""",
"layout:section", "Settings.Outputs"
],
"center" : [
"description",
"""
Center point vector of the requested bound.
""",
"layout:section", "Settings.Outputs"
],
"size" : [
"description",
"""
Size vector of the requested bound.
""",
"layout:section", "Settings.Outputs"
],
}
)
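# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original UI file): querying a bound from
# Python, assuming a Gaffer scripting environment. The plug names follow the
# metadata registered above; the Sphere source, the "/sphere" location and the
# int() conversion of the Space enum are illustrative assumptions.
if __name__ == "__main__":
	script = Gaffer.ScriptNode()
	script["sphere"] = GafferScene.Sphere()
	script["boundQuery"] = GafferScene.BoundQuery()
	script["boundQuery"]["scene"].setInput( script["sphere"]["out"] )
	script["boundQuery"]["location"].setValue( "/sphere" )
	script["boundQuery"]["space"].setValue( int( GafferScene.BoundQuery.Space.World ) )
	print( script["boundQuery"]["bound"].getValue() )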
|
desktop/core/ext-py/ply-3.9/example/optcalc/calc.py
|
kokosing/hue
| 7,986 |
86701
|
<filename>desktop/core/ext-py/ply-3.9/example/optcalc/calc.py<gh_stars>1000+
# -----------------------------------------------------------------------------
# calc.py
#
# A simple calculator with variables. This is from O'Reilly's
# "Lex and Yacc", p. 63.
# -----------------------------------------------------------------------------
import sys
sys.path.insert(0, "../..")
if sys.version_info[0] >= 3:
raw_input = input
tokens = (
'NAME', 'NUMBER',
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS',
'LPAREN', 'RPAREN',
)
# Tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
def t_NUMBER(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %s" % t.value)
t.value = 0
return t
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
import ply.lex as lex
lex.lex(optimize=1)
# Parsing rules
precedence = (
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
('right', 'UMINUS'),
)
# dictionary of names
names = {}
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+':
t[0] = t[1] + t[3]
elif t[2] == '-':
t[0] = t[1] - t[3]
elif t[2] == '*':
t[0] = t[1] * t[3]
elif t[2] == '/':
t[0] = t[1] / t[3]
elif t[2] == '<':
t[0] = t[1] < t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
if t:
print("Syntax error at '%s'" % t.value)
else:
print("Syntax error at EOF")
import ply.yacc as yacc
yacc.yacc(optimize=1)
while 1:
try:
s = raw_input('calc > ')
except EOFError:
break
yacc.parse(s)
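# An illustrative interactive session (not part of the original example),
# assuming the grammar defined above:
#
#   calc > x = 3
#   calc > 4 * (x + 2)
#   20
#   calc > y
#   Undefined name 'y'
#   0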
|
tests/test_plotly.py
|
paultimothymooney/docker-python-2
| 2,030 |
86704
|
import unittest
import plotly.graph_objs as go
class TestPlotly(unittest.TestCase):
def test_figure(self):
trace = {'x': [1, 2], 'y': [1, 3]}
data = [ trace ]
go.Figure(data=data)
|
datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/windows/__init__.py
|
tdimnet/integrations-core
| 663 |
86764
|
<reponame>tdimnet/integrations-core
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import click
from ...console import CONTEXT_SETTINGS
from .pdh import pdh
ALL_COMMANDS = [pdh]
@click.group(context_settings=CONTEXT_SETTINGS, short_help='Windows utilities')
def windows():
pass
for command in ALL_COMMANDS:
windows.add_command(command)
|
src/robots/__init__.py
|
sergioisidoro/django-robots
| 252 |
86777
|
<gh_stars>100-1000
from pkg_resources import get_distribution
__version__ = get_distribution("django-robots").version
|
convlab2/policy/ppo/multiwoz/__init__.py
|
Malavikka/ConvLab-2
| 339 |
86781
|
<reponame>Malavikka/ConvLab-2<filename>convlab2/policy/ppo/multiwoz/__init__.py
from convlab2.policy.ppo.multiwoz.ppo_policy import PPOPolicy
|
omniglot/archs.py
|
tam17aki/pytorch-adacos
| 209 |
86847
|
import numpy as np
from torch import nn
from torch.nn import functional as F
import torch
from torchvision import models
import torchvision
__all__ = ['ResNet_IR']
class ResNet_IR(nn.Module):
def __init__(self, args):
super().__init__()
if args.backbone == 'resnet18':
self.backbone = models.resnet18(pretrained=True)
last_channels = 512
elif args.backbone == 'resnet34':
self.backbone = models.resnet34(pretrained=True)
last_channels = 512
elif args.backbone == 'resnet50':
self.backbone = models.resnet50(pretrained=True)
last_channels = 2048
elif args.backbone == 'resnet101':
self.backbone = models.resnet101(pretrained=True)
last_channels = 2048
elif args.backbone == 'resnet152':
self.backbone = models.resnet152(pretrained=True)
last_channels = 2048
self.features = nn.Sequential(
self.backbone.conv1,
self.backbone.bn1,
self.backbone.relu,
self.backbone.layer1,
self.backbone.layer2,
self.backbone.layer3,
self.backbone.layer4)
self.bn1 = nn.BatchNorm2d(last_channels)
self.dropout = nn.Dropout2d(0.5)
self.fc = nn.Linear(8*8*last_channels, args.num_features)
self.bn2 = nn.BatchNorm1d(args.num_features)
def freeze_bn(self):
for m in self.features.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.requires_grad = False
m.bias.requires_grad = False
def forward(self, x):
x = self.features(x)
x = self.bn1(x)
x = self.dropout(x)
x = x.view(x.shape[0], -1)
x = self.fc(x)
output = self.bn2(x)
return output
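# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): building the embedding
# network from an argparse-style namespace. The attribute names (backbone,
# num_features) mirror what the constructor reads above; the 128x128 input is
# an assumption chosen so that, with the max-pooling stage omitted from the
# feature stack, the backbone produces the 8x8 map expected by the fc layer.
if __name__ == '__main__':
    from types import SimpleNamespace
    args = SimpleNamespace(backbone='resnet18', num_features=512)
    model = ResNet_IR(args)
    model.eval()  # use running BN statistics for the tiny dummy batch
    with torch.no_grad():
        dummy = torch.randn(2, 3, 128, 128)
        print(model(dummy).shape)  # expected: torch.Size([2, 512])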
|
kuwala/common/jupyter/modules/poi_controller.py
|
bmahmoudyan/kuwala
| 381 |
86849
|
import json
import os
from geojson import Polygon
from kuwala.modules.common import polyfill_polygon
# Get the aggregated number of a specific POI category per H3 index at a given resolution
def get_pois_by_category_in_h3(sp, category, resolution, polygon_coords):
polygon_cells = None
if polygon_coords:
polygon_coords = json.loads(polygon_coords)
polygon = Polygon(polygon_coords)
polygon_cells = list(polyfill_polygon(polygon, resolution=resolution))
# noinspection SqlNoDataSourceInspection
query = '''
CALL {
MATCH (pc:PoiCategory)<-[:BELONGS_TO]-(po:PoiOSM)-[:BELONGS_TO]->(p:Poi)-[:LOCATED_AT]->(h:H3Index)
''' + f'''
WITH p, pc, io.kuwala.h3.h3ToParent(h.h3Index, {resolution}) AS h3_index
WHERE {f'h3_index IN {polygon_cells} AND' if polygon_cells else ''} pc.name = '{category}'
RETURN p
UNION
MATCH (pc:PoiCategory)<-[:BELONGS_TO]-(pg:PoiGoogle)-[b:BELONGS_TO]->(p:Poi)-[:LOCATED_AT]->(h:H3Index)
WITH p, pc, io.kuwala.h3.h3ToParent(h.h3Index, {resolution}) AS h3_index
WHERE
{f'h3_index IN {polygon_cells} AND' if polygon_cells else ''}
b.confidence >= 0.8 AND
pc.name = '{category}'
RETURN p
''' + '''}
WITH p
MATCH (p)-[:LOCATED_AT]->(h:H3Index)
''' + f'''WITH p, io.kuwala.h3.h3ToParent(h.h3Index, {resolution}) AS h3_index
RETURN h3_index, COUNT(p) AS number_of_{category}
'''
url = os.getenv('NEO4J_HOST') or 'bolt://localhost:7687'
return sp.read.format("org.neo4j.spark.DataSource") \
.option("url", url) \
.option("authentication.type", "basic") \
.option("authentication.basic.username", "neo4j") \
.option("authentication.basic.password", "password") \
.option("query", query) \
.load()
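# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the query above runs
# through the Neo4j Spark connector, so it needs an active SparkSession with
# that connector on the classpath and a reachable Neo4j instance (NEO4J_HOST,
# defaulting to bolt://localhost:7687). The category name and resolution below
# are illustrative assumptions.
#
#   from pyspark.sql import SparkSession
#   sp = SparkSession.builder.getOrCreate()
#   df = get_pois_by_category_in_h3(sp, category="restaurant", resolution=8, polygon_coords=None)
#   df.show()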
|
megatron/module.py
|
eric-haibin-lin/Megatron-LM
| 309 |
86873
|
<gh_stars>100-1000
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron Module"""
import torch
class MegatronModule(torch.nn.Module):
    """Megatron-specific extensions of torch Module."""
def __init__(self):
super(MegatronModule, self).__init__()
def state_dict_for_save_checkpoint(self, destination=None, prefix='',
keep_vars=False):
"""Use this function to override the state dict for
saving checkpoints."""
return self.state_dict(destination, prefix, keep_vars)
|
tick/base/tests/standard_normal_distribution_test.py
|
sumau/tick
| 411 |
86887
|
# License: BSD 3 clause
# -*- coding: utf8 -*-
import unittest
from tick.base.build.base import standard_normal_cdf, \
standard_normal_inv_cdf
from scipy.stats import norm
import numpy as np
from numpy.random import normal, uniform
class Test(unittest.TestCase):
def setUp(self):
self.size = 10
def test_standard_normal_cdf(self):
"""...Test normal cumulative distribution function
"""
tested_sample = normal(size=self.size)
actual = np.array([standard_normal_cdf(s) for s in tested_sample])
expected = norm.cdf(tested_sample)
np.testing.assert_almost_equal(actual, expected, decimal=7)
def test_standard_normal_inv_cdf(self):
"""...Test inverse of normal cumulative distribution function
"""
tested_sample = uniform(size=self.size)
actual = np.array([standard_normal_inv_cdf(s) for s in tested_sample])
expected = norm.ppf(tested_sample)
np.testing.assert_almost_equal(actual, expected, decimal=7)
actual_array = np.empty(self.size)
standard_normal_inv_cdf(tested_sample, actual_array)
np.testing.assert_almost_equal(actual_array, expected, decimal=7)
|
fastflix/widgets/panels/info_panel.py
|
ObviousInRetrospect/FastFlix
| 388 |
86895
|
<filename>fastflix/widgets/panels/info_panel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import re
from itertools import chain
from pathlib import Path
from typing import List, Union
from box import Box, BoxList
from PySide6 import QtCore, QtGui, QtWidgets
from fastflix.language import t
from fastflix.models.encode import AttachmentTrack
from fastflix.models.fastflix_app import FastFlixApp
from fastflix.models.video import Video
from fastflix.shared import link
logger = logging.getLogger("fastflix")
class InfoPanel(QtWidgets.QTabWidget):
def __init__(self, parent, app: FastFlixApp):
super().__init__(parent)
self.app = app
self.main = parent.main
self.attachments = Box()
def reset(self):
for i in range(self.count() - 1, -1, -1):
self.removeTab(i)
if not self.app.fastflix.current_video:
return
all_stream = []
for x in self.app.fastflix.current_video.streams.values():
all_stream.extend(x)
for stream in sorted(all_stream, key=lambda z: z["index"]):
widget = QtWidgets.QTextBrowser(self)
widget.setReadOnly(True)
widget.setDisabled(False)
widget.setText(Box(stream).to_yaml(default_flow_style=False))
self.addTab(widget, f"{stream['index']}: {stream['codec_type'].title()} ({stream.get('codec_name', '')})")
|
bot-bosses/tsadmiral/MyBot.py
|
HaliteChallenge/Halite-II
| 232 |
86925
|
"""
Welcome to your first Halite-II bot!
This bot's name is Settler. Its purpose is simple (don't expect it to win complex games :) ):
1. Initialize game
2. If a ship is not docked and there are unowned planets
2.a. Try to dock at the planet if close enough
2.b. If not, go towards the planet
Note: Please do not place print statements here as they are used to communicate with the Halite engine. If you need
to log anything use the logging module.
"""
# Let's start by importing the Halite Starter Kit so we can interface with the Halite engine
import hlt
import numpy
import math
import gc
import hlt.entity
import hlt.collision
import logging
import time
import random
# GAME START
# Here we define the bot's name and initialize the game, including communication with the Halite engine.
game = hlt.Game("MyBot16")
initialized = False
first_dock = False
cos = [math.cos(math.radians(x)) for x in range(360)]
sin = [math.sin(math.radians(x)) for x in range(360)]
def compute_dist(dx, dy):
return numpy.sqrt(dx * dx + dy * dy)
def compute_square_dist(dx, dy):
return dx * dx + dy * dy
def custom_intersect_segment_circle(start, end, circle, *, fudge=0.5):
# threshold = 2 * hlt.constants.MAX_SPEED + fudge + circle.radius
# if numpy.abs(start.x - circle.x) > threshold or numpy.abs(start.y - circle.y) > threshold:
# return False
dx = end.x - start.x
dy = end.y - start.y
a = dx**2 + dy**2
b = -2 * (start.x**2 - start.x*end.x - start.x*circle.x + end.x*circle.x +
start.y**2 - start.y*end.y - start.y*circle.y + end.y*circle.y)
c = (start.x - circle.x)**2 + (start.y - circle.y)**2
if a == 0.0:
# Start and end are the same point
return start.calculate_distance_between(circle) <= circle.radius + fudge
# Time along segment when closest to the circle (vertex of the quadratic)
t = min(-b / (2 * a), 1.0)
if t < 0:
return False
closest_x = start.x + dx * t
closest_y = start.y + dy * t
closest_distance = hlt.entity.Position(closest_x, closest_y).calculate_distance_between(circle)
return closest_distance <= circle.radius + fudge
SKIP_THRESHOLD = (hlt.constants.MAX_SPEED + 1.1) ** 2
def exists_obstacles_between(ship, target, all_planets, all_ships, all_my_ships_moves, ignore=()):
obstacles = []
entities = ([] if issubclass(hlt.entity.Planet, ignore) else all_planets) \
+ ([] if issubclass(hlt.entity.Ship , ignore) else all_ships) \
+ ([] if issubclass(hlt.entity.Ship , ignore) else all_my_ships_moves)
if not issubclass(hlt.entity.Planet, ignore):
for foreign_entity in all_planets:
if foreign_entity == ship or foreign_entity == target:
continue
if custom_intersect_segment_circle(ship, target, foreign_entity, fudge=ship.radius + 0.1):
return True
if not issubclass(hlt.entity.Ship, ignore):
for foreign_entity in all_ships + all_my_ships_moves:
if foreign_entity == ship or foreign_entity == target:
continue
if compute_square_dist(foreign_entity.x - ship.x, foreign_entity.y - ship.y) > SKIP_THRESHOLD:
continue
if custom_intersect_segment_circle(ship, target, foreign_entity, fudge=ship.radius + 0.1):
return True
return False
def custom_navigate(ship, target, game_map, max_speed, min_speed, speed_decay, step, all_planets, all_ships, all_my_ships_moves,
avoid_obstacles=True, max_corrections=90, angular_step=1,
ignore_ships=False, ignore_planets=False, suicide=False):
# Assumes a position, not planet (as it would go to the center of the planet otherwise)
if max_corrections <= 0:
return 999999, None, None
if not suicide:
distance = ship.calculate_distance_between(target) - target.radius - ship.radius
else:
distance = ship.calculate_distance_between(target)
angle = int(ship.calculate_angle_between(target))
ignore = () if not (ignore_ships or ignore_planets) \
else hlt.entity.Ship if (ignore_ships and not ignore_planets) \
else hlt.entity.Planet if (ignore_planets and not ignore_ships) \
else hlt.entity.Entity
if avoid_obstacles and exists_obstacles_between(ship, target, all_planets, all_ships, all_my_ships_moves, ignore):
new_angle = angle + angular_step
while new_angle >= 360:
new_angle -= 360
while new_angle < 0:
new_angle += 360
new_target_dx = cos[int(new_angle)] * distance
new_target_dy = sin[int(new_angle)] * distance
new_target = hlt.entity.Position(ship.x + new_target_dx, ship.y + new_target_dy)
return custom_navigate(ship, new_target, game_map, max_speed, min_speed, speed_decay, step + 1, all_planets, all_ships, all_my_ships_moves, True, max_corrections - 1, angular_step, ignore_ships, ignore_planets, suicide)
# TODO formalize this better
speed = max(max_speed - step * speed_decay, min_speed)
speed = speed if (distance >= speed) else distance - 0.1
final_target_dx = cos[int(angle)] * speed
final_target_dy = sin[int(angle)] * speed
final_target = hlt.entity.Position(ship.x + final_target_dx, ship.y + final_target_dy)
final_target.radius = ship.radius
return step, final_target, ship.thrust(speed, angle)
# parameters
ANGULAR_STEP = 6
MAX_SPEED = hlt.constants.MAX_SPEED
MIN_SPEED = hlt.constants.MAX_SPEED * 0.5
SPEED_DECAY = 0.0
MAX_CORRECTIONS = 30
MIN_OPPONENT_DIST_TO_DOCK = 25.0
MIN_OPPONENT_DIST_TO_TARGET_PLANET = 25.0
DOCKED_BONUS = 0.0
PLANET_BONUS = 10.0
UNDOCKED_BONUS = -100.0
MAX_OPPONENT_SHIP_TARGET_CNT = 4
MAX_MY_SHIP_TARGET_CNT = 4
PLANET_DOCKED_ALLIES_BONUS = 40.0
OPPONENT_SHIP_CLOSE_TO_MY_DOCKED_BONUS = 40.0
MAX_DIST_TO_TARGET_OPPONENT_UNDOCKED_SHIP = 15.0
#PLANET_CAPACITY_BONUS =
#UNDOCKED_OPPONENT_CLOSE_TO_MY_DOCKED_BONUS = 10.0
PLANET_NEARBY_PLANET_MAX_BONUS = 36.0
PLANET_NEARBY_PLANET_BIAS = 3.0
PLANET_NEARBY_PLANET_SLOPE = 0.25
SUICIDE_UNDOCKED_OPPONENT_DIST = 15.0
ALL_IN_DIST = 50.0
PLANET_FAR_FROM_CENTER_BONUS = 1.0
MAX_PLANET_FAR_FROM_CENTER_BONUS = 70.0
SUICIDE_HEALTH_MULT = 1.0
CLOSE_OPPONENT_DIST = 12.0
CLOSE_ALLY_DIST = 5.0
DOUBLE_NAVIGATE_SHIP_CNT = 999
def planet_nearby_empty_planet_score(dist_matrix, planet_owner, planet_capacity):
score = numpy.maximum(0.0, PLANET_NEARBY_PLANET_BIAS - dist_matrix * PLANET_NEARBY_PLANET_SLOPE)
score = ((planet_owner == -1) * planet_capacity)[numpy.newaxis,:] * ((planet_owner == -1) * planet_capacity)[:,numpy.newaxis] * score
return numpy.minimum(PLANET_NEARBY_PLANET_MAX_BONUS, numpy.sum(score, axis=0))
#PLANET_DOCK_SYNERGE_BONUS = 5.0
# TODOS
# 2. parameter tuning
# 5. collide to planets?
# 6. if timeout, move ship to center of the enemies or allies?
# 7. Add our own planet in target to be more defensive
# 8. count ships of I and opponent to figure out who's winning. If even, be more defensive
# 9. if I have more ships, collide to opponent planet
# 10. go to my ally when there's more enemy
# 11. if you are a lone warrior, far away from my docked ship, and many enemies in your target but no allies, get back
# 12. In a 4P game, be more defensive
# 13. Defend early game rush
# 14. Create a pivot
early_game_all_in = 0
while True:
# TURN START
st = time.time()
# Update the map for the new turn and get the latest version
game_map = game.update_map()
# Here we define the set of commands to be sent to the Halite engine at the end of the turn
command_queue = []
# initialize game info
if not initialized:
my_id = game_map.my_id
me = game_map.get_me()
width = game_map.width
height = game_map.height
initialized = True
# cache players, planets and ships
all_players_ids = game_map._players.keys()
num_players = len(all_players_ids)
all_planets = game_map.all_planets()
all_my_ships = game_map.get_me().all_ships()
num_my_ships = len(all_my_ships)
all_opponent_ships = []
for pid in all_players_ids:
if my_id != pid:
all_opponent_ships += game_map.get_player(pid).all_ships()
num_opponent_ships = len(all_opponent_ships)
all_ships = all_my_ships + all_opponent_ships
# cache coordinates and misc
all_my_ships_x = numpy.array([v.x for v in all_my_ships])
all_my_ships_y = numpy.array([v.y for v in all_my_ships])
all_my_ships_center_x = numpy.mean(all_my_ships_x)
all_my_ships_center_y = numpy.mean(all_my_ships_y)
all_opponent_ships_x = numpy.array([v.x for v in all_opponent_ships])
all_opponent_ships_y = numpy.array([v.y for v in all_opponent_ships])
all_opponent_ships_center_x = numpy.mean(all_opponent_ships_x)
all_opponent_ships_center_y = numpy.mean(all_opponent_ships_y)
all_planets_x = numpy.array([v.x for v in all_planets])
all_planets_y = numpy.array([v.y for v in all_planets])
my_ships_status = numpy.array([v.docking_status for v in all_my_ships])
num_my_undocked_ships = numpy.sum(my_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED)
opponent_ships_status = numpy.array([v.docking_status for v in all_opponent_ships])
num_opponent_undocked_ships = numpy.sum(opponent_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED)
planet_owner = numpy.array([-1 if v.owner is None else v.owner.id for v in all_planets])
def compute_dist_matrix(x1, y1, x2, y2):
dx = x1[:,numpy.newaxis] - x2[numpy.newaxis,:]
dy = y1[:,numpy.newaxis] - y2[numpy.newaxis,:]
return numpy.sqrt(dx * dx + dy * dy)
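    # Illustrative note: with, say, 3 of my ships and 5 opponent ships,
    # compute_dist_matrix returns a (3, 5) array of pairwise Euclidean
    # distances, built by broadcasting the coordinate differences before the
    # element-wise sqrt.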
my_ship_dist_matrix = compute_dist_matrix(all_my_ships_x, all_my_ships_y, all_my_ships_x, all_my_ships_y)
ship_dist_matrix = compute_dist_matrix(all_my_ships_x, all_my_ships_y, all_opponent_ships_x, all_opponent_ships_y)
planet_dist_matrix = compute_dist_matrix(all_my_ships_x, all_my_ships_y, all_planets_x, all_planets_y)
planet_planet_dist_matrix = compute_dist_matrix(all_planets_x, all_planets_y, all_planets_x, all_planets_y)
closest_opponent_ship = numpy.min(ship_dist_matrix, axis=1)
closest_undocked_opponent_ship = numpy.min(ship_dist_matrix + 99999999.0 * (opponent_ships_status != hlt.entity.Ship.DockingStatus.UNDOCKED)[numpy.newaxis,:], axis=1)
cnt_too_close_to_dock_opponent = numpy.sum((ship_dist_matrix < MIN_OPPONENT_DIST_TO_DOCK) * ((my_ships_status == hlt.entity.Ship.DockingStatus.DOCKED) | (my_ships_status == hlt.entity.Ship.DockingStatus.DOCKING))[:,numpy.newaxis], axis=0)
cnt_too_close_to_dock_ally = numpy.sum((ship_dist_matrix < MIN_OPPONENT_DIST_TO_DOCK) * ((my_ships_status == hlt.entity.Ship.DockingStatus.DOCKED) | (my_ships_status == hlt.entity.Ship.DockingStatus.DOCKING))[:,numpy.newaxis], axis=1)
close_opponent_ship_cnt = numpy.sum((ship_dist_matrix < CLOSE_OPPONENT_DIST) * (opponent_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED)[numpy.newaxis,:], axis=1)
close_ally_ship_cnt = numpy.sum((my_ship_dist_matrix < CLOSE_ALLY_DIST), axis=1)
    cnt_too_close_to_dock_closest_ally = numpy.zeros(len(all_my_ships), dtype=int)  # numpy.int is deprecated; plain int is equivalent
for i in range(len(all_opponent_ships)):
if opponent_ships_status[i] == hlt.entity.Ship.DockingStatus.UNDOCKED:
# TODO optimize this
k = numpy.argmin(ship_dist_matrix[:,i] + 99999999.0 * ((my_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED) | (my_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKING)))
if ship_dist_matrix[k][i] < MIN_OPPONENT_DIST_TO_DOCK:
cnt_too_close_to_dock_closest_ally[k] += 1
planet_capacity = numpy.array([p.num_docking_spots for p in all_planets])
planet_docked_cnt = numpy.array([len(p._docked_ship_ids) for p in all_planets]) #TODO does this include docking ships?
planet_remaining_cnt = planet_capacity - planet_docked_cnt
# my ship target scores
my_ship_score = numpy.array([0.0] * len(all_my_ships))
my_ship_score += OPPONENT_SHIP_CLOSE_TO_MY_DOCKED_BONUS * cnt_too_close_to_dock_closest_ally
my_ship_score += -99999999.0 * (cnt_too_close_to_dock_closest_ally == 0)
my_ship_max_target_cnt = numpy.minimum(MAX_MY_SHIP_TARGET_CNT, cnt_too_close_to_dock_closest_ally)
# opponent ship target scores
opponent_ship_score = numpy.array([0.0] * len(all_opponent_ships))
opponent_ship_score += OPPONENT_SHIP_CLOSE_TO_MY_DOCKED_BONUS * cnt_too_close_to_dock_opponent
opponent_ship_score += UNDOCKED_BONUS * \
((opponent_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKED) | (opponent_ships_status == hlt.entity.Ship.DockingStatus.UNDOCKING))
opponent_ship_score += DOCKED_BONUS * \
((opponent_ships_status == hlt.entity.Ship.DockingStatus.DOCKED) | (opponent_ships_status == hlt.entity.Ship.DockingStatus.DOCKING))
opponent_ship_max_target_cnt = numpy.array([MAX_OPPONENT_SHIP_TARGET_CNT] * len(all_opponent_ships))
# planet target scores
planet_score = numpy.array([PLANET_BONUS] * len(all_planets))
if not first_dock and num_players == 2:
planet_score[numpy.argmin(planet_dist_matrix[0])] += 20.0 # so that all ships go to the same planet at the beginning
planet_score[(planet_owner == my_id)] += PLANET_DOCKED_ALLIES_BONUS
if num_players == 2:
planet_score += planet_nearby_empty_planet_score(planet_planet_dist_matrix, planet_owner, planet_capacity)
elif num_players > 2:
planet_score += numpy.minimum(MAX_PLANET_FAR_FROM_CENTER_BONUS, PLANET_FAR_FROM_CENTER_BONUS * (compute_dist(all_planets_x - width / 2.0, all_planets_y - height / 2.0)))
planet_max_target_cnt = planet_remaining_cnt.copy()
my_ship_target_cnt = numpy.array([0] * len(all_my_ships))
opponent_ship_target_cnt = numpy.array([0] * len(all_opponent_ships))
planet_target_cnt = numpy.array([0] * len(all_planets))
my_ship_target_available = my_ship_target_cnt < my_ship_max_target_cnt
opponent_ship_target_available = opponent_ship_target_cnt < opponent_ship_max_target_cnt
planet_target_available = planet_target_cnt < planet_max_target_cnt
# Early game exception
if early_game_all_in == 0:
if len(all_my_ships) != 3 or len(all_opponent_ships) != 3 or num_players > 2 or numpy.sum(my_ships_status != hlt.entity.Ship.DockingStatus.UNDOCKED) == 3:
early_game_all_in = 2
if numpy.min(ship_dist_matrix) < ALL_IN_DIST:
early_game_all_in = 1
if early_game_all_in == 1:
opponent_ship_score += 1.0e9
# compute scores of all edges
scores = [0.0] * (len(all_my_ships) * (1 + len(all_planets) + len(all_opponent_ships) + len(all_my_ships)))
len_scores = 0
for k in range(len(all_my_ships)):
ed = time.time()
if ed - st > 1.7:
break
ship = all_my_ships[k]
if ship.docking_status != ship.DockingStatus.UNDOCKED:
continue
if not early_game_all_in == 1:
opponent_too_close_to_target_planet = False if closest_undocked_opponent_ship[k] > MIN_OPPONENT_DIST_TO_TARGET_PLANET else True
opponent_too_close_to_dock = False if closest_undocked_opponent_ship[k] > MIN_OPPONENT_DIST_TO_DOCK else True
for i in range(len(all_planets)):
planet = all_planets[i]
                if planet.owner is None or planet.owner.id == my_id:
dist_score = -(planet_dist_matrix[k][i] - planet.radius)
# TODO move this to planet_score
opponent_score = -99999999.0 if opponent_too_close_to_target_planet else 0.0 # TODO opponent_score # TODO geographical_score
total_score = planet_score[i] + dist_score + opponent_score
scores[len_scores] = (total_score, k, i, 'planet')
len_scores += 1
if ship.can_dock(planet) and not opponent_too_close_to_dock:
total_score = 99999999.0
scores[len_scores] = (total_score, k, i, 'dock')
len_scores += 1
else:
# TODO: suicide to opponent planet when I got more ships
pass
for i in range(len(all_my_ships)):
if my_ships_status[i] == hlt.entity.Ship.DockingStatus.UNDOCKED or my_ships_status[i] == hlt.entity.Ship.DockingStatus.UNDOCKING:
continue
mship = all_my_ships[i]
dist_score = -(my_ship_dist_matrix[k][i] - mship.radius)
total_score = my_ship_score[i] + dist_score
scores[len_scores] = (total_score, k, i, 'my_ship')
len_scores += 1
for i in range(len(all_opponent_ships)):
if ship_dist_matrix[k][i] > MAX_DIST_TO_TARGET_OPPONENT_UNDOCKED_SHIP and opponent_ships_status[i] == hlt.entity.Ship.DockingStatus.UNDOCKED and not early_game_all_in == 1:
continue
oship = all_opponent_ships[i]
dist_score = -(ship_dist_matrix[k][i] - oship.radius)
                # TODO geographical_score
total_score = opponent_ship_score[i] + dist_score
scores[len_scores] = (total_score, k, i, 'opponent_ship')
len_scores += 1
# choose action in decreasing score order
all_my_ships_moves_from = []
all_my_ships_moves_to = []
ship_used = numpy.array([False] * len(all_my_ships))
scores = sorted(scores[:len_scores], reverse=True)
for i in range(len(scores)):
ed = time.time()
if ed - st > 1.7:
break
ship_idx = scores[i][1]
my_ship = all_my_ships[ship_idx]
target_idx = scores[i][2]
action = scores[i][3]
if ship_used[ship_idx]:
continue
command = None
if action == 'dock':
if not planet_target_available[target_idx]:
continue
target = all_planets[target_idx]
command = my_ship.dock(target)
first_dock = True
planet_target_cnt[target_idx] += 1
if planet_target_cnt[target_idx] >= planet_max_target_cnt[target_idx]:
planet_target_available[target_idx] = False
elif action == 'planet':
if not planet_target_available[target_idx]:
continue
target = all_planets[target_idx]
# rand_angle = random.randint(0, 359)
# rand_dist = random.uniform(0.0, radius
# rand_target = hlt.entity.Position(target.x +
step, ship_move, command = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=ANGULAR_STEP, ignore_ships=False, ignore_planets=False, suicide=False)
if step != 0 and num_my_ships < DOUBLE_NAVIGATE_SHIP_CNT :
step2, ship_move2, command2 = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=-ANGULAR_STEP, ignore_ships=False, ignore_planets=False, suicide=False)
if step2 < step:
ship_move = ship_move2
command = command2
if (ship_move is not None) and (command is not None):
# TODO refactor this
collide = False
for j in range(len(all_my_ships_moves_from)):
end = hlt.entity.Position(ship_move.x - (all_my_ships_moves_to[j].x - all_my_ships_moves_from[j].x),
ship_move.y - (all_my_ships_moves_to[j].y - all_my_ships_moves_from[j].y))
end.radius = my_ship.radius
if custom_intersect_segment_circle(my_ship, end, all_my_ships_moves_from[j], fudge=my_ship.radius + 0.1):
collide = True
break
if not collide:
all_my_ships_moves_to.append(ship_move)
all_my_ships_moves_from.append(my_ship)
planet_target_cnt[target_idx] += 1
if planet_target_cnt[target_idx] >= planet_max_target_cnt[target_idx]:
planet_target_available[target_idx] = False
else:
command = None
ship_move = None
elif action == 'my_ship':
if not my_ship_target_available[target_idx]:
continue
target = all_my_ships[target_idx]
suicide = False
step, ship_move, command = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=ANGULAR_STEP, ignore_ships=False, ignore_planets=False, suicide=suicide)
if step != 0 and num_my_ships < DOUBLE_NAVIGATE_SHIP_CNT :
step2, ship_move2, command2 = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=-ANGULAR_STEP, ignore_ships=False, ignore_planets=False, suicide=suicide)
if step2 < step:
ship_move = ship_move2
command = command2
if (ship_move is not None) and (command is not None):
collide = False
for j in range(len(all_my_ships_moves_from)):
end = hlt.entity.Position(ship_move.x - (all_my_ships_moves_to[j].x - all_my_ships_moves_from[j].x),
ship_move.y - (all_my_ships_moves_to[j].y - all_my_ships_moves_from[j].y))
end.radius = my_ship.radius
if custom_intersect_segment_circle(my_ship, end, all_my_ships_moves_from[j], fudge=my_ship.radius + 0.1):
collide = True
break
if not collide:
all_my_ships_moves_to.append(ship_move)
all_my_ships_moves_from.append(my_ship)
my_ship_target_cnt[target_idx] += 1
if my_ship_target_cnt[target_idx] >= my_ship_max_target_cnt[target_idx]:
my_ship_target_available[target_idx] = False
else:
command = None
ship_move = None
elif action == 'opponent_ship':
if not opponent_ship_target_available[target_idx]:
continue
target = all_opponent_ships[target_idx]
suicide = False
ignore_ships = False
if not early_game_all_in == 1:
if my_ship.health <= SUICIDE_HEALTH_MULT * hlt.constants.WEAPON_DAMAGE * float(close_opponent_ship_cnt[ship_idx]) / float(close_ally_ship_cnt[ship_idx]) or \
(opponent_ships_status[target_idx] == hlt.entity.Ship.DockingStatus.DOCKED and closest_undocked_opponent_ship[ship_idx] < SUICIDE_UNDOCKED_OPPONENT_DIST):
suicide = True
ignore_ships = True
else:
if my_ship.health <= SUICIDE_HEALTH_MULT * hlt.constants.WEAPON_DAMAGE * float(close_opponent_ship_cnt[ship_idx]) / float(close_ally_ship_cnt[ship_idx]):
suicide = True
ignore_ships = True
step, ship_move, command = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=ANGULAR_STEP, ignore_ships=ignore_ships, ignore_planets=False, suicide=suicide)
if step != 0 and num_my_ships < DOUBLE_NAVIGATE_SHIP_CNT :
step2, ship_move2, command2 = custom_navigate(my_ship, target, game_map, MAX_SPEED, MIN_SPEED, SPEED_DECAY, 0,
all_planets, all_ships, all_my_ships_moves_to,
avoid_obstacles=True, max_corrections=MAX_CORRECTIONS,
angular_step=-ANGULAR_STEP, ignore_ships=ignore_ships, ignore_planets=False, suicide=suicide)
if step2 < step:
ship_move = ship_move2
command = command2
if (ship_move is not None) and (command is not None):
collide = False
for j in range(len(all_my_ships_moves_from)):
end = hlt.entity.Position(ship_move.x - (all_my_ships_moves_to[j].x - all_my_ships_moves_from[j].x),
ship_move.y - (all_my_ships_moves_to[j].y - all_my_ships_moves_from[j].y))
end.radius = my_ship.radius
if custom_intersect_segment_circle(my_ship, end, all_my_ships_moves_from[j], fudge=my_ship.radius + 0.1):
collide = True
break
if not collide:
all_my_ships_moves_to.append(ship_move)
all_my_ships_moves_from.append(my_ship)
opponent_ship_target_cnt[target_idx] += 1
if opponent_ship_target_cnt[target_idx] >= opponent_ship_max_target_cnt[target_idx]:
opponent_ship_target_available[target_idx] = False
else:
command = None
ship_move = None
else:
assert False
if command is not None:
ship_used[ship_idx] = True
command_queue.append(command)
# logging.info('my_id ' + str(my_id))
# for i in range(len(all_planets)):
# planet = all_planets[i]
# logging.info(planet.owner)
# Send our set of commands to the Halite engine for this turn
game.send_command_queue(command_queue)
# TURN END
# GAME END
|
src/api-service/__app__/onefuzzlib/azure/creds.py
|
tonybaloney/onefuzz
| 2,692 |
86967
|
<reponame>tonybaloney/onefuzz
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import functools
import logging
import os
import urllib.parse
from typing import Any, Callable, Dict, List, Optional, TypeVar, cast
from uuid import UUID
import requests
from azure.core.exceptions import ClientAuthenticationError
from azure.identity import DefaultAzureCredential
from azure.keyvault.secrets import SecretClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.subscription import SubscriptionClient
from memoization import cached
from msrestazure.azure_active_directory import MSIAuthentication
from msrestazure.tools import parse_resource_id
from onefuzztypes.primitives import Container, Region
from .monkeypatch import allow_more_workers, reduce_logging
# https://docs.microsoft.com/en-us/graph/api/overview?view=graph-rest-1.0
GRAPH_RESOURCE = "https://graph.microsoft.com"
GRAPH_RESOURCE_ENDPOINT = "https://graph.microsoft.com/v1.0"
@cached
def get_msi() -> MSIAuthentication:
allow_more_workers()
reduce_logging()
return MSIAuthentication()
@cached
def get_identity() -> DefaultAzureCredential:
allow_more_workers()
reduce_logging()
return DefaultAzureCredential()
@cached
def get_base_resource_group() -> Any: # should be str
return parse_resource_id(os.environ["ONEFUZZ_RESOURCE_GROUP"])["resource_group"]
@cached
def get_base_region() -> Region:
client = ResourceManagementClient(
credential=get_identity(), subscription_id=get_subscription()
)
group = client.resource_groups.get(get_base_resource_group())
return Region(group.location)
@cached
def get_subscription() -> Any: # should be str
return parse_resource_id(os.environ["ONEFUZZ_DATA_STORAGE"])["subscription"]
@cached
def get_insights_instrumentation_key() -> Any: # should be str
return os.environ["APPINSIGHTS_INSTRUMENTATIONKEY"]
@cached
def get_insights_appid() -> str:
return os.environ["APPINSIGHTS_APPID"]
@cached
def get_instance_name() -> str:
return os.environ["ONEFUZZ_INSTANCE_NAME"]
@cached
def get_instance_url() -> str:
return "https://%s.azurewebsites.net" % get_instance_name()
@cached
def get_instance_id() -> UUID:
from .containers import get_blob
from .storage import StorageType
blob = get_blob(Container("base-config"), "instance_id", StorageType.config)
if blob is None:
raise Exception("missing instance_id")
return UUID(blob.decode())
DAY_IN_SECONDS = 60 * 60 * 24
@cached(ttl=DAY_IN_SECONDS)
def get_regions() -> List[Region]:
subscription = get_subscription()
client = SubscriptionClient(credential=get_identity())
locations = client.subscriptions.list_locations(subscription)
return sorted([Region(x.name) for x in locations])
class GraphQueryError(Exception):
def __init__(self, message: str, status_code: Optional[int]) -> None:
super(GraphQueryError, self).__init__(message)
self.message = message
self.status_code = status_code
def query_microsoft_graph(
method: str,
resource: str,
params: Optional[Dict] = None,
body: Optional[Dict] = None,
) -> Dict:
cred = get_identity()
access_token = cred.get_token(f"{GRAPH_RESOURCE}/.default")
url = urllib.parse.urljoin(f"{GRAPH_RESOURCE_ENDPOINT}/", resource)
headers = {
"Authorization": "Bearer %s" % access_token.token,
"Content-Type": "application/json",
}
response = requests.request(
method=method, url=url, headers=headers, params=params, json=body
)
if 200 <= response.status_code < 300:
if response.content and response.content.strip():
json = response.json()
if isinstance(json, Dict):
return json
else:
raise GraphQueryError(
"invalid data expected a json object: HTTP"
f" {response.status_code} - {json}",
response.status_code,
)
else:
return {}
else:
error_text = str(response.content, encoding="utf-8", errors="backslashreplace")
raise GraphQueryError(
f"request did not succeed: HTTP {response.status_code} - {error_text}",
response.status_code,
)
def query_microsoft_graph_list(
method: str,
resource: str,
params: Optional[Dict] = None,
body: Optional[Dict] = None,
) -> List[Any]:
result = query_microsoft_graph(
method,
resource,
params,
body,
)
value = result.get("value")
if isinstance(value, list):
return value
else:
raise GraphQueryError("Expected data containing a list of values", None)
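# Usage sketch (illustrative; the Graph resource and paging parameter below are
# assumptions, not part of this module):
#
# apps = query_microsoft_graph_list("GET", "applications", params={"$top": "10"})
# for app in apps:
#     logging.info("found application %s", app.get("displayName"))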
@cached
def get_scaleset_identity_resource_path() -> str:
scaleset_id_name = "%s-scalesetid" % get_instance_name()
resource_group_path = "/subscriptions/%s/resourceGroups/%s/providers" % (
get_subscription(),
get_base_resource_group(),
)
return "%s/Microsoft.ManagedIdentity/userAssignedIdentities/%s" % (
resource_group_path,
scaleset_id_name,
)
@cached
def get_scaleset_principal_id() -> UUID:
api_version = "2018-11-30" # matches the apiversion in the deployment template
client = ResourceManagementClient(
credential=get_identity(), subscription_id=get_subscription()
)
uid = client.resources.get_by_id(get_scaleset_identity_resource_path(), api_version)
return UUID(uid.properties["principalId"])
@cached
def get_keyvault_client(vault_url: str) -> SecretClient:
return SecretClient(vault_url=vault_url, credential=DefaultAzureCredential())
def clear_azure_client_cache() -> None:
# clears the memoization of the Azure clients.
from .compute import get_compute_client
from .containers import get_blob_service
from .network_mgmt_client import get_network_client
from .storage import get_mgmt_client
# currently memoization.cache does not project the wrapped function's types.
# As a workaround, CI comments out the `cached` wrapper, then runs the type
# validation. This enables calling the wrapper's clear_cache if it's not
# disabled.
for func in [
get_msi,
get_identity,
get_compute_client,
get_blob_service,
get_network_client,
get_mgmt_client,
]:
clear_func = getattr(func, "clear_cache", None)
if clear_func is not None:
clear_func()
T = TypeVar("T", bound=Callable[..., Any])
class retry_on_auth_failure:
def __call__(self, func: T) -> T:
@functools.wraps(func)
def decorated(*args, **kwargs): # type: ignore
try:
return func(*args, **kwargs)
except ClientAuthenticationError as err:
logging.warning(
"clearing authentication cache after auth failure: %s", err
)
clear_azure_client_cache()
return func(*args, **kwargs)
return cast(T, decorated)
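# Usage sketch (illustrative; the wrapped helper below is hypothetical): the
# decorator retries a call once after clearing the cached Azure clients when a
# ClientAuthenticationError is raised.
#
# @retry_on_auth_failure()
# def list_regions_with_retry() -> List[Region]:
#     return get_regions()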
|
pyx12/examples/deident834.py
|
azoner/pyx12
| 120 |
86969
|
#! /usr/bin/env python
import sys
import getopt
import os.path
import logging
import random
# Intrapackage imports
libpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
if os.path.isdir(libpath):
sys.path.insert(0, libpath)
import pyx12
import pyx12.x12file
import pyx12.x12context
import pyx12.params
import pyx12.segment
import pyx12.error_handler
from collections import namedtuple
__author__ = '<NAME>'
__version__ = '1.0'
__date__ = '2015-02-12'
"""
De-identify 834 Enrollment file
Not production ready
"""
VERBOSE = 0
logger = logging.getLogger()
sub_idx = 0
Demographic = namedtuple('Demographic', 'primaryId, ssn, \
medicaidId, dob, dod, firstname, lastname, middlename, street, street2, county')
class FakeDeidentify(object):
def __init__(self):
pass
def getDeidentified(self, primaryId, datatree):
demo = Demographic(primaryId, '99999999', '009999999', '19500101', \
'', 'Joe', 'Smith', '', '123 Elm', '', '99')
return demo
class RandomDeidentify(object):
def __init__(self):
self.identities = {}
def getDeidentified(self, primaryId, datatree):
if primaryId in self.identities:
return self.identities[primaryId]
demo = Demographic(
primaryId = "{0:0>10}".format(random.randint(1000, 99999999999)),
ssn = "{0:0>9}".format(random.randint(10000, 999999999)),
medicaidId = "{0:0>10}".format(random.randint(1000, 99999999999)),
dob = '19520101',
dod = '',
firstname = 'AA',
lastname = 'Smith',
middlename = '',
street = "{0} Oak".format(random.randint(10, 9999)),
street2 = '',
county = '98'
)
self.identities[primaryId] = demo
return demo
def deidentify_file(fd_in):
"""
"""
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(param, errh, fd_in)
#deident = FakeDeidentify()
deident = RandomDeidentify()
with open('newfile.txt', 'w', encoding='ascii') as fd_out:
wr = pyx12.x12file.X12Writer(fd_out)
for datatree in src.iter_segments('2000'):
if datatree.id == '2000':
scrub2000(datatree, deident)
for seg1 in datatree.iterate_segments():
#wr.Write(seg1['segment'].format())
print((seg1['segment'].format()))
def scrub2000(loop_sub, deident):
primaryId = loop_sub.get_value('2100A/NM109')
demo = deident.getDeidentified(primaryId, loop_sub)
loop_sub.set_value('INS12', demo.dod)
loop_sub.set_value('REF[0F]02', demo.primaryId)
loop_sub.set_value('2100A/NM103', demo.lastname)
loop_sub.set_value('2100A/NM104', demo.firstname)
loop_sub.set_value('2100A/NM105', demo.middlename)
loop_sub.set_value('2100A/NM109', demo.medicaidId)
loop_sub.set_value('2100A/N301', demo.street)
loop_sub.set_value('2100A/N302', demo.street2)
loop_sub.set_value('2100A/N406', demo.county)
loop_sub.set_value('2100A/DMG02', demo.dob)
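# Illustrative note: the strings passed to get_value/set_value above use pyx12's
# loop/segment addressing, e.g. '2100A/NM103' selects element NM103 inside loop
# 2100A of the current 2000 loop, and 'REF[0F]02' selects element 2 of the REF
# segment whose qualifier is '0F'.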
def usage():
pgm_nme = os.path.basename(sys.argv[0])
sys.stdout.write('%s %s (%s)\n' % (pgm_nme, __version__, __date__))
sys.stdout.write('usage: %s [options] source_file\n' % (pgm_nme))
sys.stdout.write('\noptions:\n')
sys.stdout.write(' -h Help\n')
sys.stdout.write(' -d Debug mode\n')
sys.stdout.write(' -o output_directory \n')
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'dhv')
except getopt.error as msg:
usage()
return False
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
stdout_hdlr = logging.StreamHandler()
stdout_hdlr.setFormatter(formatter)
logger.addHandler(stdout_hdlr)
logger.setLevel(logging.INFO)
for o, a in opts:
if o == '-h':
usage()
return True
if o == '-d':
logger.setLevel(logging.DEBUG)
if o == '-v':
logger.setLevel(logging.DEBUG)
for file_in in args:
if not os.path.isfile(file_in):
logger.error('File %s was not found' % (file_in))
usage()
return False
#file_name = os.path.basename(file_in)
fd_in = open(file_in, 'r', encoding='ascii')
deidentify_file(fd_in)
return True
if __name__ == '__main__':
sys.exit(not main())
|
dex-net/src/dexnet/database/keys.py
|
peter0749/PointNetGPD
| 193 |
86996
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, <EMAIL>, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
# Keys for easy lookups in HDF5 databases
METRICS_KEY = 'metrics'
OBJECTS_KEY = 'objects'
MESH_KEY = 'mesh'
SDF_KEY = 'sdf'
GRASPS_KEY = 'grasps'
GRIPPERS_KEY = 'grippers'
NUM_GRASPS_KEY = 'num_grasps'
LOCAL_FEATURES_KEY = 'local_features'
GLOBAL_FEATURES_KEY = 'global_features'
SHOT_FEATURES_KEY = 'shot'
RENDERED_IMAGES_KEY = 'rendered_images'
SENSOR_DATA_KEY = 'sensor_data'
STP_KEY = 'stable_poses'
CATEGORY_KEY = 'category'
MASS_KEY = 'mass'
CONVEX_PIECES_KEY = 'convex_pieces'
CREATION_KEY = 'time_created'
DATASETS_KEY = 'datasets'
DATASET_KEY = 'dataset'
# data keys for easy access
SDF_DATA_KEY = 'data'
SDF_ORIGIN_KEY = 'origin'
SDF_RES_KEY = 'resolution'
SDF_POSE_KEY = 'pose'
SDF_SCALE_KEY = 'scale'
SDF_FRAME_KEY = 'frame'
MESH_VERTICES_KEY = 'vertices'
MESH_TRIANGLES_KEY = 'triangles'
MESH_NORMALS_KEY = 'normals'
MESH_POSE_KEY = 'pose'
MESH_SCALE_KEY = 'scale'
MESH_DENSITY_KEY = 'density'
LOCAL_FEATURE_NUM_FEAT_KEY = 'num_features'
LOCAL_FEATURE_DESC_KEY = 'descriptors'
LOCAL_FEATURE_RF_KEY = 'rfs'
LOCAL_FEATURE_POINT_KEY = 'points'
LOCAL_FEATURE_NORMAL_KEY = 'normals'
SHOT_FEATURES_KEY = 'shot'
FEATURE_KEY = 'feature'
NUM_STP_KEY = 'num_stable_poses'
POSE_KEY = 'pose'
STABLE_POSE_PROB_KEY = 'p'
STABLE_POSE_ROT_KEY = 'r'
STABLE_POSE_PT_KEY = 'x0'
NUM_GRASPS_KEY = 'num_grasps'
GRASP_KEY = 'grasp'
GRASP_ID_KEY = 'id'
GRASP_TYPE_KEY = 'type'
GRASP_CONFIGURATION_KEY = 'configuration'
GRASP_RF_KEY = 'frame'
GRASP_TIMESTAMP_KEY = 'timestamp'
GRASP_METRICS_KEY = 'metrics'
GRASP_FEATURES_KEY = 'features'
GRASP_FEATURE_NAME_KEY = 'name'
GRASP_FEATURE_TYPE_KEY = 'type'
GRASP_FEATURE_VECTOR_KEY = 'vector'
NUM_IMAGES_KEY = 'num_images'
IMAGE_KEY = 'image'
IMAGE_DATA_KEY = 'image_data'
IMAGE_FRAME_KEY = 'image_frame'
CAM_POS_KEY = 'cam_pos'
CAM_ROT_KEY = 'cam_rot'
CAM_INT_PT_KEY = 'cam_int_pt'
CAM_FRAME_KEY = 'cam_frame'
# Extras
RENDERED_IMAGE_TYPES = ['segmask', 'depth', 'scaled_depth']
# Metadata
METADATA_KEY = 'metadata'
METADATA_TYPE_KEY = 'type'
METADATA_DESC_KEY = 'description'
METADATA_FUNC_KEY = 'func'
# Connected components
CONNECTED_COMPONENTS_KEY = 'connected_components'
|
paddlespeech/server/utils/onnx_infer.py
|
jerryuhoo/PaddleSpeech
| 1,379 |
87000
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
import onnxruntime as ort
def get_sess(model_path: Optional[os.PathLike]=None, sess_conf: dict=None):
sess_options = ort.SessionOptions()
sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
if "gpu" in sess_conf["device"]:
# fastspeech2/mb_melgan can't use trt now!
if sess_conf["use_trt"]:
providers = ['TensorrtExecutionProvider']
else:
providers = ['CUDAExecutionProvider']
elif sess_conf["device"] == "cpu":
providers = ['CPUExecutionProvider']
sess_options.intra_op_num_threads = sess_conf["cpu_threads"]
sess = ort.InferenceSession(
model_path, providers=providers, sess_options=sess_options)
return sess
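# Usage sketch (illustrative; the model path and config values are hypothetical):
#
# if __name__ == '__main__':
#     conf = {"device": "cpu", "cpu_threads": 4, "use_trt": False}
#     sess = get_sess("exported_model.onnx", conf)
#     print([inp.name for inp in sess.get_inputs()])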
|
omaha_server/feedback/tasks.py
|
makar21/omaha-server
| 142 |
87005
|
# coding: utf8
"""
This software is licensed under the Apache 2 license, quoted below.
Copyright 2014 Crystalnix Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
import logging
import os
from django.core.mail.message import EmailMessage
from django.core.files.storage import default_storage
from omaha_server.celery import app
from feedback.models import Feedback
logger = logging.getLogger(__name__)
email_body_tmpl = """
Description: %s
Page URL: %s
User email: %s
User IP: %s
Feedback JSON data: %s
"""
@app.task(name='tasks.send_email_feedback', ignore_result=True, max_retries=12, bind=True)
def send_email_feedback(self, feedback_pk, sender, recipents):
try:
feedback = Feedback.objects.get(pk=feedback_pk)
except Feedback.DoesNotExist as exc:
        logger.error('Failed send_email_feedback',
                     exc_info=True,
                     extra=dict(feedback_pk=feedback_pk))
raise self.retry(exc=exc, countdown=2 ** send_email_feedback.request.retries)
recipients = [x.strip() for x in recipents.split(',')]
body = email_body_tmpl % (
feedback.description, feedback.page_url, feedback.email,
feedback.ip, feedback.feedback_data,
)
email = EmailMessage("Feedback # %s" % feedback_pk, body, sender, recipients)
attachments = [
feedback.screenshot,
feedback.blackbox,
feedback.system_logs,
feedback.attached_file
]
for attach in attachments:
if attach:
email.attach(os.path.basename(attach.name), attach.read())
email.send()
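# Usage sketch (illustrative; the addresses are hypothetical): the task is
# normally queued asynchronously from a view or signal handler, e.g.
#
# send_email_feedback.delay(feedback.pk, 'noreply@example.com',
#                           'ops@example.com, support@example.com')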
|
Examples/AppKit/CocoaBindings/TableModelWithSearch/FilteringArrayController.py
|
Khan/pyobjc-framework-Cocoa
| 132 |
87032
|
<gh_stars>100-1000
#
# FilteringArrayController.py
# TableModelWithSearch
#
# Created by <NAME> on Sun Apr 04 2004.
# Copyright (c) 2004 __MyCompanyName__. All rights reserved.
#
from Cocoa import *
import objc
import re
kLiteralSearch = u'Literal Search'
kRegularExpressionSearch = u'Regular Expression Search'
def regexForSearchString(searchString, searchType):
if not searchString:
return None
searchString = searchString.strip()
if searchType == kLiteralSearch:
searchString = re.escape(searchString.strip()) + ur'(?i)'
return re.compile(searchString)
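# Example (illustrative): a literal search for "c++" becomes the escaped,
# case-insensitive pattern c\+\+(?i), while a regular-expression search compiles
# the stripped string as typed (falling back to the previous regex if it fails
# to compile, see arrangeObjects_ below).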
def dictValueFilter(dicts, regex):
for dct in dicts:
for value in dct.itervalues():
print value
if regex.search(value):
yield dct
break
class FilteringArrayController (NSArrayController):
searchString = None
lastRegex = None
searchType = kLiteralSearch
def arrangeObjects_(self, objects):
supermethod = super(FilteringArrayController, self).arrangeObjects_
try:
regex = regexForSearchString(self.searchString, self.searchType)
except:
regex = self.lastRegex
self.lastRegex = regex
if regex is None:
return supermethod(objects)
return supermethod(list(dictValueFilter(objects, regex)))
@objc.IBAction
def performSearch_(self, sender):
self.searchString = sender.stringValue()
self.rearrangeObjects()
@objc.IBAction
def changeSearchType_(self, searchType):
self.lastRegex = None
self.searchString = None
self.searchType = searchType
self.rearrangeObjects()
|
examples/bicycle/kifdd_triangle.py
|
okkhoy/rlpy
| 265 |
87036
|
<gh_stars>100-1000
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from past.utils import old_div
import rlpy
import numpy as np
from hyperopt import hp
param_space = {
'kernel_resolution':
hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
'discover_threshold':
hp.loguniform(
"discover_threshold",
np.log(1e4),
np.log(1e8)),
'lambda_': hp.uniform("lambda_", 0., 1.),
'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
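# Illustrative note: hyperopt samples each of these on the indicated scale, e.g.
# kernel_resolution is drawn log-uniformly between 5 and 50; make_experiment
# below then divides every state dimension's range by that value to obtain the
# per-dimension kernel widths.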
def make_experiment(
exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
discover_threshold=88044.,
boyan_N0=64502,
lambda_=0.43982644088,
initial_learn_rate=0.920244401,
kernel_resolution=11.6543336229):
opt = {}
opt["exp_id"] = exp_id
opt["path"] = path
opt["max_steps"] = 150000
opt["num_policy_checks"] = 30
opt["checks_per_policy"] = 1
active_threshold = 0.01
max_base_feat_sim = 0.5
sparsify = 1
domain = rlpy.Domains.BicycleRiding()
opt["domain"] = domain
kernel_width = old_div((domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]), kernel_resolution)
representation = rlpy.Representations.KernelizediFDD(domain, sparsify=sparsify,
kernel=rlpy.Representations.linf_triangle_kernel,
kernel_args=[kernel_width],
active_threshold=active_threshold,
discover_threshold=discover_threshold,
normalization=True,
max_active_base_feat=10,
max_base_feat_sim=max_base_feat_sim)
policy = rlpy.Policies.eGreedy(representation, epsilon=0.1)
# agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate,
# lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
opt["agent"] = rlpy.Agents.Q_Learning(policy, representation, discount_factor=domain.discount_factor,
lambda_=lambda_, initial_learn_rate=initial_learn_rate,
learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
experiment = rlpy.Experiments.Experiment(**opt)
return experiment
if __name__ == '__main__':
from rlpy.Tools.run import run_profiled
# run_profiled(make_experiment)
experiment = make_experiment(1)
experiment.run(visualize_learning=True,
visualize_performance=True)
experiment.plot()
# experiment.save()
|
pythran/tests/scipy/hausdorff.py
|
davidbrochart/pythran
| 1,647 |
87046
|
#
# Copyright (C) <NAME>, <NAME>, and <NAME>, 2016
#
# Distributed under the same BSD license as Scipy.
#
# adapted from scipy's cython version
import numpy as np
import numpy.random as random
#pythran export directed_hausdorff(float64[:,:], float64[:,:], int)
#pythran export directed_hausdorff_noshuffle(float64[:,:], float64[:,:])
#runas import numpy as np; x = np.arange((100 * 100.)).reshape(100,-1); y = np.ones((100,100)) * 3; directed_hausdorff_noshuffle(x, y)
def directed_hausdorff(ar1, ar2, seed=0):
N1, data_dims = ar1.shape
N2 = ar2.shape[0]
i_store = j_store = i_ret = j_ret = 0
# shuffling the points in each array generally increases the likelihood of
# an advantageous break in the inner search loop and never decreases the
# performance of the algorithm
random.seed(seed)
resort1 = np.arange(N1)
resort2 = np.arange(N2)
random.shuffle(resort1)
random.shuffle(resort2)
ar1 = np.asarray(ar1)[resort1]
ar2 = np.asarray(ar2)[resort2]
cmax = 0
for i in range(N1):
cmin = np.inf
for j in range(N2):
d = np.sum((ar1[i] - ar2[j]) ** 2)
# faster performance with square of distance
# avoid sqrt until very end
if d < cmax: # break out of `for j` loop
break
if d < cmin: # always true on first iteration of for-j loop
cmin = d
i_store = i
j_store = j
else:
# always true on first iteration of for-j loop, after that only
# if d >= cmax
if cmin != np.inf and cmin > cmax:
cmax = cmin
i_ret = i_store
j_ret = j_store
return np.sqrt(cmax), resort1[i_ret], resort2[j_ret]
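# Worked sketch (illustrative): with ar1 = [[0., 0.]] and ar2 = [[3., 4.]] the
# inner loop finds cmin = 25.0 (the squared distance), cmax becomes 25.0, and
# the function returns (sqrt(25.0), 0, 0) == (5.0, 0, 0).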
def directed_hausdorff_noshuffle(ar1, ar2, seed=0):
N1, data_dims = ar1.shape
N2 = ar2.shape[0]
i_store = j_store = i_ret = j_ret = 0
resort1 = np.arange(N1)
resort2 = np.arange(N2)
ar1 = np.asarray(ar1)[resort1]
ar2 = np.asarray(ar2)[resort2]
cmax = 0
for i in range(N1):
cmin = np.inf
for j in range(N2):
d = np.sum((ar1[i] - ar2[j]) ** 2)
# faster performance with square of distance
# avoid sqrt until very end
if d < cmax: # break out of `for j` loop
break
if d < cmin: # always true on first iteration of for-j loop
cmin = d
i_store = i
j_store = j
else:
# always true on first iteration of for-j loop, after that only
# if d >= cmax
if cmin != np.inf and cmin > cmax:
cmax = cmin
i_ret = i_store
j_ret = j_store
return np.sqrt(cmax), resort1[i_ret], resort2[j_ret]
|
src/ralph/dns/tests.py
|
pinoatrome/ralph
| 1,668 |
87077
|
<reponame>pinoatrome/ralph<filename>src/ralph/dns/tests.py
# -*- coding: utf-8 -*-
from unittest.mock import patch
from django.db import transaction
from django.test import override_settings, TestCase, TransactionTestCase
from ralph.assets.tests.factories import (
ConfigurationClassFactory,
EthernetFactory
)
from ralph.data_center.models import BaseObjectCluster, DataCenterAsset
from ralph.dns.dnsaas import DNSaaS
from ralph.dns.forms import DNSRecordForm, RecordType
from ralph.dns.publishers import _get_txt_data_to_publish_to_dnsaas
from ralph.dns.views import (
add_errors,
DNSaaSIntegrationNotEnabledError,
DNSView
)
from ralph.networks.tests.factories import IPAddressFactory
from ralph.virtual.models import VirtualServer
from ralph.virtual.tests.factories import VirtualServerFactory
class TestGetDnsRecords(TestCase):
@patch.object(DNSaaS, '_get_oauth_token')
def setUp(self, mocked):
mocked.return_value = 'token'
self.dnsaas = DNSaaS()
@patch.object(DNSaaS, 'get_api_result')
def test_return_empty_when_api_returns_empty(self, mocked):
mocked.return_value = []
found_dns = self.dnsaas.get_dns_records(['192.168.0.1'])
self.assertEqual(found_dns, [])
def test_return_empty_when_no_ipaddress(self):
found_dns = self.dnsaas.get_dns_records([])
self.assertEqual(found_dns, [])
@patch.object(DNSaaS, 'get_api_result')
def test_return_dns_records_when_api_returns_records(self, mocked):
data = {
'content': '127.0.0.3',
'name': '1.test.pl',
'type': 'A',
'id': 1
}
mocked.return_value = [data]
found_dns = self.dnsaas.get_dns_records(['192.168.0.1'])
self.assertEqual(len(found_dns), 1)
self.assertEqual(found_dns[0]['content'], data['content'])
self.assertEqual(found_dns[0]['name'], data['name'])
self.assertEqual(found_dns[0]['type'], RecordType.a)
@override_settings(DNSAAS_URL='http://dnsaas.com/')
def test_build_url(self):
self.assertEqual(
self.dnsaas.build_url('domains'),
'http://dnsaas.com/api/domains/'
)
@override_settings(DNSAAS_URL='http://dnsaas.com/')
def test_build_url_with_version(self):
self.assertEqual(
self.dnsaas.build_url('domains'),
'http://dnsaas.com/api/domains/'
)
@override_settings(DNSAAS_URL='http://dnsaas.com/')
def test_build_url_with_id(self):
self.assertEqual(
self.dnsaas.build_url('domains', id=1),
'http://dnsaas.com/api/domains/1/'
)
@override_settings(DNSAAS_URL='http://dnsaas.com/')
def test_build_url_with_get_params(self):
self.assertEqual(
self.dnsaas.build_url('domains', get_params=[('name', 'ralph')]),
'http://dnsaas.com/api/domains/?name=ralph'
)
@override_settings(DNSAAS_URL='http://dnsaas.com/')
def test_build_url_with_id_and_get_params(self):
self.assertEqual(
self.dnsaas.build_url(
'domains', id=1, get_params=[('name', 'ralph')]
),
'http://dnsaas.com/api/domains/1/?name=ralph'
)
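# Illustrative note: the assertions above exercise DNSaaS.build_url, which joins
# DNSAAS_URL with 'api/<resource>/', appends '<id>/' when an id is given, and
# urlencodes any get_params into the query string.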
class TestDNSView(TestCase):
@override_settings(ENABLE_DNSAAS_INTEGRATION=False)
def test_dnsaasintegration_disabled(self):
with self.assertRaises(DNSaaSIntegrationNotEnabledError):
DNSView()
@override_settings(ENABLE_DNSAAS_INTEGRATION=True)
@patch('ralph.dns.views.DNSaaS._get_oauth_token')
def test_dnsaasintegration_enabled(self, _get_oauth_token_mock):
# should not raise exception
_get_oauth_token_mock.return_value = 'token'
DNSView()
class TestGetTXTDataToPublishToDNSaaS(TestCase):
@classmethod
def setUpClass(cls):
from ralph.data_center.tests.factories import (
ClusterFactory,
DataCenterAssetFactory,
RackFactory,
)
super().setUpClass()
cls.dc_asset = DataCenterAssetFactory(
hostname='ralph0.allegro.pl',
service_env__service__name='service',
service_env__environment__name='test',
model__name='DL360',
model__manufacturer__name='Asus',
model__category__name='ATS',
rack=RackFactory(
name='Rack #100',
server_room__name='Server Room A',
server_room__data_center__name='DC1',
),
position=1,
slot_no='1',
configuration_path__class_name='www',
configuration_path__module__name='ralph',
)
cls.dc_ip = IPAddressFactory(
base_object=cls.dc_asset,
ethernet=EthernetFactory(base_object=cls.dc_asset),
)
IPAddressFactory(
base_object=cls.dc_asset,
ethernet=EthernetFactory(base_object=cls.dc_asset),
is_management=True,
)
cls.virtual_server = VirtualServerFactory(
hostname='s000.local',
configuration_path=ConfigurationClassFactory(
class_name='worker',
module__name='auth'
),
service_env__service__name='service',
service_env__environment__name='prod',
type__name='Xen',
parent=DataCenterAssetFactory(
hostname='parent',
model__name='DL380p',
model__manufacturer__name='Brother',
model__category__name='Database Machine',
rack=RackFactory(
name='Rack #101',
server_room__name='Server Room B',
server_room__data_center__name='DC2',
),
position=1,
slot_no='1',
),
)
# refresh virtual server to get parent as BaseObject, not
# DataCenterAsset
cls.vs_ip = IPAddressFactory(
base_object=cls.virtual_server,
ethernet=EthernetFactory(base_object=cls.virtual_server),
)
cls.virtual_server = VirtualServer.objects.get(
pk=cls.virtual_server.id
)
cluster = ClusterFactory(
hostname='',
type__name='Application',
configuration_path__class_name='www',
configuration_path__module__name='ralph',
service_env__service__name='service',
service_env__environment__name='preprod',
)
cls.boc_1 = BaseObjectCluster.objects.create(
cluster=cluster,
base_object=DataCenterAssetFactory(
rack=RackFactory(), position=1,
)
)
cls.boc_2 = BaseObjectCluster.objects.create(
cluster=cluster,
base_object=DataCenterAssetFactory(
rack=RackFactory(
server_room__data_center__name='DC2',
server_room__name='Server Room B',
name='Rack #101',
),
position=1,
),
is_master=True
)
cls.cluster = ClusterFactory._meta.model.objects.get(pk=cluster)
cls.cluster_ip = IPAddressFactory(
base_object=cls.cluster,
ethernet=EthernetFactory(base_object=cls.cluster),
)
def test_dc_asset_gets_data_ok(self):
data = _get_txt_data_to_publish_to_dnsaas(self.dc_asset)
self.assertEqual(data, [{
'content': 'www',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'VENTURE',
'service_uid': self.dc_asset.service.uid
}, {
'content': 'ralph',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'ROLE',
'service_uid': self.dc_asset.service.uid
}, {
'content': 'ralph/www',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'CONFIGURATION_PATH',
'service_uid': self.dc_asset.service.uid
}, {
'content': 'service - test',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'SERVICE_ENV',
'service_uid': self.dc_asset.service.uid
}, {
'content': '[ATS] Asus DL360',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'MODEL',
'service_uid': self.dc_asset.service.uid
}, {
'content': 'DC1 / Server Room A / Rack #100 / 1 / 1',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'LOCATION',
'service_uid': self.dc_asset.service.uid
}])
def test_dc_asset_without_service_gets_data_ok(self):
self.dc_asset.service_env = None
self.dc_asset.save()
data = _get_txt_data_to_publish_to_dnsaas(self.dc_asset)
self.assertEqual(data, [{
'content': 'www',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'VENTURE'
}, {
'content': 'ralph',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'ROLE'
}, {
'content': 'ralph/www',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'CONFIGURATION_PATH'
}, {
'content': '[ATS] Asus DL360',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'MODEL'
}, {
'content': 'DC1 / Server Room A / Rack #100 / 1 / 1',
'ips': [self.dc_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'LOCATION'
}])
def test_virtual_server_gets_data_ok(self):
data = _get_txt_data_to_publish_to_dnsaas(self.virtual_server)
self.assertEqual(data, [{
'content': 'worker',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'VENTURE',
'service_uid': self.virtual_server.service.uid
}, {
'content': 'auth',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'ROLE',
'service_uid': self.virtual_server.service.uid
}, {
'content': 'auth/worker',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'CONFIGURATION_PATH',
'service_uid': self.virtual_server.service.uid
}, {
'content': 'service - prod',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'SERVICE_ENV',
'service_uid': self.virtual_server.service.uid
}, {
'content': 'Xen',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'MODEL',
'service_uid': self.virtual_server.service.uid
}, {
'content': 'DC2 / Server Room B / Rack #101 / 1 / 1 / parent',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'LOCATION',
'service_uid': self.virtual_server.service.uid
}])
def test_virtual_server_without_service_gets_data_ok(self):
self.virtual_server.service_env = None
self.virtual_server.save()
data = _get_txt_data_to_publish_to_dnsaas(self.virtual_server)
self.assertEqual(data, [{
'content': 'worker',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'VENTURE'
}, {
'content': 'auth',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'ROLE'
}, {
'content': 'auth/worker',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'CONFIGURATION_PATH'
}, {
'content': 'Xen',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'MODEL'
}, {
'content': 'DC2 / Server Room B / Rack #101 / 1 / 1 / parent',
'ips': [self.vs_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'LOCATION'
}])
def test_cluster_gets_data_ok(self):
data = _get_txt_data_to_publish_to_dnsaas(self.cluster)
self.assertEqual(data, [{
'content': 'www',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'VENTURE',
'service_uid': self.cluster.service.uid
}, {
'content': 'ralph',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'ROLE',
'service_uid': self.cluster.service.uid
}, {
'content': 'ralph/www',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'CONFIGURATION_PATH',
'service_uid': self.cluster.service.uid
}, {
'content': 'service - preprod',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'SERVICE_ENV',
'service_uid': self.cluster.service.uid
}, {
'content': 'Application',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'MODEL',
'service_uid': self.cluster.service.uid
}, {
'content': 'DC2 / Server Room B / Rack #101 / 1',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'LOCATION',
'service_uid': self.cluster.service.uid
}])
def test_cluster_without_service_gets_data_ok(self):
self.cluster.service_env = None
self.cluster.save()
data = _get_txt_data_to_publish_to_dnsaas(self.cluster)
self.assertEqual(data, [{
'content': 'www',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'VENTURE'
}, {
'content': 'ralph',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'ROLE'
}, {
'content': 'ralph/www',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'CONFIGURATION_PATH'
}, {
'content': 'Application',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'MODEL'
}, {
'content': 'DC2 / Server Room B / Rack #101 / 1',
'ips': [self.cluster_ip.address],
'owner': 'ralph',
'target_owner': 'ralph',
'purpose': 'LOCATION'
}])
class TestPublishAutoTXTToDNSaaS(TransactionTestCase):
@classmethod
def setUpClass(cls):
from ralph.data_center.tests.factories import (
DataCenterAssetFactory,
RackFactory,
)
super().setUpClass()
cls.dc_asset = DataCenterAssetFactory(
hostname='ralph0.allegro.pl',
service_env__service__name='service',
service_env__environment__name='test',
model__name='DL360',
model__manufacturer__name='Asus',
model__category__name='ATS',
rack=RackFactory(
name='Rack #100',
server_room__name='Server Room A',
server_room__data_center__name='DC1',
),
position=1,
slot_no='1',
configuration_path__class_name='www',
configuration_path__module__name='ralph',
)
cls.dc_ip = IPAddressFactory(
base_object=cls.dc_asset,
ethernet=EthernetFactory(base_object=cls.dc_asset),
)
IPAddressFactory(
base_object=cls.dc_asset,
ethernet=EthernetFactory(base_object=cls.dc_asset),
is_management=True,
)
@override_settings(
DNSAAS_AUTO_TXT_RECORD_TOPIC_NAME='dnsaas_auto_txt_record'
)
@patch('ralph.dns.publishers.publish')
def test_publishing_auto_txt_data_when_dc_asset_updated(self, publish_mock):
# fetch clean instance
dc_asset = DataCenterAsset.objects.get(pk=self.dc_asset)
with transaction.atomic():
dc_asset.save()
self.assertEqual(publish_mock.call_count, 1)
publish_data = publish_mock.call_args[0][1]
# owner could be non-deterministic, depending on order of tests
# and it's not part of this test to check its correctness
for data_dict in publish_data:
data_dict.pop('owner')
self.assertCountEqual(publish_data, [
{
'content': 'www',
'ips': [self.dc_ip.address],
'target_owner': 'ralph',
'purpose': 'VENTURE',
'service_uid': dc_asset.service.uid
}, {
'content': 'ralph',
'ips': [self.dc_ip.address],
'target_owner': 'ralph',
'purpose': 'ROLE',
'service_uid': dc_asset.service.uid
}, {
'content': 'ralph/www',
'ips': [self.dc_ip.address],
'target_owner': 'ralph',
'purpose': 'CONFIGURATION_PATH',
'service_uid': dc_asset.service.uid
}, {
'content': 'service - test',
'ips': [self.dc_ip.address],
'target_owner': 'ralph',
'purpose': 'SERVICE_ENV',
'service_uid': dc_asset.service.uid
}, {
'content': '[ATS] Asus DL360',
'ips': [self.dc_ip.address],
'target_owner': 'ralph',
'purpose': 'MODEL',
'service_uid': dc_asset.service.uid
}, {
'content': 'DC1 / Server Room A / Rack #100 / 1 / 1',
'ips': [self.dc_ip.address],
'target_owner': 'ralph',
'purpose': 'LOCATION',
'service_uid': dc_asset.service.uid
}
])
class TestDNSForm(TestCase):
def test_unknown_field_goes_to_non_field_errors(self):
errors = {'errors': [{'reason': 'unknown', 'comment': 'value'}]}
form = DNSRecordForm({})
add_errors(form, errors)
self.assertIn('value', form.non_field_errors())
|
rpython/rtyper/lltypesystem/llgroup.py
|
nanjekyejoannah/pypy
| 381 |
87091
|
import weakref
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rlib.rarithmetic import LONG_BIT
class GroupType(lltype.ContainerType):
"""A 'group' that stores static structs together in memory.
On 32-bit platforms, the point is that they can be referenced by a
GroupMemberOffset which only takes 2 bytes (a USHORT), so the total
    size of a group is limited to 18 bits (= the 16 bits in a USHORT, plus 2
bits at the end that are zero and so don't need to be stored).
On 64-bit platforms, we check that the address they end up at is
within the first 32 bits, so that we can store that address in half
a long (i.e. in a UINT).
"""
_gckind = 'raw'
Group = GroupType()
class group(lltype._container):
_TYPE = Group
outdated = None
def __init__(self, name):
self.name = name
self.members = []
def add_member(self, structptr):
TYPE = lltype.typeOf(structptr)
assert isinstance(TYPE.TO, lltype.Struct)
assert TYPE.TO._gckind == 'raw'
struct = structptr._as_obj()
prevgroup = _membership.get(struct)
if prevgroup is not None:
prevgroup.outdated = (
"structure %s was inserted into another group" % (struct,))
assert struct._parentstructure() is None
index = len(self.members)
self.members.append(struct)
_membership[struct] = self
return GroupMemberOffset(self, index)
def member_of_group(structptr):
return _membership.get(structptr._as_obj(), None)
_membership = weakref.WeakValueDictionary()
if LONG_BIT == 32:
HALFSHIFT = 16
HALFWORD = rffi.USHORT
r_halfword = rffi.r_ushort
else:
HALFSHIFT = 32
HALFWORD = rffi.UINT
r_halfword = rffi.r_uint
class GroupMemberOffset(llmemory.Symbolic):
"""The offset of a struct inside a group, stored compactly in a HALFWORD
(a USHORT or UINT). Can only be used by the lloperation 'get_group_member'.
"""
def annotation(self):
from rpython.annotator import model
return model.SomeInteger(knowntype=r_halfword)
def lltype(self):
return HALFWORD
def __init__(self, grp, memberindex):
assert lltype.typeOf(grp) == Group
self.grpptr = grp._as_ptr()
self.index = memberindex
self.member = grp.members[memberindex]._as_ptr()
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__,
self.grpptr, self.index)
def __nonzero__(self):
return True
def _get_group_member(self, grpptr):
assert grpptr == self.grpptr, "get_group_member: wrong group!"
return self.member
def _get_next_group_member(self, grpptr, skipoffset):
# ad-hoc: returns a pointer to the group member that follows this one,
# given information in 'skipoffset' about how much to skip -- which
# is the size of the current member.
assert grpptr == self.grpptr, "get_next_group_member: wrong group!"
assert isinstance(skipoffset, llmemory.ItemOffset)
assert skipoffset.TYPE == lltype.typeOf(self.member).TO
assert skipoffset.repeat == 1
return self.grpptr._as_obj().members[self.index + 1]._as_ptr()
class CombinedSymbolic(llmemory.Symbolic):
"""A general-purpose Signed symbolic that combines an unsigned half-word
(USHORT on 32-bit platforms, UINT on 64-bit platforms) and the rest
of the word (typically flags). Only supports extracting the half-word
with 'llop.extract_ushort', and extracting the rest of the word with
'&~0xFFFF' or with a direct masking like '&0x10000' (resp. on 64-bit
platform, with '&~0xFFFFFFFF' or '&0x100000000').
"""
__slots__ = ['lowpart', 'rest']
MASK = (1<<HALFSHIFT)-1 # 0xFFFF or 0xFFFFFFFF
def annotation(self):
from rpython.annotator import model
return model.SomeInteger()
def lltype(self):
return lltype.Signed
def __init__(self, lowpart, rest):
assert (rest & CombinedSymbolic.MASK) == 0
self.lowpart = lowpart
self.rest = rest
def __repr__(self):
return '<CombinedSymbolic %r|%s>' % (self.lowpart, self.rest)
def __nonzero__(self):
return True
def __and__(self, other):
if (other & CombinedSymbolic.MASK) == 0:
return self.rest & other
if (other & CombinedSymbolic.MASK) == CombinedSymbolic.MASK:
return CombinedSymbolic(self.lowpart, self.rest & other)
raise Exception("other=0x%x" % other)
def __or__(self, other):
assert (other & CombinedSymbolic.MASK) == 0
return CombinedSymbolic(self.lowpart, self.rest | other)
def __add__(self, other):
assert (other & CombinedSymbolic.MASK) == 0
return CombinedSymbolic(self.lowpart, self.rest + other)
def __sub__(self, other):
assert (other & CombinedSymbolic.MASK) == 0
return CombinedSymbolic(self.lowpart, self.rest - other)
def __rshift__(self, other):
assert other >= HALFSHIFT
return self.rest >> other
def __eq__(self, other):
if (isinstance(other, CombinedSymbolic) and
self.lowpart is other.lowpart):
return self.rest == other.rest
else:
return NotImplemented
def __ne__(self, other):
if (isinstance(other, CombinedSymbolic) and
self.lowpart is other.lowpart):
return self.rest != other.rest
else:
return NotImplemented
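# Worked sketch (illustrative, 32-bit case where MASK == 0xFFFF): given a
# CombinedSymbolic sym packing a USHORT lowpart with flag bits in rest,
#   sym & ~0xFFFF  -> sym.rest (just the flags)
#   sym & 0x10000  -> tests a single flag bit
#   sym | 0x20000  -> a new CombinedSymbolic with that extra flag set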
|
examples/protocols/sockets/scripts/udpclient.py
|
Dangku/ESP8266_RTOS_SDK
| 2,701 |
87093
|
# This example code is in the Public Domain (or CC0 licensed, at your option.)
# Unless required by applicable law or agreed to in writing, this
# software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied.
# -*- coding: utf-8 -*-
from builtins import input
import socket
import sys
# ----------- Config ----------
PORT = 3333
IP_VERSION = 'IPv4'
IPV4 = '192.168.0.167'
IPV6 = 'FE80::32AE:A4FF:FE80:5288'
# -------------------------------
if IP_VERSION == 'IPv4':
host = IPV4
family_addr = socket.AF_INET
elif IP_VERSION == 'IPv6':
host = IPV6
family_addr = socket.AF_INET6
else:
print('IP_VERSION must be IPv4 or IPv6')
sys.exit(1)
try:
sock = socket.socket(family_addr, socket.SOCK_DGRAM)
except socket.error as msg:
print('Failed to create socket')
sys.exit()
while True:
msg = input('Enter message to send : ')
try:
sock.sendto(msg.encode(), (host, PORT))
reply, addr = sock.recvfrom(128)
if not reply: break
print('Reply[' + addr[0] + ':' + str(addr[1]) + '] - ' + str(reply))
except socket.error as msg:
        print('Error Code : ' + str(msg.errno) + ' Message: ' + str(msg.strerror))
sys.exit()
|
keanu-python/docs/remove_underscores.py
|
bwplotka/keanu
| 153 |
87094
|
<gh_stars>100-1000
import os
try:
os.rename('_build/html/_static', '_build/html/static')
os.rename('_build/html/_modules', '_build/html/modules')
os.rename('_build/html/_sources', '_build/html/sources')
except:
pass
root_dir = '_build/html'
for directory, subdirectories, files in os.walk(root_dir):
for fileName in files:
        try:
            fileName = os.path.join(directory, fileName)
            # Read the file, rewrite the underscore-prefixed directory names,
            # then write the result back; context managers keep the handles closed.
            with open(fileName, 'r') as f:
                contents = f.read()
            replaced_contents = contents.replace('_static', 'static')
            replaced_contents = replaced_contents.replace('_modules', 'modules')
            replaced_contents = replaced_contents.replace('_sources', 'sources')
            with open(fileName, 'w') as f:
                f.write(replaced_contents)
        except (IOError, OSError):
            pass
print("Finished renaming all directories and mentions of directories with underscores")
|
examples/competitive/bot.py
|
DrInfy/python-sc2
| 242 |
87096
|
<filename>examples/competitive/bot.py
import sc2
class CompetitiveBot(sc2.BotAI):
async def on_start(self):
print("Game started")
# Do things here before the game starts
async def on_step(self, iteration):
# Populate this function with whatever your bot should do!
pass
def on_end(self, result):
print("Game ended.")
# Do things here after the game ends
|
src/maestral/cli/cli_settings.py
|
gliptak/maestral
| 436 |
87124
|
from __future__ import annotations
from typing import TYPE_CHECKING
import click
from .output import echo, ok
from .common import convert_api_errors, existing_config_option, inject_proxy
from .core import DropboxPath, CliException
if TYPE_CHECKING:
from ..main import Maestral
@click.command(
help="""
Automatically start the sync daemon on login.
A systemd or launchd service will be created to start a sync daemon for the given
configuration on user login.
""",
)
@click.option("--yes", "-Y", is_flag=True, default=False)
@click.option("--no", "-N", is_flag=True, default=False)
@existing_config_option
def autostart(yes: bool, no: bool, config_name: str) -> None:
from ..autostart import AutoStart
auto_start = AutoStart(config_name)
if not auto_start.implementation:
echo(
"Autostart is currently not supported for your platform.\n"
"Autostart requires systemd on Linux or launchd on macOS."
)
return
if yes or no:
if yes:
auto_start.enable()
ok("Enabled start on login.")
else:
auto_start.disable()
ok("Disabled start on login.")
else:
if auto_start.enabled:
echo("Autostart is enabled. Use -N to disable.")
else:
echo("Autostart is disabled. Use -Y to enable.")
@click.group(help="View and manage excluded folders.")
def excluded():
pass
@excluded.command(name="list", help="List all excluded files and folders.")
@inject_proxy(fallback=True, existing_config=True)
def excluded_list(m: Maestral) -> None:
excluded_items = m.excluded_items
excluded_items.sort()
if len(excluded_items) == 0:
echo("No excluded files or folders.")
else:
for item in excluded_items:
echo(item)
@excluded.command(
name="add",
help="Add a file or folder to the excluded list and re-sync.",
)
@click.argument("dropbox_path", type=DropboxPath())
@inject_proxy(fallback=True, existing_config=True)
@convert_api_errors
def excluded_add(m: Maestral, dropbox_path: str) -> None:
if dropbox_path == "/":
raise CliException("Cannot exclude the root directory.")
m.exclude_item(dropbox_path)
ok(f"Excluded '{dropbox_path}'.")
@excluded.command(
name="remove",
help="""
Remove a file or folder from the excluded list and re-sync.
    It is safe to call this method with items which have already been included; they will
not be downloaded again. If the given path lies inside an excluded folder, the parent
folder will be included as well (but no other items inside it).
""",
)
@click.argument("dropbox_path", type=DropboxPath())
@inject_proxy(fallback=False, existing_config=True)
@convert_api_errors
def excluded_remove(m: Maestral, dropbox_path: str) -> None:
if dropbox_path == "/":
return echo("The root directory is always included")
m.include_item(dropbox_path)
ok(f"Included '{dropbox_path}'. Now downloading...")
@click.group(help="Manage desktop notifications.")
def notify():
pass
@notify.command(
name="level",
help="Get or set the level for desktop notifications.",
)
@click.argument(
"level_name",
required=False,
type=click.Choice(["ERROR", "SYNCISSUE", "FILECHANGE"], case_sensitive=False),
)
@inject_proxy(fallback=True, existing_config=True)
def notify_level(m: Maestral, level_name: str) -> None:
from .. import notify as _notify
if level_name:
m.notification_level = _notify.level_name_to_number(level_name)
ok(f"Notification level set to {level_name}.")
else:
level_name = _notify.level_number_to_name(m.notification_level)
echo(f"Notification level: {level_name}.")
@notify.command(
name="snooze",
help="Snooze desktop notifications of file changes.",
)
@click.argument("minutes", type=click.IntRange(min=0))
@inject_proxy(fallback=True, existing_config=True)
def notify_snooze(m: Maestral, minutes: int) -> None:
m.notification_snooze = minutes
if minutes > 0:
ok(f"Notifications snoozed for {minutes} min. Set snooze to 0 to reset.")
else:
ok("Notifications enabled.")
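# Illustrative CLI usage sketch (assuming the commands above are exposed under
# the top-level ``maestral`` entry point; the paths and values are hypothetical):
#
#   maestral autostart -Y            # enable start on login
#   maestral excluded add "/Work"    # exclude a Dropbox folder and re-sync
#   maestral notify snooze 30        # mute file-change notifications for 30 min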
|
neo/bin/prompt.py
|
volekerb/neo-python
| 387 |
87128
|
<reponame>volekerb/neo-python
#!/usr/bin/env python3
import argparse
import datetime
import os
import traceback
import asyncio
import termios
import sys
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import FileHistory
from prompt_toolkit.shortcuts import print_formatted_text, PromptSession
from prompt_toolkit.formatted_text import FormattedText
from prompt_toolkit.application import get_app as prompt_toolkit_get_app
from neo import __version__
from neo.Core.Blockchain import Blockchain
from neo.Storage.Implementation.DBFactory import getBlockchainDB
from neo.Implementations.Notifications.NotificationDB import NotificationDB
from neo.Prompt.Commands.Wallet import CommandWallet
from neo.Prompt.Commands.Show import CommandShow
from neo.Prompt.Commands.Search import CommandSearch
from neo.Prompt.Commands.Config import CommandConfig
from neo.Prompt.Commands.SC import CommandSC
from neo.Prompt.PromptData import PromptData
from neo.Prompt.InputParser import InputParser
from neo.Settings import settings, PrivnetConnectionError
from neo.UserPreferences import preferences
from neo.logging import log_manager
from neo.Prompt.PromptPrinter import prompt_print, token_style
from neo.Network.nodemanager import NodeManager
import neo.Storage.Implementation.DBFactory as DBFactory
logger = log_manager.getLogger()
from prompt_toolkit.eventloop import use_asyncio_event_loop
from neo.Network.p2pservice import NetworkService
from contextlib import suppress
class PromptFileHistory(FileHistory):
def append(self, string):
string = self.redact_command(string)
if len(string) == 0:
return
self.strings.append(string)
# Save to file.
with open(self.filename, 'ab') as f:
def write(t):
f.write(t.encode('utf-8'))
write('\n# %s\n' % datetime.datetime.now())
for line in string.split('\n'):
write('+%s\n' % line)
def redact_command(self, string):
if len(string) == 0:
return string
command = [comm for comm in ['import wif', 'export wif', 'import nep2', 'export nep2'] if comm in string]
if len(command) > 0:
command = command[0]
# only redacts command if wif/nep2 keys are in the command, not if the argument is left empty.
if command in string and len(command + " ") < len(string):
# example: import wif 5HueCGU8 --> import wif <wif>
return command + " <" + command.split(" ")[1] + ">"
else:
return string
return string
class PromptInterface:
prompt_completer = None
history = None
go_on = True
wallet_loop_deferred = None
Wallet = None
_known_things = []
_commands = [
CommandWallet(), CommandShow(), CommandSearch(), CommandConfig(), CommandSC()
]
_command_descs = [desc for c in _commands for desc in c.command_descs_with_sub_commands()]
commands = {command.command_desc().command: command for command in _commands}
start_height = None
start_dt = None
prompt_session = None
def __init__(self, history_filename=None):
PromptData.Prompt = self
if history_filename:
PromptInterface.history = PromptFileHistory(history_filename)
self.input_parser = InputParser()
self.start_height = Blockchain.Default().Height
self.start_dt = datetime.datetime.utcnow()
def get_bottom_toolbar(self, cli=None):
out = []
try:
if PromptData.Wallet is None:
return "[%s] Progress: 0/%s/%s" % (settings.net_name,
str(Blockchain.Default().Height),
str(Blockchain.Default().HeaderHeight))
else:
return "[%s] Progress: %s/%s/%s" % (settings.net_name, str(PromptData.Wallet._current_height),
str(Blockchain.Default().Height),
str(Blockchain.Default().HeaderHeight))
except Exception as e:
pass
return out
def get_completer(self):
standard_completions = list({word for d in self._command_descs for word in d.command.split()}) # Use a set to ensure unicity of words
standard_completions += ['quit', 'help', 'exit']
if PromptData.Wallet:
for addr in PromptData.Wallet.Addresses:
if addr not in self._known_things:
self._known_things.append(addr)
for alias in PromptData.Wallet.NamedAddr:
if alias.Title not in self._known_things:
self._known_things.append(alias.Title)
for tkn in PromptData.Wallet.GetTokens().values():
if tkn.symbol not in self._known_things:
self._known_things.append(tkn.symbol)
all_completions = standard_completions + self._known_things
PromptInterface.prompt_completer = WordCompleter(all_completions)
return PromptInterface.prompt_completer
def quit(self):
print('Shutting down. This may take a bit...')
self.go_on = False
PromptData.close_wallet()
raise SystemExit
def help(self):
prompt_print(f"\nCommands:")
for command_group in sorted(self.commands.keys()):
command = self.commands[command_group]
prompt_print(f" {command_group:<15} - {command.command_desc().short_help}")
prompt_print(f"\nRun 'COMMAND help' for more information on a command.")
def on_looperror(self, err):
logger.debug("On DB loop error! %s " % err)
async def run(self):
nodemgr = NodeManager()
while not nodemgr.running:
await asyncio.sleep(0.1)
tokens = [("class:neo", 'NEO'), ("class:default", ' cli. Type '),
("class:command", '\'help\' '), ("class:default", 'to get started')]
print_formatted_text(FormattedText(tokens), style=token_style)
print('\n')
session = PromptSession("neo> ",
completer=self.get_completer(),
history=self.history,
bottom_toolbar=self.get_bottom_toolbar,
style=token_style,
refresh_interval=3,
)
self.prompt_session = session
result = ""
while self.go_on:
# with patch_stdout():
try:
result = await session.prompt(async_=True)
except EOFError:
# Control-D pressed: quit
return self.quit()
except KeyboardInterrupt:
# Control-C pressed: pause for user input
# temporarily mute stdout during user input
# components like `network` set at DEBUG level will spam through the console
# making it impractical to input user data
log_manager.mute_stdio()
print('Logging output muted during user input...')
try:
result = await session.prompt(async_=True)
except Exception as e:
logger.error("Exception handling input: %s " % e)
# and re-enable stdio
log_manager.unmute_stdio()
except Exception as e:
logger.error("Exception handling input: %s " % e)
try:
command, arguments = self.input_parser.parse_input(result)
if command is not None and len(command) > 0:
command = command.lower()
if command in self.commands:
cmd = self.commands[command]
if len(arguments) > 0 and arguments[-1] == 'help':
cmd.handle_help(arguments)
else:
cmd.execute(arguments)
else:
if command == 'quit' or command == 'exit':
self.quit()
elif command == 'help':
self.help()
elif command is None:
print("Please specify a command")
else:
print("Command '%s' not found" % command)
except Exception as e:
print("Could not execute command: %s" % e)
traceback.print_stack()
traceback.print_exc()
def main():
parser = argparse.ArgumentParser()
# Network group
group = parser.add_mutually_exclusive_group()
group.add_argument("-m", "--mainnet", action="store_true", default=False,
help="Use MainNet instead of the default TestNet")
group.add_argument("-p", "--privnet", nargs="?", metavar="host", const=True, default=False,
help="Use a private net instead of the default TestNet, optionally using a custom host (default: 127.0.0.1)")
group.add_argument("--coznet", action="store_true", default=False,
help="Use the CoZ network instead of the default TestNet")
group.add_argument("-u", "--unittest", nargs="?", metavar="host", const=True, default=False,
help="Use a private net instead of the default TestNet, optionally using a custom host (default: 127.0.0.1)")
group.add_argument("-c", "--config", action="store", help="Use a specific config file")
# Theme
parser.add_argument("-t", "--set-default-theme", dest="theme",
choices=["dark", "light"],
help="Set the default theme to be loaded from the config file. Default: 'dark'")
# Verbose
parser.add_argument("-v", "--verbose", action="store_true", default=False,
help="Show smart-contract events by default")
# Where to store stuff
parser.add_argument("--datadir", action="store",
help="Absolute path to use for database directories")
# peers
parser.add_argument("--minpeers", action="store", type=int, choices=range(1, 10 + 1), metavar="[1-10]",
help="Min peers to use for P2P Joining")
parser.add_argument("--maxpeers", action="store", type=int, default=5, choices=range(1, 10 + 1), metavar="[1-10]",
help="Max peers to use for P2P Joining")
# Show the neo-python version
parser.add_argument("--version", action="version",
version="neo-python v{version}".format(version=__version__))
args = parser.parse_args()
# Setting the datadir must come before setting the network, else the wrong path is checked at net setup.
if args.datadir:
settings.set_data_dir(args.datadir)
# Setup depending on command line arguments. By default, the testnet settings are already loaded.
if args.config:
settings.setup(args.config)
elif args.mainnet:
settings.setup_mainnet()
elif args.privnet:
try:
settings.setup_privnet(args.privnet)
except PrivnetConnectionError as e:
logger.error(str(e))
return
elif args.coznet:
settings.setup_coznet()
elif args.unittest:
settings.setup_unittest_net()
# Logfile settings & setup
logfile_fn = os.path.join(settings.DATA_DIR_PATH, 'prompt.log')
logfile_max_bytes = 5e7 # 50 MB
logfile_backup_count = 3 # 3 logfiles history
settings.set_logfile(logfile_fn, logfile_max_bytes, logfile_backup_count)
if args.theme:
preferences.set_theme(args.theme)
if args.verbose:
settings.set_log_smart_contract_events(True)
def set_min_peers(num_peers) -> bool:
try:
settings.set_min_peers(num_peers)
print("Minpeers set to ", num_peers)
return True
except ValueError:
print("Please supply a positive integer for minpeers")
return False
def set_max_peers(num_peers) -> bool:
try:
settings.set_max_peers(num_peers)
print("Maxpeers set to ", num_peers)
return True
except ValueError:
print("Please supply a positive integer for maxpeers")
return False
minpeers = args.minpeers
maxpeers = args.maxpeers
if minpeers and maxpeers:
if minpeers > maxpeers:
print("minpeers setting cannot be bigger than maxpeers setting")
return
if not set_min_peers(minpeers) or not set_max_peers(maxpeers):
return
elif minpeers:
if not set_min_peers(minpeers):
return
if minpeers > settings.CONNECTED_PEER_MAX:
if not set_max_peers(minpeers):
return
elif maxpeers:
if not set_max_peers(maxpeers):
return
if maxpeers < settings.CONNECTED_PEER_MIN:
if not set_min_peers(maxpeers):
return
loop = asyncio.get_event_loop()
# put prompt_toolkit on top of asyncio to avoid blocking
use_asyncio_event_loop()
# Instantiate the blockchain and subscribe to notifications
blockchain = Blockchain(DBFactory.getBlockchainDB(settings.chain_leveldb_path))
Blockchain.RegisterBlockchain(blockchain)
# Try to set up a notification db
if NotificationDB.instance():
NotificationDB.instance().start()
# Start the prompt interface
fn_prompt_history = os.path.join(settings.DATA_DIR_PATH, '.prompt.py.history')
cli = PromptInterface(fn_prompt_history)
cli_task = loop.create_task(cli.run())
p2p = NetworkService()
loop.create_task(p2p.start())
async def shutdown():
all_tasks = asyncio.all_tasks()
for task in all_tasks:
task.cancel()
with suppress(asyncio.CancelledError):
await task
# prompt_toolkit hack for not cleaning up see: https://github.com/prompt-toolkit/python-prompt-toolkit/issues/787
old_attrs = termios.tcgetattr(sys.stdin)
try:
loop.run_forever()
except SystemExit:
pass
finally:
with suppress(asyncio.InvalidStateError):
app = prompt_toolkit_get_app()
if app.is_running:
app.exit()
with suppress((SystemExit, Exception)):
cli_task.exception()
loop.run_until_complete(p2p.shutdown())
loop.run_until_complete(shutdown())
loop.run_until_complete(loop.shutdown_asyncgens())
loop.stop()
loop.close()
# Run things
# After the reactor is stopped, gracefully shutdown the database.
NotificationDB.close()
Blockchain.Default().Dispose()
# clean up prompt_toolkit mess, see above
termios.tcsetattr(sys.stdin, termios.TCSANOW, old_attrs)
if __name__ == "__main__":
main()
|
numba/pylowering.py
|
mawanda-jun/numba
| 1738 |
87130
|
<gh_stars>1000+
"""
Lowering implementation for object mode.
"""
from __future__ import print_function, division, absolute_import
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import operator
from . import cgutils, generators, ir, types, utils
from .errors import ForbiddenConstruct
from .lowering import BaseLower
from .utils import builtins, HAS_MATMUL_OPERATOR, IS_PY3
# Issue #475: locals() is unsupported as calling it naively would give
# out wrong results.
_unsupported_builtins = set([locals])
# Map operators to methods on the PythonAPI class
PYTHON_BINOPMAP = {
operator.add: ("number_add", False),
operator.sub: ("number_subtract", False),
operator.mul: ("number_multiply", False),
operator.truediv: ("number_truedivide", False),
operator.floordiv: ("number_floordivide", False),
operator.mod: ("number_remainder", False),
operator.pow: ("number_power", False),
operator.lshift: ("number_lshift", False),
operator.rshift: ("number_rshift", False),
operator.and_: ("number_and", False),
operator.or_: ("number_or", False),
operator.xor: ("number_xor", False),
# inplace operators
operator.iadd: ("number_add", True),
operator.isub: ("number_subtract", True),
operator.imul: ("number_multiply", True),
operator.itruediv: ("number_truedivide", True),
operator.ifloordiv: ("number_floordivide", True),
operator.imod: ("number_remainder", True),
operator.ipow: ("number_power", True),
operator.ilshift: ("number_lshift", True),
operator.irshift: ("number_rshift", True),
operator.iand: ("number_and", True),
operator.ior: ("number_or", True),
operator.ixor: ("number_xor", True),
}
if not IS_PY3:
PYTHON_BINOPMAP[operator.div] = ("number_divide", False)
PYTHON_BINOPMAP[operator.idiv] = ("number_divide", True)
if HAS_MATMUL_OPERATOR:
PYTHON_BINOPMAP[operator.matmul] = ("number_matrix_multiply", False)
PYTHON_BINOPMAP[operator.imatmul] = ("number_matrix_multiply", True)
PYTHON_COMPAREOPMAP = {
operator.eq: '==',
operator.ne: '!=',
operator.lt: '<',
operator.le: '<=',
operator.gt: '>',
operator.ge: '>=',
operator.is_: 'is',
operator.is_not: 'is not',
operator.contains: 'in'
}
class PyLower(BaseLower):
GeneratorLower = generators.PyGeneratorLower
def init(self):
# Strings to be frozen into the Environment object
self._frozen_strings = set()
self._live_vars = set()
def pre_lower(self):
super(PyLower, self).pre_lower()
self.init_pyapi()
# Pre-computed for later use
from .dispatcher import OmittedArg
self.omitted_typobj = self.pyapi.unserialize(
self.pyapi.serialize_object(OmittedArg))
def post_lower(self):
pass
def pre_block(self, block):
self.init_vars(block)
def lower_inst(self, inst):
if isinstance(inst, ir.Assign):
value = self.lower_assign(inst)
self.storevar(value, inst.target.name)
elif isinstance(inst, ir.SetItem):
target = self.loadvar(inst.target.name)
index = self.loadvar(inst.index.name)
value = self.loadvar(inst.value.name)
ok = self.pyapi.object_setitem(target, index, value)
self.check_int_status(ok)
elif isinstance(inst, ir.DelItem):
target = self.loadvar(inst.target.name)
index = self.loadvar(inst.index.name)
ok = self.pyapi.object_delitem(target, index)
self.check_int_status(ok)
elif isinstance(inst, ir.SetAttr):
target = self.loadvar(inst.target.name)
value = self.loadvar(inst.value.name)
ok = self.pyapi.object_setattr(target,
self._freeze_string(inst.attr),
value)
self.check_int_status(ok)
elif isinstance(inst, ir.DelAttr):
target = self.loadvar(inst.target.name)
ok = self.pyapi.object_delattr(target,
self._freeze_string(inst.attr))
self.check_int_status(ok)
elif isinstance(inst, ir.StoreMap):
dct = self.loadvar(inst.dct.name)
key = self.loadvar(inst.key.name)
value = self.loadvar(inst.value.name)
ok = self.pyapi.dict_setitem(dct, key, value)
self.check_int_status(ok)
elif isinstance(inst, ir.Return):
retval = self.loadvar(inst.value.name)
if self.generator_info:
# StopIteration
# We own a reference to the "return value", but we
# don't return it.
self.pyapi.decref(retval)
self.genlower.return_from_generator(self)
return
# No need to incref() as the reference is already owned.
self.call_conv.return_value(self.builder, retval)
elif isinstance(inst, ir.Branch):
cond = self.loadvar(inst.cond.name)
if cond.type == Type.int(1):
istrue = cond
else:
istrue = self.pyapi.object_istrue(cond)
zero = lc.Constant.null(istrue.type)
pred = self.builder.icmp(lc.ICMP_NE, istrue, zero)
tr = self.blkmap[inst.truebr]
fl = self.blkmap[inst.falsebr]
self.builder.cbranch(pred, tr, fl)
elif isinstance(inst, ir.Jump):
target = self.blkmap[inst.target]
self.builder.branch(target)
elif isinstance(inst, ir.Del):
self.delvar(inst.value)
elif isinstance(inst, ir.Raise):
if inst.exception is not None:
exc = self.loadvar(inst.exception.name)
# A reference will be stolen by raise_object() and another
# by return_exception_raised().
self.incref(exc)
else:
exc = None
self.pyapi.raise_object(exc)
self.return_exception_raised()
else:
raise NotImplementedError(type(inst), inst)
def lower_assign(self, inst):
"""
The returned object must have a new reference
"""
value = inst.value
if isinstance(value, (ir.Const, ir.FreeVar)):
return self.lower_const(value.value)
elif isinstance(value, ir.Var):
val = self.loadvar(value.name)
self.incref(val)
return val
elif isinstance(value, ir.Expr):
return self.lower_expr(value)
elif isinstance(value, ir.Global):
return self.lower_global(value.name, value.value)
elif isinstance(value, ir.Yield):
return self.lower_yield(value)
elif isinstance(value, ir.Arg):
obj = self.fnargs[value.index]
# When an argument is omitted, the dispatcher hands it as
# _OmittedArg(<default value>)
typobj = self.pyapi.get_type(obj)
slot = cgutils.alloca_once_value(self.builder, obj)
is_omitted = self.builder.icmp_unsigned('==', typobj,
self.omitted_typobj)
with self.builder.if_else(is_omitted, likely=False) as (omitted, present):
with present:
self.incref(obj)
self.builder.store(obj, slot)
with omitted:
# The argument is omitted => get the default value
obj = self.pyapi.object_getattr_string(obj, 'value')
self.builder.store(obj, slot)
return self.builder.load(slot)
else:
raise NotImplementedError(type(value), value)
def lower_yield(self, inst):
yp = self.generator_info.yield_points[inst.index]
assert yp.inst is inst
self.genlower.init_generator_state(self)
# Save live vars in state
# We also need to save live vars that are del'ed afterwards.
y = generators.LowerYield(self, yp, yp.live_vars | yp.weak_live_vars)
y.lower_yield_suspend()
# Yield to caller
val = self.loadvar(inst.value.name)
# Let caller own the reference
self.pyapi.incref(val)
self.call_conv.return_value(self.builder, val)
# Resumption point
y.lower_yield_resume()
# None is returned by the yield expression
return self.pyapi.make_none()
def lower_binop(self, expr, op, inplace=False):
lhs = self.loadvar(expr.lhs.name)
rhs = self.loadvar(expr.rhs.name)
assert not isinstance(op, str)
if op in PYTHON_BINOPMAP:
fname, inplace = PYTHON_BINOPMAP[op]
fn = getattr(self.pyapi, fname)
res = fn(lhs, rhs, inplace=inplace)
else:
# Assumed to be rich comparison
fn = PYTHON_COMPAREOPMAP.get(expr.fn, expr.fn)
if fn == 'in': # 'in' and operator.contains have args reversed
lhs, rhs = rhs, lhs
res = self.pyapi.object_richcompare(lhs, rhs, fn)
self.check_error(res)
return res
def lower_expr(self, expr):
if expr.op == 'binop':
return self.lower_binop(expr, expr.fn, inplace=False)
elif expr.op == 'inplace_binop':
return self.lower_binop(expr, expr.fn, inplace=True)
elif expr.op == 'unary':
value = self.loadvar(expr.value.name)
if expr.fn == operator.neg:
res = self.pyapi.number_negative(value)
elif expr.fn == operator.pos:
res = self.pyapi.number_positive(value)
elif expr.fn == operator.not_:
res = self.pyapi.object_not(value)
self.check_int_status(res)
longval = self.builder.zext(res, self.pyapi.long)
res = self.pyapi.bool_from_long(longval)
elif expr.fn == operator.invert:
res = self.pyapi.number_invert(value)
else:
raise NotImplementedError(expr)
self.check_error(res)
return res
elif expr.op == 'call':
argvals = [self.loadvar(a.name) for a in expr.args]
fn = self.loadvar(expr.func.name)
args = self.pyapi.tuple_pack(argvals)
if expr.vararg:
# Expand *args
new_args = self.pyapi.number_add(args,
self.loadvar(expr.vararg.name))
self.decref(args)
args = new_args
if not expr.kws:
# No named arguments
ret = self.pyapi.call(fn, args, None)
else:
# Named arguments
keyvalues = [(k, self.loadvar(v.name)) for k, v in expr.kws]
kws = self.pyapi.dict_pack(keyvalues)
ret = self.pyapi.call(fn, args, kws)
self.decref(kws)
self.decref(args)
self.check_error(ret)
return ret
elif expr.op == 'getattr':
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getattr(obj, self._freeze_string(expr.attr))
self.check_error(res)
return res
elif expr.op == 'build_tuple':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.tuple_pack(items)
self.check_error(res)
return res
elif expr.op == 'build_list':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.list_pack(items)
self.check_error(res)
return res
elif expr.op == 'build_map':
res = self.pyapi.dict_new(expr.size)
self.check_error(res)
for k, v in expr.items:
key = self.loadvar(k.name)
value = self.loadvar(v.name)
ok = self.pyapi.dict_setitem(res, key, value)
self.check_int_status(ok)
return res
elif expr.op == 'build_set':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.set_new()
self.check_error(res)
for it in items:
ok = self.pyapi.set_add(res, it)
self.check_int_status(ok)
return res
elif expr.op == 'getiter':
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getiter(obj)
self.check_error(res)
return res
elif expr.op == 'iternext':
iterobj = self.loadvar(expr.value.name)
item = self.pyapi.iter_next(iterobj)
is_valid = cgutils.is_not_null(self.builder, item)
pair = self.pyapi.tuple_new(2)
with self.builder.if_else(is_valid) as (then, otherwise):
with then:
self.pyapi.tuple_setitem(pair, 0, item)
with otherwise:
self.check_occurred()
# Make the tuple valid by inserting None as dummy
# iteration "result" (it will be ignored).
self.pyapi.tuple_setitem(pair, 0, self.pyapi.make_none())
self.pyapi.tuple_setitem(pair, 1, self.pyapi.bool_from_bool(is_valid))
return pair
elif expr.op == 'pair_first':
pair = self.loadvar(expr.value.name)
first = self.pyapi.tuple_getitem(pair, 0)
self.incref(first)
return first
elif expr.op == 'pair_second':
pair = self.loadvar(expr.value.name)
second = self.pyapi.tuple_getitem(pair, 1)
self.incref(second)
return second
elif expr.op == 'exhaust_iter':
iterobj = self.loadvar(expr.value.name)
tup = self.pyapi.sequence_tuple(iterobj)
self.check_error(tup)
# Check tuple size is as expected
tup_size = self.pyapi.tuple_size(tup)
expected_size = self.context.get_constant(types.intp, expr.count)
has_wrong_size = self.builder.icmp(lc.ICMP_NE,
tup_size, expected_size)
with cgutils.if_unlikely(self.builder, has_wrong_size):
self.return_exception(ValueError)
return tup
elif expr.op == 'getitem':
value = self.loadvar(expr.value.name)
index = self.loadvar(expr.index.name)
res = self.pyapi.object_getitem(value, index)
self.check_error(res)
return res
elif expr.op == 'static_getitem':
value = self.loadvar(expr.value.name)
index = self.context.get_constant(types.intp, expr.index)
indexobj = self.pyapi.long_from_ssize_t(index)
self.check_error(indexobj)
res = self.pyapi.object_getitem(value, indexobj)
self.decref(indexobj)
self.check_error(res)
return res
elif expr.op == 'getslice':
target = self.loadvar(expr.target.name)
start = self.loadvar(expr.start.name)
stop = self.loadvar(expr.stop.name)
slicefn = self.get_builtin_obj("slice")
sliceobj = self.pyapi.call_function_objargs(slicefn, (start, stop))
self.decref(slicefn)
self.check_error(sliceobj)
res = self.pyapi.object_getitem(target, sliceobj)
self.check_error(res)
return res
elif expr.op == 'cast':
val = self.loadvar(expr.value.name)
self.incref(val)
return val
else:
raise NotImplementedError(expr)
def lower_const(self, const):
# All constants are frozen inside the environment
index = self.env_manager.add_const(const)
ret = self.env_manager.read_const(index)
self.check_error(ret)
self.incref(ret)
return ret
def lower_global(self, name, value):
"""
1) Check global scope dictionary.
2) Check __builtins__.
2a) is it a dictionary (for non __main__ module)
2b) is it a module (for __main__ module)
"""
moddict = self.get_module_dict()
obj = self.pyapi.dict_getitem(moddict, self._freeze_string(name))
self.incref(obj) # obj is borrowed
try:
if value in _unsupported_builtins:
raise ForbiddenConstruct("builtins %s() is not supported"
% name, loc=self.loc)
except TypeError:
# `value` is unhashable, ignore
pass
if hasattr(builtins, name):
obj_is_null = self.is_null(obj)
bbelse = self.builder.basic_block
with self.builder.if_then(obj_is_null):
mod = self.pyapi.dict_getitem(moddict,
self._freeze_string("__builtins__"))
builtin = self.builtin_lookup(mod, name)
bbif = self.builder.basic_block
retval = self.builder.phi(self.pyapi.pyobj)
retval.add_incoming(obj, bbelse)
retval.add_incoming(builtin, bbif)
else:
retval = obj
with cgutils.if_unlikely(self.builder, self.is_null(retval)):
self.pyapi.raise_missing_global_error(name)
self.return_exception_raised()
return retval
# -------------------------------------------------------------------------
def get_module_dict(self):
return self.env_body.globals
def get_builtin_obj(self, name):
# XXX The builtins dict could be bound into the environment
moddict = self.get_module_dict()
mod = self.pyapi.dict_getitem(moddict,
self._freeze_string("__builtins__"))
return self.builtin_lookup(mod, name)
def builtin_lookup(self, mod, name):
"""
Args
----
mod:
The __builtins__ dictionary or module, as looked up in
a module's globals.
name: str
The object to lookup
"""
fromdict = self.pyapi.dict_getitem(mod, self._freeze_string(name))
self.incref(fromdict) # fromdict is borrowed
bbifdict = self.builder.basic_block
with cgutils.if_unlikely(self.builder, self.is_null(fromdict)):
            # This happens if we are using the __main__ module
frommod = self.pyapi.object_getattr(mod, self._freeze_string(name))
with cgutils.if_unlikely(self.builder, self.is_null(frommod)):
self.pyapi.raise_missing_global_error(name)
self.return_exception_raised()
bbifmod = self.builder.basic_block
builtin = self.builder.phi(self.pyapi.pyobj)
builtin.add_incoming(fromdict, bbifdict)
builtin.add_incoming(frommod, bbifmod)
return builtin
def check_occurred(self):
"""
Return if an exception occurred.
"""
err_occurred = cgutils.is_not_null(self.builder,
self.pyapi.err_occurred())
with cgutils.if_unlikely(self.builder, err_occurred):
self.return_exception_raised()
def check_error(self, obj):
"""
Return if *obj* is NULL.
"""
with cgutils.if_unlikely(self.builder, self.is_null(obj)):
self.return_exception_raised()
return obj
def check_int_status(self, num, ok_value=0):
"""
Raise an exception if *num* is smaller than *ok_value*.
"""
ok = lc.Constant.int(num.type, ok_value)
pred = self.builder.icmp(lc.ICMP_SLT, num, ok)
with cgutils.if_unlikely(self.builder, pred):
self.return_exception_raised()
def is_null(self, obj):
return cgutils.is_null(self.builder, obj)
def return_exception_raised(self):
"""
Return with the currently raised exception.
"""
self.cleanup_vars()
self.call_conv.return_exc(self.builder)
def init_vars(self, block):
"""
Initialize live variables for *block*.
"""
self._live_vars = set(self.func_ir.get_block_entry_vars(block))
def _getvar(self, name, ltype=None):
if name not in self.varmap:
self.varmap[name] = self.alloca(name, ltype=ltype)
return self.varmap[name]
def loadvar(self, name):
"""
Load the llvm value of the variable named *name*.
"""
# If this raises then the live variables analysis is wrong
assert name in self._live_vars, name
ptr = self.varmap[name]
val = self.builder.load(ptr)
with cgutils.if_unlikely(self.builder, self.is_null(val)):
self.pyapi.raise_missing_name_error(name)
self.return_exception_raised()
return val
def delvar(self, name):
"""
Delete the variable slot with the given name. This will decref
the corresponding Python object.
"""
# If this raises then the live variables analysis is wrong
self._live_vars.remove(name)
ptr = self._getvar(name) # initializes `name` if not already
self.decref(self.builder.load(ptr))
# This is a safety guard against double decref's, but really
# the IR should be correct and have only one Del per variable
# and code path.
self.builder.store(cgutils.get_null_value(ptr.type.pointee), ptr)
def storevar(self, value, name, clobber=False):
"""
Stores a llvm value and allocate stack slot if necessary.
The llvm value can be of arbitrary type.
"""
is_redefine = name in self._live_vars and not clobber
ptr = self._getvar(name, ltype=value.type)
if is_redefine:
old = self.builder.load(ptr)
else:
self._live_vars.add(name)
assert value.type == ptr.type.pointee, (str(value.type),
str(ptr.type.pointee))
self.builder.store(value, ptr)
# Safe to call decref even on non python object
if is_redefine:
self.decref(old)
def cleanup_vars(self):
"""
Cleanup live variables.
"""
for name in self._live_vars:
ptr = self._getvar(name)
self.decref(self.builder.load(ptr))
def alloca(self, name, ltype=None):
"""
Allocate a stack slot and initialize it to NULL.
The default is to allocate a pyobject pointer.
Use ``ltype`` to override.
"""
if ltype is None:
ltype = self.context.get_value_type(types.pyobject)
with self.builder.goto_block(self.entry_block):
ptr = self.builder.alloca(ltype, name=name)
self.builder.store(cgutils.get_null_value(ltype), ptr)
return ptr
def incref(self, value):
self.pyapi.incref(value)
def decref(self, value):
"""
        This is allowed to be called on a non-pyobject pointer, in which case
        no code is inserted.
"""
lpyobj = self.context.get_value_type(types.pyobject)
if value.type == lpyobj:
self.pyapi.decref(value)
def _freeze_string(self, string):
"""
Freeze a Python string object into the code.
"""
return self.lower_const(string)
|
openfda/parallel/reducer.py
|
FDA/openfda
| 388 |
87155
|
import collections
import logging
import os
import tempfile
from pickle import loads
import leveldb
logger = logging.getLogger('mapreduce')
def group_by_key(iterator):
'''Group identical keys together.
Given a sorted iterator of (key, value) pairs, returns an iterator of
(key1, values), (key2, values).
'''
last_key = None
values = []
for key, value in iterator:
value = loads(value)
key = key.decode()
user_key, _ = key.rsplit('.', 1)
if user_key != last_key:
if last_key is not None:
yield last_key, values
last_key = user_key
values = [value]
else:
values.append(value)
if last_key is not None:
yield last_key, values
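# Illustrative sketch (hypothetical rows; keys carry the '.<idx>' suffix and the
# values are pickled, matching what Reducer.shuffle below writes to LevelDB):
#
#   from pickle import dumps
#   rows = [(b'a.0', dumps(1)), (b'a.1', dumps(2)), (b'b.2', dumps(3))]
#   list(group_by_key(rows))   # -> [('a', [1, 2]), ('b', [3])]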
class Reducer(object):
def initialize(self, input_queue, tmp_prefix, output_class, output_prefix, shard_idx, num_shards):
self.tmp_prefix = tmp_prefix
self.output_prefix = output_prefix
self.input_queue = input_queue
self.output_class = output_class
self.shard_idx = shard_idx
self.num_shards = num_shards
def reduce_shard(self, input_db, output_db):
for idx, (key, values) in enumerate(group_by_key(input_db.RangeIter())):
# if idx % 1000 == 0:
# logger.info('Reducing records=%d key=%s shard=%d', idx, key, self.shard_idx)
self.reduce(key, values, output_db)
def shuffle(self):
os.system('mkdir -p "%s"' % self.tmp_prefix)
shuffle_dir = tempfile.mkdtemp(
prefix='shard-%05d-of-%05d' % (self.shard_idx, self.num_shards),
dir=self.tmp_prefix)
shuffle_db = leveldb.LevelDB(shuffle_dir)
idx = 0
while 1:
next_entry = self.input_queue.get()
if next_entry is None:
break
key, value_str = next_entry
shuffle_db.Put((key + ('.%s' % idx)).encode(), value_str)
idx += 1
# if idx % 1000 == 0:
# logger.info('Shuffling records=%d key=%s shard=%d', idx, key, self.shard_idx)
output_db = self.output_class.create_writer(self.output_prefix, self.shard_idx, self.num_shards)
# logger.debug('Reducer: %s', output_db)
self.reduce_shard(shuffle_db, output_db)
output_db.flush()
del output_db
del shuffle_db
os.system('rm -rf "%s"' % shuffle_dir)
def reduce(self, key, values, output):
raise NotImplementedError
def reduce_finished(self):
'''Called after all values have been reduced.
The result of this call is returned to the caller of `mapreduce`.
'''
pass
class IdentityReducer(Reducer):
def reduce(self, key, values, output):
for value in values:
output.put(key, value)
class SumReducer(Reducer):
def reduce(self, key, values, output):
output.put(key, sum([float(v) for v in values]))
class ListReducer(Reducer):
def reduce(self, key, values, output):
output.put(key, list(values))
class NullReducer(Reducer):
def reduce(self, key, values, output):
return
def pivot_values(value_list):
''' Takes a list of (name, value) tuples, and `pivots` them, returning
a dictionary from name -> [values].
This is frequently used when joining a number of inputs together,
where each input is tagged with a table name.
'''
intermediate = collections.defaultdict(list)
for row in value_list:
table_name, val = row
intermediate[table_name].append(val)
return intermediate
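# Illustrative sketch (hypothetical table names and values):
#
#   pivot_values([('drug', 1), ('event', 2), ('drug', 3)])
#   # -> {'drug': [1, 3], 'event': [2]}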
class PivotReducer(Reducer):
def reduce(self, key, values, output):
val = pivot_values(values)
output.put(key, val)
|
packages/syft/src/syft/core/node/common/node_table/entity.py
|
vishalbelsare/PySyft
| 8428 |
87159
|
# stdlib
from typing import Any
# third party
from sqlalchemy import Column
from sqlalchemy import LargeBinary
from sqlalchemy import String
# relative
from . import Base
from ..... import deserialize
from ..... import serialize
class Entity(Base):
__tablename__ = "entity"
name = Column(String(255), primary_key=True)
entity_bin = Column(LargeBinary(3072))
@property
def obj(self) -> Any:
return deserialize(self.entity_bin, from_bytes=True) # TODO: techdebt fix
@obj.setter
def obj(self, value: Any) -> None:
self.entity_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
|
examples/pervasive/modules/wip/ll_controls.py
|
EricLina/attn2d
| 490 |
87177
|
import sys
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
def progressive_max(x):
T = x.size(1)
x = F.pad(x, (T-1, 0), 'constant', -1)
x = F.max_pool1d(x.unsqueeze(1).float(), # shape into B, C, T
T, # kernel size
1, # stride
0, # padding
1, # dilation
False, # ceil_mode
False, # return indices
)
return x.squeeze(1) # B, Tt
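# Illustrative sketch (hypothetical tensor): progressive_max replaces each row
# by its running maximum along the time dimension.
#
#   x = torch.tensor([[3, 1, 4, 2]])
#   progressive_max(x)   # -> tensor([[3., 3., 4., 4.]])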
def logsumexp(a, b):
m = torch.max(a, b)
return torch.log(torch.exp(a - m) + torch.exp(b - m))
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
class LLControls(nn.Module):
"""
LL based controller
"""
def __init__(self, args, controller_dim):
nn.Module.__init__(self)
self.gate = nn.Linear(controller_dim, 1, bias=True)
nn.init.normal_(self.gate.weight, 0, 1/controller_dim)
nn.init.constant_(self.gate.bias, 0)
self.penalty = args.oracle_penalty
self.write_right = args.write_right
def get_positions_proba(self, rw_logits):
"""
Inputs:
rw_logits [log(rho), log(1-rho)] : (Tt, B, Ts, 2)
Returns the probabilities of being at position (t,j) (Tt, B, Ts)
"""
Tt, B, Ts, _ = rw_logits.size()
Mr1 = rw_logits[0:1,:,:-1,0].exp()
Mc1 = rw_logits[:,:,0:1,1].exp()
M = rw_logits[1:,:,:-1,0].exp() + rw_logits[:-1,:,1:,1].exp()
M = torch.cat((Mr1, M), dim=0)
M = torch.cat((Mc1, M), dim=-1)
return M
def predict_read_write(self, x):
""" Returns log(rho), log(1-rho) in B, Tt, Ts, 2 """
x = self.gate(x)
s = F.logsigmoid(x)
return torch.cat((s, s-x), dim=-1).float()
def forward(self, observations, scores):
"""
Inputs:
observations : Input for the controller: B, Tt, Ts, C
Scores : log p(y_t | x<j) : B, Tt, Ts
"""
controls = self.predict_read_write(observations) # B,Tt,Ts,2
B, Tt, Ts = scores.size()
with torch.no_grad():
if self.penalty:
# Penalize large contexts:
indices = torch.arange(
Ts,
dtype=scores.dtype,
device=scores.device
) / Ts
scores = scores - self.penalty * indices.unsqueeze(0).unsqueeze(0)
best_context = scores.max(-1)[1] # B, Tt
best_context = progressive_max(best_context).type_as(best_context)
AP = best_context.float().mean(dim=1) / Ts
print('AP:', ' '.join(map(lambda x: '{:.2f}'.format(x), AP.tolist())))
gamma = torch.zeros_like(scores).scatter_(-1, best_context.unsqueeze(-1), 1.0) # B, Tt, Ts
if self.write_right:
gamma = gamma.cumsum(dim=-1)
# Write beyond the ideal context
if self.write_right:
write = gamma[:, 1:] # B, Tt-1, Ts
else:
write = gamma[:, 1:].cumsum(dim=-1) # B, Tt-1, Ts
read = 1 - write
return controls[:, :-1], gamma, read, write
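# Shape sketch for forward() (sizes are illustrative): with observations of
# shape (B, Tt, Ts, C) and scores of shape (B, Tt, Ts), it returns
#   controls[:, :-1]  -> (B, Tt-1, Ts, 2)  read/write log-probabilities
#   gamma             -> (B, Tt, Ts)       oracle context selection
#   read, write       -> (B, Tt-1, Ts)     supervision masks derived from gamma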
|
examples/scripts/10_custom_backends.py
|
buildfail/frontera
| 1267 |
87186
|
"""
Custom backend example
"""
import random
from frontera import FrontierManager, Settings, FrontierTester, graphs
from frontera.contrib.backends.memory import MemoryBaseBackend
SITE_LIST = [
[('http://google.com', [])],
[('http://scrapinghub.com', [])],
[('http://zynga.com', [])],
[('http://microsoft.com', [])],
[('http://apple.com', [])],
]
class AlphabeticSortBackend(MemoryBaseBackend):
"""
    Custom backend that sorts pages alphabetically by url
"""
name = 'Alphabetic domain name sort backend'
def _compare_pages(self, first, second):
return cmp(first.url, second.url)
class RandomSortBackend(MemoryBaseBackend):
"""
    Custom backend that sorts pages randomly
"""
name = 'Random sort backend'
def _compare_pages(self, first, second):
return random.choice([-1, 0, 1])
def test_backend(backend):
# Graph
graph = graphs.Manager()
graph.add_site_list(SITE_LIST)
# Frontier
settings = Settings()
settings.BACKEND = backend
settings.LOGGING_MANAGER_ENABLED = True
settings.LOGGING_BACKEND_ENABLED = True
settings.LOGGING_DEBUGGING_ENABLED = False
frontier = FrontierManager.from_settings(settings)
print '-'*80
print frontier.backend.name
print '-'*80
# Tester
tester = FrontierTester(frontier, graph)
tester.run()
# Show crawling sequence
for page in tester.sequence:
print page.url
if __name__ == '__main__':
test_backend('10_custom_backends.AlphabeticSortBackend')
test_backend('10_custom_backends.RandomSortBackend')
|
tests/__init__.py
|
JKitok/girder
| 395 |
87191
|
# -*- coding: utf-8 -*-
import unittest.mock
from girder import constants, logger
# Mock the logging methods so that we don't actually write logs to disk,
# and so tests can potentially inspect calls to logging methods.
print(constants.TerminalColor.warning('Mocking Girder log methods.'))
for handler in logger.handlers:
handler.emit = unittest.mock.MagicMock()
|
asreader/custombricks/save_the_best.py
|
rkadlec/asreader
| 113 |
87194
|
<reponame>rkadlec/asreader
__author__ = 'lada'
import os.path
from blocks.extensions import SimpleExtension
from blocks.serialization import secure_dump
SAVED_TO = "saved_to"
class SaveTheBest(SimpleExtension):
"""Check if a log quantity has the minimum/maximum value so far
and if that is true then save a pickled version of the main loop
to the disk.
The pickled main loop can be later reloaded and training can be
resumed.
Makes a `SAVED_TO` record in the log with the serialization destination
in the case of success and ``None`` in the case of failure. The
value of the record is a tuple of paths to which saving was done
(there can be more than one if the user added a condition
with an argument, see :meth:`do` docs).
Parameters
----------
record_name : str
The name of the record to track.
choose_best : callable, optional
A function that takes the current value and the best so far
and return the best of two. By default :func:`min`, which
corresponds to tracking the minimum value.
path : str
The destination path for pickling.
save_separately : list of str, optional
The list of the main loop's attributes to be pickled separately
to their own files. The paths will be formed by adding the
        attribute name preceded by an underscore before the
`path` extension. The whole main loop will still be pickled
as usual.
use_cpickle : bool
See docs of :func:`~blocks.serialization.dump`.
Attributes
----------
best_name : str
The name of the status record to keep the best value so far.
Notes
-----
Using pickling for saving the whole main loop object comes with
certain limitations:
    * Theano computation graphs built in the GPU-mode cannot be used in
the usual mode (and vice-versa). Therefore using this extension
binds you to using only one kind of device.
"""
def __init__(self, record_name, path, choose_best=min,
save_separately=None, use_cpickle=False, **kwargs):
self.record_name = record_name
self.best_name = "bestsave_" + record_name
self.choose_best = choose_best
if not save_separately:
save_separately = []
self.path = path
self.save_separately = save_separately
self.use_cpickle = use_cpickle
# kwargs.setdefault("after_training", True)
kwargs.setdefault("after_epoch", True)
super(SaveTheBest, self).__init__(**kwargs)
def save_separately_filenames(self, path):
""" Compute paths for separately saved attributes.
Parameters
----------
path : str
Path to which the main savethebest file is being saved.
Returns
-------
paths : dict
A dictionary mapping attribute names to derived paths
based on the `path` passed in as an argument.
"""
root, ext = os.path.splitext(path)
return {attribute: root + "_" + attribute + ext
for attribute in self.save_separately}
def do(self, which_callback, *args):
current_value = self.main_loop.log.current_row.get(self.record_name)
if current_value is None:
return
best_value = self.main_loop.status.get(self.best_name, None)
if(best_value is None or
(current_value != best_value and
self.choose_best(current_value, best_value) == current_value)):
self.main_loop.status[self.best_name] = current_value
# save main_loop
_, from_user = self.parse_args(which_callback, args)
try:
path = self.path
if from_user:
path, = from_user
secure_dump(self.main_loop, path, use_cpickle=self.use_cpickle)
filenames = self.save_separately_filenames(path)
for attribute in self.save_separately:
secure_dump(getattr(self.main_loop, attribute),
filenames[attribute], use_cpickle=self.use_cpickle)
except Exception:
path = None
raise
finally:
already_saved_to = self.main_loop.log.current_row.get(SAVED_TO, ())
self.main_loop.log.current_row[SAVED_TO] = (already_saved_to +
(path,))
import logging
logger = logging.getLogger(__name__)
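# Illustrative usage sketch (assumes a blocks MainLoop that monitors a quantity
# named 'valid_cost'; the record name and path are hypothetical):
#
#   extensions = [SaveTheBest('valid_cost', path='best_model.pkl', choose_best=min)]
#   # pass alongside the monitoring extensions when constructing MainLoop(...)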
|
infer_tools_tree/utils.py
|
gvieralopez/Coronary-Artery-Tracking-via-3D-CNN-Classification
| 107 |
87195
|
import numpy as np
import warnings
from scipy.ndimage.interpolation import zoom
import torch
import math
import copy
import cv2
from skimage import measure
import pandas as pd
def resample(imgs, spacing, new_spacing, order=2):
if len(imgs.shape) == 3:
new_shape = np.round(imgs.shape * spacing / new_spacing)
true_spacing = spacing * imgs.shape / new_shape
resize_factor = new_shape / imgs.shape
with warnings.catch_warnings():
warnings.simplefilter("ignore")
imgs = zoom(imgs, resize_factor, mode='nearest', order=order)
return imgs, true_spacing, resize_factor
elif len(imgs.shape) == 4:
n = imgs.shape[-1]
newimg = []
for i in range(n):
slice = imgs[:, :, :, i]
            newslice, true_spacing, _ = resample(slice, spacing, new_spacing)
newimg.append(newslice)
newimg = np.transpose(np.array(newimg), [1, 2, 3, 0])
return newimg, true_spacing
else:
raise ValueError('wrong shape')
def get_start_ind(center_points):
curr_x = center_points[0][0]
curr_y = center_points[0][1]
curr_z = center_points[0][2]
curr_r = 3
start_ind = -1
ellipsis = 0.1
for i in range(1, len(center_points)):
v1 = np.array([curr_x, curr_y, curr_z])
v2 = np.array([center_points[i][0], center_points[i][1], center_points[i][2]])
dist = np.linalg.norm(v1 - v2)
if (dist - curr_r) <= ellipsis and dist >= curr_r:
start_ind = i
break
return start_ind
def get_spacing_res2(x, spacing_x, spacing_new):
return int(round((x / spacing_x) * spacing_new))
def get_world_cood(x, spacing_x, spacing_new):
return (x / spacing_new) * spacing_x
def data_preprocess(img):
mean_intensity = np.mean(img)
std_intensity = np.std(img)
upper_bound = np.percentile(img, 99.5)
lower_bound = np.percentile(img, 00.5)
img = np.clip(img, lower_bound, upper_bound)
    # avoid division by zero
img = (img - mean_intensity) / (std_intensity + 1e-9)
img = np.array([img])
img = torch.from_numpy(img)
return img.unsqueeze(0)
def get_shell(fl_Num_Points, fl_Radius):
x_list = []
y_list = []
z_list = []
offset = 2.0 / fl_Num_Points
increment = math.pi * (3.0 - math.sqrt(5.0))
for i in range(fl_Num_Points):
z = ((i * offset) - 1.0) + (offset / 2.0)
r = math.sqrt(1.0 - pow(z, 2.0))
phi = ((i + 1) % fl_Num_Points) * increment
x = math.cos(phi) * r
y = math.sin(phi) * r
x_list.append(fl_Radius * x)
y_list.append(fl_Radius * y)
z_list.append(fl_Radius * z)
return x_list, y_list, z_list
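# Illustrative usage (hypothetical values): sample 500 roughly evenly spaced
# points on a sphere of radius 2.0 centred at the origin (Fibonacci spiral).
#
#   xs, ys, zs = get_shell(500, 2.0)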
def prob_terminates(pre_y, max_points):
res = torch.sum(-pre_y * torch.log2(pre_y))
return res / torch.log2(torch.from_numpy(np.array([max_points])).float())
def get_closer_distance(vessel, target_point):
min_dis = float("inf")
for i in range(len(vessel)):
curr_point = vessel[i]
dist = np.linalg.norm(target_point - curr_point)
if dist < min_dis:
min_dis = dist
index = i
return min_dis, index
def get_distance(v1, v2):
return np.linalg.norm(v1 - v2)
def get_angle(v1, v2):
cosangle = v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
cosangle = np.clip(cosangle, -1, 1)
return math.degrees(np.arccos(cosangle))
def save_info(res: list, path: str):
x_list = []
y_list = []
z_list = []
for i in range(len(res)):
x_list.append(res[i][0][0])
y_list.append(res[i][0][1])
z_list.append(res[i][0][2])
dataframe = pd.DataFrame(
{'x': x_list, 'y': y_list, 'z': z_list})
dataframe.to_csv(path, index=False,
columns=['x', 'y', 'z'], sep=',',float_format='%.5f')
def crop_heart(input_arr):
'''
    In order to remove the influence of pulmonary vessels, we use a threshold method to segment the heart region
:param input_arr: image arr
:return: Data after removing lung areas
'''
src_array = copy.deepcopy(input_arr)
z, w, h = src_array.shape
new_arr = np.zeros((z, w, h))
new_arr += -1000
sum_minr = 0
sum_minc = 0
sum_maxr = 0
sum_maxc = 0
for k in range(z):
image = src_array[k][:, :]
ret, thresh = cv2.threshold(image, 20, 400, cv2.THRESH_BINARY)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, anchor=(-1, -1), iterations=4)
label_opening = measure.label(opening)
regionprops = measure.regionprops(label_opening)
max_area = 0
index = 0
for i in range(len(regionprops)):
if regionprops[i].area > max_area:
max_area = regionprops[i].area
index = i
minr, minc, maxr, maxc = regionprops[index].bbox
new_arr[k][minr:maxr, minc:maxc] = src_array[k][minr:maxr, minc:maxc]
sum_minr += minr
sum_minc += minc
sum_maxr += maxr
sum_maxc += maxc
mean_minr = sum_minr // z
    mean_minc = sum_minc // z
mean_maxr = sum_maxr // z
mean_maxc = sum_maxc // z
    return new_arr, mean_minc, mean_minr, mean_maxc, mean_maxr
|
Python/demos/d04_SimpleReconstruction.py
|
tsadakane/TIGRE
| 326 |
87220
|
<gh_stars>100-1000
#%% Demo 4: Simple Image reconstruction
#
#
# This demo will show how a simple image reconstruction can be performed,
# by using OS-SART and FDK
#
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# This file is part of the TIGRE Toolbox
#
# Copyright (c) 2015, University of Bath and
# CERN-European Organization for Nuclear Research
# All rights reserved.
#
# License: Open Source under BSD.
# See the full license at
# https://github.com/CERN/TIGRE/blob/master/LICENSE
#
# Contact: <EMAIL>
# Codes: https://github.com/CERN/TIGRE/
# Coded by: <NAME>
# --------------------------------------------------------------------------
#%%Initialize
import tigre
import numpy as np
from tigre.utilities import sample_loader
from tigre.utilities import CTnoise
import tigre.algorithms as algs
#%% Geometry
geo = tigre.geometry_default(high_resolution=False)
#%% Load data and generate projections
# define angles
angles = np.linspace(0, 2 * np.pi, 100)
# Load head phantom data
head = sample_loader.load_head_phantom(geo.nVoxel)
# generate projections
projections = tigre.Ax(head, geo, angles)
# add noise
noise_projections = CTnoise.add(projections, Poisson=1e5, Gaussian=np.array([0, 10]))
#%% Reconstruct image using OS-SART and FDK
# FDK
imgFDK = algs.fdk(noise_projections, geo, angles)
# OS-SART
niter = 50
imgOSSART = algs.ossart(noise_projections, geo, angles, niter)
#%% Show the results
tigre.plotimg(np.concatenate([imgFDK, imgOSSART], axis=1), dim="z")
|
contrib/0.挖宝行动/youzidata-机坪跑道航空器识别/src/data/yolo_dataset.py
|
huaweicloud/ModelArts-Lab
| 1045 |
87232
|
# Copyright 2018 Deep Learning Service of Huawei Cloud. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from moxing.framework import file
from data.yolo_load.detection_dataset import Detection_dataset
from utils.read_image_to_list import get_image_list
from mxnet import gluon, io, nd
def _pad_arrs_to_max_length(arrs, max_gt_box_number, pad_axis=0, pad_val=-1):
"""Inner Implementation of the Pad batchify"""
if not isinstance(arrs[0], (nd.NDArray, np.ndarray)):
arrs = [np.asarray(ele) for ele in arrs]
max_size = max_gt_box_number
ret_shape = list(arrs[0].shape)
ret_shape[pad_axis] = max_size
ret_shape = (len(arrs), ) + tuple(ret_shape)
ret = nd.full(shape=ret_shape, val=pad_val, dtype=arrs[0].dtype)
for i, arr in enumerate(arrs):
if arr.shape[pad_axis] == max_size:
ret[i] = arr
else:
slices = [slice(None) for _ in range(arr.ndim)]
slices[pad_axis] = slice(0, arr.shape[pad_axis])
slices = [slice(i, i + 1)] + slices
ret[tuple(slices)] = arr
return ret
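# Illustrative sketch (hypothetical shapes): two label arrays of shape (3, 5)
# and (5, 5) padded with max_gt_box_number=6 become one NDArray of shape
# (2, 6, 5), with the missing ground-truth rows filled with pad_val=-1.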
class _train_batchify_fn(object):
def __init__(self, max_gt_box_number):
self._max_gt_box_number = max_gt_box_number
def __call__(self, data):
"""Collate train data into batch."""
img_data = nd.stack(*[item[0] for item in data])
center_targets = nd.stack(*[item[1] for item in data])
scale_targets = nd.stack(*[item[2] for item in data])
weights = nd.stack(*[item[3] for item in data])
objectness = nd.stack(*[item[4] for item in data])
class_targets = nd.stack(*[item[5] for item in data])
gt_bboxes = _pad_arrs_to_max_length([item[6] for item in data],
self._max_gt_box_number,
pad_axis=0, pad_val=-1)
batch_data = io.DataBatch(data=[img_data],
label=[gt_bboxes, objectness, center_targets,
scale_targets, weights, class_targets])
return batch_data
class _val_batchify_fn(object):
def __init__(self, max_gt_box_number):
self._max_gt_box_number = max_gt_box_number
def __call__(self, data):
"""Collate train data into batch."""
img_data = nd.stack(*[item[0] for item in data])
gt_bboxes = _pad_arrs_to_max_length([item[1] for item in data],
self._max_gt_box_number,
pad_axis=0, pad_val=-1)
batch_data = io.DataBatch(data=[img_data],
label=[gt_bboxes])
return batch_data
def _get_provide_data(next_batch):
next_data = next_batch.data
return [io.DataDesc(name='data', shape=next_data[0].shape)]
def _get_provide_label(next_batch, gt_boxes_shape=(32, 56, 4), is_train=True):
next_label = next_batch.label
if is_train:
provide_label = [io.DataDesc(name='gt_boxes',
shape=next_label[0].shape),
io.DataDesc(name='obj_t', shape=next_label[1].shape),
io.DataDesc(name='centers_t',
shape=next_label[2].shape),
io.DataDesc(name='scales_t',
shape=next_label[3].shape),
io.DataDesc(name='weights_t',
shape=next_label[4].shape),
io.DataDesc(name='clas_t', shape=next_label[5].shape)]
else:
provide_label = None
return provide_label
def _reset():
pass
def get_data_iter(data_path, train_file=None, val_file=None, split_spec=1,
hyper_train={}, hyper_val={}, **kwargs):
train_set = None
val_set = None
train_list = None
val_list = None
if train_file is not None:
assert file.exists(train_file), 'train file not found'
train_path = file.read(train_file).split("\n")[0:-1]
train_list = [path.replace('\r', '').split(' ') for path in train_path]
train_list = [[os.path.join(data_path, path[0]),
os.path.join(data_path, path[1])] for path in train_list]
if val_file is not None:
assert file.exists(val_file), 'val file not found'
val_path = file.read(val_file).split("\n")[0:-1]
val_list = [path.replace('\r', '').split(' ') for path in val_path]
val_list = [[os.path.join(data_path, path[0]),
os.path.join(data_path, path[1])] for path in val_list]
if train_file is None and val_file is None:
train_list, val_list, _ = get_image_list(data_path, split_spec)
if 'anchors' not in kwargs:
kwargs['anchors'] = [[116, 90, 156, 198, 373, 326],
[30, 61, 62, 45, 59, 119],
[10, 13, 16, 30, 33, 23]]
if 'offsets' not in kwargs:
kwargs['offsets'] = [(13, 13), (26, 26), (52, 52)]
if train_list is not None and len(train_list) > 0:
dataset = Detection_dataset(img_list=train_list,
index_file=hyper_train.get(
'index_file', None),
width=hyper_train.get('width', 416),
height=hyper_train.get('height', 416),
is_train=True,
** kwargs)
max_gt_box_number = max([len(item) for item in dataset.label_cache])
batch_size = hyper_train.get('batch_size', 32)
train_set = gluon.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=hyper_train.get('shuffle', True),
batchify_fn=_train_batchify_fn(max_gt_box_number),
last_batch='rollover',
num_workers=hyper_train.get('preprocess_threads', 4))
next_data_batch = next(iter(train_set))
setattr(train_set, 'reset', _reset)
setattr(train_set, 'provide_data', _get_provide_data(next_data_batch))
setattr(train_set, 'provide_label', _get_provide_label(
next_data_batch, (batch_size, max_gt_box_number, 4), is_train=True))
if val_list is not None and len(val_list) > 0:
assert 'index_file' in hyper_val and file.exists(
hyper_val['index_file']), 'label name file not found'
dataset = Detection_dataset(img_list=val_list,
index_file=hyper_val.get(
'index_file'),
width=hyper_val.get('width', 416),
height=hyper_val.get('height', 416),
is_train=False,
** kwargs)
max_gt_box_number = max([len(item) for item in dataset.label_cache])
batch_size = hyper_val.get('batch_size', 32)
val_set = gluon.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=hyper_val.get('shuffle', True),
batchify_fn=_val_batchify_fn(max_gt_box_number),
last_batch='keep',
num_workers=hyper_val.get('preprocess_threads', 4))
next_data_batch = next(iter(val_set))
setattr(val_set, 'reset', _reset)
setattr(val_set, 'provide_data', _get_provide_data(next_data_batch))
setattr(val_set, 'provide_label', _get_provide_label(
next_data_batch, is_train=False))
return train_set, val_set
|
hardware/chip/rtl872xd/hal/hal_test/uart/ucube.py
|
wstong999/AliOS-Things
| 4,538 |
87281
|
src = Split('''
uart_test.c
''')
component = aos_component('uart_test', src)
component.add_cflags('-Wall')
component.add_cflags('-Werror')
|
pad__unpad__example.py
|
DazEB2/SimplePyScripts
| 117 |
87284
|
<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
def pad(s: bytes, bs=8) -> bytes:
pad_size = bs - (len(s) % bs)
return s + bytes([pad_size] * pad_size)
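# Note (added): this is PKCS#7-style padding -- every padding byte stores the pad length,
# which is why unpad() below only needs to look at the last byte.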
def unpad(s: bytes) -> bytes:
pad_size = s[-1]
return s[:-pad_size]
if __name__ == '__main__':
data = b'Hello'
padded_data = pad(data)
print(padded_data) # b'Hello\x03\x03\x03'
print(unpad(padded_data)) # b'Hello'
print(unpad(padded_data).decode('utf-8')) # Hello
assert data == unpad(pad(data))
print()
assert b'123' == unpad(pad(b'123'))
assert b'123' * 9999 == unpad(pad(b'123' * 9999))
assert b'11111111' == unpad(pad(b'11111111'))
assert b'abcd123' == unpad(pad(b'abcd123'))
print(unpad(b'12\x02\x02')) # b'12'
print(unpad(b'1\x01')) # b'1'
print()
data = 'Привет!'.encode('utf-8')
padded_data = pad(data)
print(padded_data) # b'\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82!\x03\x03\x03'
print(unpad(padded_data)) # b'\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82!'
print(unpad(padded_data).decode('utf-8')) # Привет!
assert data == unpad(pad(data))
|
bmtk/simulator/pointnet/pyfunction_cache.py
|
aaberbach/bmtk
| 216 |
87297
|
<reponame>aaberbach/bmtk<gh_stars>100-1000
# Copyright 2017. <NAME>. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import types
from functools import wraps
class _PyFunctions(object):
"""Structure for holding custom user-defined python functions.
Will store a set of functions created by the user. Should not access this directly but rather use the
decorators or setter functions, and use the py_modules class variable to access individual functions. Is divided
up into
synaptic_weight: functions for calculating synaptic weight.
cell_model: should return NEURON cell hobj.
synapse model: should return a NEURON synapse object.
"""
def __init__(self):
self.__syn_weights = {}
self.__cell_models = {}
self.__synapse_models = {}
self.__cell_processors = {}
def clear(self):
self.__syn_weights.clear()
self.__cell_models.clear()
self.__synapse_models.clear()
self.__cell_processors.clear()
def add_synaptic_weight(self, name, func, overwrite=True):
"""stores synpatic fuction for given name"""
if overwrite or name not in self.__syn_weights:
self.__syn_weights[name] = func
@property
def synaptic_weights(self):
"""return list of the names of all available synaptic weight functions"""
return self.__syn_weights.keys()
def synaptic_weight(self, name):
"""return the synpatic weight function"""
return self.__syn_weights[name]
def has_synaptic_weight(self, name):
return name in self.__syn_weights
def __cell_model_key(self, directive, model_type):
return (directive, model_type)
def add_cell_model(self, directive, model_type, func, overwrite=True):
key = self.__cell_model_key(directive, model_type)
if overwrite or key not in self.__cell_models:
self.__cell_models[key] = func
@property
def cell_models(self):
return self.__cell_models.keys()
def cell_model(self, directive, model_type):
return self.__cell_models[self.__cell_model_key(directive, model_type)]
def has_cell_model(self, directive, model_type):
return self.__cell_model_key(directive, model_type) in self.__cell_models
def add_synapse_model(self, name, func, overwrite=True):
if overwrite or name not in self.__synapse_models:
self.__synapse_models[name] = func
@property
def synapse_models(self):
return self.__synapse_models.keys()
def synapse_model(self, name):
return self.__synapse_models[name]
@property
def cell_processors(self):
return self.__cell_processors.keys()
def cell_processor(self, name):
return self.__cell_processors[name]
def add_cell_processor(self, name, func, overwrite=True):
if overwrite or name not in self.__cell_processors:
self.__cell_processors[name] = func
def __repr__(self):
rstr = '{}: {}\n'.format('cell_models', self.cell_models)
rstr += '{}: {}\n'.format('synapse_models', self.synapse_models)
rstr += '{}: {}'.format('synaptic_weights', self.synaptic_weights)
return rstr
py_modules = _PyFunctions()
def synaptic_weight(*wargs, **wkwargs):
"""A decorator for registering a function as a synaptic weight function.
To use either::
@synaptic_weight
def weight_function():
...
or::
@synaptic_weight(name='name_in_edge_types')
def weight_function():
...
Once the decorator has been attached and imported the functions will automatically be added to py_modules.
"""
if len(wargs) == 1 and callable(wargs[0]):
# for the case without decorator arguments, grab the function object in wargs and create a decorator
func = wargs[0]
py_modules.add_synaptic_weight(func.__name__, func) # add function assigned to its original name
@wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
else:
# for the case with decorator arguments
assert(all(k in ['name'] for k in wkwargs.keys()))
def decorator(func):
# store the function in py_modules but under the name given in the decorator arguments
py_modules.add_synaptic_weight(wkwargs['name'], func)
@wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
return decorator
def cell_model(*wargs, **wkwargs):
"""A decorator for registering NEURON cell loader functions."""
if len(wargs) == 1 and callable(wargs[0]):
# for the case without decorator arguments, grab the function object in wargs and create a decorator
func = wargs[0]
py_modules.add_cell_model(func.__name__, func) # add function assigned to its original name
@wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
else:
# for the case with decorator arguments
assert(all(k in ['name'] for k in wkwargs.keys()))
def decorator(func):
# store the function in py_modules but under the name given in the decorator arguments
py_modules.add_cell_model(wkwargs['name'], func)
@wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
return decorator
def synapse_model(*wargs, **wkwargs):
"""A decorator for registering NEURON synapse loader functions."""
if len(wargs) == 1 and callable(wargs[0]):
# for the case without decorator arguments, grab the function object in wargs and create a decorator
func = wargs[0]
py_modules.add_synapse_model(func.__name__, func) # add function assigned to its original name
@wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
else:
# for the case with decorator arguments
assert(all(k in ['name'] for k in wkwargs.keys()))
def decorator(func):
# store the function in py_modules but under the name given in the decorator arguments
py_modules.add_synapse_model(wkwargs['name'], func)
@wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
return decorator
def add_weight_function(func, name=None, overwrite=True):
assert(callable(func))
func_name = name if name is not None else func.__name__
py_modules.add_synaptic_weight(func_name, func, overwrite)
def add_cell_model(func, directive, model_type, overwrite=True):
assert(callable(func))
# func_name = name if name is not None else func.__name__
py_modules.add_cell_model(directive, model_type, func, overwrite)
def add_cell_processor(func, name=None, overwrite=True):
assert(callable(func))
func_name = name if name is not None else func.__name__
py_modules.add_cell_processor(func_name, func, overwrite)
def add_synapse_model(func, name=None, overwrite=True):
assert (callable(func))
func_name = name if name is not None else func.__name__
py_modules.add_synapse_model(func_name, func, overwrite)
def load_py_modules(cell_models=None, syn_models=None, syn_weights=None):
# py_modules.clear()
if cell_models is not None:
assert(isinstance(cell_models, types.ModuleType))
for f in [cell_models.__dict__.get(f) for f in dir(cell_models)]:
if isinstance(f, types.FunctionType):
py_modules.add_cell_model(f.__name__, f)
if syn_models is not None:
assert(isinstance(syn_models, types.ModuleType))
for f in [syn_models.__dict__.get(f) for f in dir(syn_models)]:
if isinstance(f, types.FunctionType):
py_modules.add_synapse_model(f.__name__, f)
if syn_weights is not None:
assert(isinstance(syn_weights, types.ModuleType))
for f in [syn_weights.__dict__.get(f) for f in dir(syn_weights)]:
if isinstance(f, types.FunctionType):
py_modules.add_synaptic_weight(f.__name__, f)
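# Usage sketch (added; not part of the original module). The weight-function signature below
# is purely illustrative -- only the registration and lookup calls are the point:
#
#   def constant_weight(*args, **kwargs):
#       return 0.001
#
#   add_weight_function(constant_weight, name='constant_weight')
#   assert py_modules.has_synaptic_weight('constant_weight')
#   lookup = py_modules.synaptic_weight('constant_weight')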
|
tests/unit/fixtures/mock_hooks.py
|
troyready/runway
| 134 |
87306
|
"""Mock hooks."""
import os
GLOBAL_VALUE = os.getenv("AWS_DEFAULT_REGION")
|
test/issues/test_012.py
|
ajnelson-nist/pySHACL
| 167 |
87336
|
<reponame>ajnelson-nist/pySHACL
# -*- coding: utf-8 -*-
#
"""
https://github.com/RDFLib/pySHACL/issues/12
"""
from pyshacl import validate
shacl_file_text = """
@prefix hei: <http://hei.org/customer/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
hei:HeiAddressShape a sh:NodeShape ;
sh:property [ rdfs:comment "Street constraint" ;
sh:datatype xsd:string ;
sh:minLength 30 ;
sh:path hei:Ship_to_street ] ;
sh:targetClass hei:Hei_customer .
"""
data_file_text = """
@prefix hei: <http://hei.org/customer/> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
hei:hei_cust_1281 a hei:Hei_customer ;
rdfs:label "XYZHorecagroothandel" ;
hei:Klant_nummer 1281 ;
hei:Ship_to_City "Middenmeer" ;
hei:Ship_to_postcode "1799 AB" ;
hei:Ship_to_street "Industrieweg" .
"""
def test_012_text():
res = validate(data_file_text, shacl_graph=shacl_file_text,
data_graph_format='turtle', shacl_graph_format='turtle',
inference='both', debug=True)
conforms, graph, string = res
assert not conforms
def test_012_graph():
from rdflib import Graph
g = Graph()
g.parse(data=data_file_text, format='turtle')
sg = Graph()
sg.parse(data=shacl_file_text, format='turtle')
res = validate(g, shacl_graph=sg, inference='both', debug=True)
conforms, graph, string = res
assert not conforms
|
dev/mixtures/Table2.9_to_JSON.py
|
pauliacomi/CoolProp
| 520 |
87393
|
<reponame>pauliacomi/CoolProp
data = """CO2-H2O 1.030538 0.828472 1.021392 0.895156 1
CO2-N2 0.994140013 1.107654104 1.022709642 1.047578256 1
CO2-O2 1.000000 1.031986 1.000000 1.084460 0
CO2-Ar 1.027147 0.968781 1.001378 1.029710 1
CO2-CO 0.993245 1.068392 1.030855 1.245499 0
H2O-N2 0.954149 0.805147 1.079628 0.733443 1
H2O-O2 0.798046 0.807842 0.972576 0.873460 0.6017
H2O-Ar 0.679104 0.921000 0.940398 1.050952 0
H2O-CO 1.045927 0.823984 1.063348 0.766756 0.9897
N2-O2 0.997190589 0.995157044 0.999521770 0.997082328 0
N2-Ar 0.999442 0.989311 1.006697 1.001549 0
N2-CO 1.002409 0.994100 1.000000 1.001317 0
O2-Ar 0.999039 0.988822 1.006502 1.001341 0
O2-CO 1.000000 1.000000 1.000000 1.000000 0
CO-Ar 1.000000000 0.954215746 1.000000000 1.159720623 0"""
namedict = dict(O2='Oxygen', N2='Nitrogen', CO2='CarbonDioxide', CO='CarbonMonoxide', H2O='Water', Ar='Argon')
import CoolProp
CASdict = {namedict[n]: CoolProp.CoolProp.get_fluid_param_string(namedict[n], "CAS") for n in namedict}
functiondict = {'CO2-H2O': 'CarbonDioxide-Water',
'CO2-N2': 'CarbonDioxide-Nitrogen',
'CO2-Ar': 'CarbonDioxide-Argon',
'H2O-N2': 'GeneralizedAirWater',
'H2O-O2': 'GeneralizedAirWater',
'H2O-CO': 'GeneralizedAirWater'}
out = []
for line in data.split('\n'):
pair, betaT, betaV, gammaT, gammaV, F = line.split(' ')
n1, n2 = pair.split('-')
out.append(dict(BibTeX='Gernert-Thesis-2013',
F=float(F),
betaT=float(betaT),
betaV=float(betaV),
gammaT=float(gammaT),
gammaV=float(gammaV),
Name1=namedict[n1],
Name2=namedict[n2],
CAS1=CASdict[namedict[n1]],
CAS2=CASdict[namedict[n2]]))
if F != '0':
out[-1]['function'] = functiondict[pair]
import json, sys
sys.path.append('..')
from package_json import json_options
print(json.dumps(out, **json_options))
|
research/cvt_text/corpus_processing/example.py
|
873040/Abhishek
| 82,518 |
87413
|
<filename>research/cvt_text/corpus_processing/example.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for training examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from base import embeddings
CONTRACTION_WORDS = set(w + 'n' for w in
['do', 'does', 'did', 'is', 'are', 'was', 'were', 'has',
'have', 'had', 'could', 'would', 'should', 'ca', 'wo',
'ai', 'might'])
class Example(object):
def __init__(self, words, word_vocab, char_vocab):
words = words[:]
# Fix inconsistent tokenization between datasets
for i in range(len(words)):
if (words[i].lower() == '\'t' and i > 0 and
words[i - 1].lower() in CONTRACTION_WORDS):
words[i] = words[i - 1][-1] + words[i]
words[i - 1] = words[i - 1][:-1]
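        # e.g. the token pair ["don", "'t"] is rewritten to ["do", "n't"] so every dataset
        # ends up with the same contraction tokenization (added explanatory comment).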
self.words = ([embeddings.START] +
[word_vocab[embeddings.normalize_word(w)] for w in words] +
[embeddings.END])
self.chars = ([[embeddings.MISSING]] +
[[char_vocab[c] for c in embeddings.normalize_chars(w)]
for w in words] +
[[embeddings.MISSING]])
def __repr__(self,):
inv_char_vocab = embeddings.get_inv_char_vocab()
return ' '.join([''.join([inv_char_vocab[c] for c in w])
for w in self.chars])
|
tests/test_message.py
|
xjiro/python-valve
| 136 |
87421
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright (C) 2013 <NAME>
from __future__ import (absolute_import,
unicode_literals, print_function, division)
import inspect
try:
from mock import Mock
except ImportError:
from unittest.mock import Mock
import pytest
import six
from valve.source import messages
class TestUseDefault(object):
def test_pass_value(self):
instance = messages.MessageField("", optional=False, default_value=5)
called = []
@messages.use_default
def test(instance, value, values):
called.append(None)
assert value == 5
test(instance, 5)
assert called
def test_nonoptional_no_value(self):
instance = messages.MessageField("", optional=False, default_value=5)
called = []
@messages.use_default
def test(instance, value, values):
called.append(None)
assert value == 5
with pytest.raises(ValueError):
test(instance)
assert not called
def test_optional_pass_value(self):
instance = messages.MessageField("", optional=True, default_value=5)
called = []
@messages.use_default
def test(instance, value, values):
called.append(None)
assert value == 10
test(instance, 10)
assert called
def test_optional_no_value(self):
instance = messages.MessageField("", optional=True, default_value=5)
called = []
@messages.use_default
def test(instance, value, values):
called.append(None)
assert value == 5
test(instance)
assert called
class TestNeedsBuffer(object):
def test_not_empty(self):
called = []
@messages.needs_buffer
def test(instance, buf, values):
called.append(None)
test(None, b"...", {})
assert called
def test_empty(self):
called = []
@messages.needs_buffer
def test(instance, buf, values):
called.append(None)
with pytest.raises(messages.BufferExhaustedError):
test(None, b"", {})
assert not called
class TestMessageField(object):
def test_default_little_endian(self):
class TestField(messages.MessageField):
fmt = "i"
assert TestField("").format.startswith("<")
def test_explicit_endian(self):
for fmt in "!<>=@":
TestField = type("TestField" if six.PY3 else b"TestField",
(messages.MessageField,), {"fmt": fmt})
assert TestField("").format.startswith(fmt)
def test_validate(self):
validators = [
Mock(side_effect=lambda x: x == 5),
Mock(side_effect=lambda x: isinstance(x, int))
]
field = messages.MessageField("", validators=validators)
field.validate(5)
for validator in validators:
assert validator.called
with pytest.raises(messages.BrokenMessageError):
field.validate("10")
def test_validate_exception(self):
field = messages.MessageField("", validators=[Mock(side_effect=Exception)])
with pytest.raises(messages.BrokenMessageError):
field.validate(5)
def test_decode_empty(self):
field = messages.MessageField("")
with pytest.raises(messages.BufferExhaustedError):
field.decode(b"")
def test_decode_small_buffer(self):
field = messages.MessageField("")
field.format = b"<d" # 8 bytes
with pytest.raises(messages.BufferExhaustedError):
field.decode(b"\x00\x00\x00\x00\x00\x00\x00")
def test_decode(self):
field = messages.MessageField("")
field.format = b"<B" # 1 byte
value, remnants = field.decode(b"\xFF\x01\x02\x03")
assert value == 255
assert isinstance(remnants, bytes)
assert remnants == b"\x01\x02\x03"
def test_decode_junk(self, monkeypatch):
field = messages.MessageField("")
field.format = b"B"
unpack = Mock(side_effect=messages.struct.error)
monkeypatch.setattr(messages.struct, "unpack", unpack)
with pytest.raises(messages.BrokenMessageError):
field.decode(b"\x01\x02\x03")
@pytest.mark.parametrize("field,value,expected", [
(messages.ByteField, 26, b"\x1A"),
(messages.ShortField, 4056, b"\xD8\x0F"),
(messages.LongField, 2394838, b"\xD6\x8A\x24\x00"),
(messages.FloatField, 1.0, b"\x00\x00\x80\x3F"),
(messages.MSAddressEntryPortField, 6969, b"\x1B\x39")
])
def test_encode(self, field, value, expected):
encoded = field("").encode(value)
assert isinstance(encoded, bytes)
assert encoded == expected
@pytest.mark.parametrize("field,value", [
(messages.ByteField, -1),
(messages.ByteField, 256),
(messages.ShortField, -32769),
(messages.ShortField, 32768),
(messages.LongField, -2147483649),
(messages.LongField, 2147483648),
(messages.MSAddressEntryPortField, -1),
(messages.MSAddressEntryPortField, 65536)
])
def test_encode_out_of_range(self, field, value):
with pytest.raises(messages.BrokenMessageError):
field("").encode(value)
class TestStringField(object):
def test_encode(self):
field = messages.StringField("")
encoded = field.encode("Hello")
assert isinstance(encoded, bytes)
assert encoded.endswith(b"\x00")
assert encoded[:-1] == b"\x48\x65\x6C\x6C\x6F"
def test_decode(self):
field = messages.StringField("")
encoded = b"\x48\x65\x6C\x6C\x6F\x00\x02\x01\x00"
decoded, remnants = field.decode(encoded)
assert isinstance(decoded, six.text_type)
assert decoded == "Hello"
assert isinstance(remnants, bytes)
assert remnants == b"\x02\x01\x00"
def test_decode_empty(self):
field = messages.StringField("")
with pytest.raises(messages.BufferExhaustedError):
field.decode(b"")
def test_no_null_terminator(self):
field = messages.StringField("")
with pytest.raises(messages.BufferExhaustedError):
field.decode(b"\xFF\xFF\xFF")
class TestMessageArrayField(object):
@pytest.fixture
def Message(self):
"""Simple message with a byte field and short filed"""
class Message(messages.Message):
fields = (
messages.ByteField("byte"),
messages.ShortField("short")
)
return Message
def test_constant_count(self):
array = messages.MessageArrayField("", None, 5)
assert array.count() == 5
assert array.count.minimum == 5
def test_callable_count(self):
def function(values={}):
pass
array = messages.MessageArrayField("", None, function)
assert array.count is function
def test_decode_constant(self):
class Message(messages.Message):
fields = messages.ByteField("field"),
array = messages.MessageArrayField("", Message, 5)
encoded = b"\x00\x01\x02\x03\x04\x00\x00\x00"
values, remnants = array.decode(encoded)
for sub_message, expected in zip(values, range(4)):
assert sub_message["field"] == expected
assert isinstance(remnants, bytes)
assert remnants == b"\x00\x00\x00"
def test_decode_insufficient_buffer(self):
class Message(messages.Message):
fields = messages.ByteField("field"),
array = messages.MessageArrayField("", Message, 5)
encoded = b"\xFF\xFE\xFD"
with pytest.raises(messages.BrokenMessageError):
array.decode(encoded)
def test_decode_minimum(self):
class Message(messages.Message):
fields = messages.ByteField("field"),
array = messages.MessageArrayField("", Message, 5)
array.count.minimum = 2
encoded = b"\x00\x01"
values, remnants = array.decode(encoded) # Minimum
for sub_message, expected in zip(values, range(1)):
assert sub_message["field"] == expected
assert not remnants
encoded += b"\x02\x03\x04"
values, remnants = array.decode(encoded) # Maximum
for sub_message, expected in zip(values, range(4)):
assert sub_message["field"] == expected
assert not remnants
def test_decode_minimum_remnants(self):
class Message(messages.Message):
fields = messages.ShortField("field"),
array = messages.MessageArrayField("", Message, 3)
array.count.minimum = 2
# Two shorts and a trailing byte
encoded = b"\x00\x00\x11\x11\x22"
values, remnants = array.decode(encoded)
for sub_message, expected in zip(values, [0, 0x1111]):
assert sub_message["field"] == expected
assert isinstance(remnants, bytes)
assert remnants == b"\x22"
def test_decode_value_of(self):
assert messages.MessageArrayField.value_of("f")({"f": 26}) == 26
def test_decode_all(self):
class Message(messages.Message):
fields = messages.ByteField(""),
array = messages.MessageArrayField(
"", Message, messages.MessageArrayField.all())
values, remnants = array.decode(b"\x00" * 128)
assert len(values) == 128
assert not remnants
def test_decode_all_remnants(self):
class Message(messages.Message):
fields = messages.ShortField(""),
array = messages.MessageArrayField(
"", Message, messages.MessageArrayField.all())
values, remnants = array.decode((b"\x00\x00" * 64) + b"\xFF")
assert len(values) == 64
assert isinstance(remnants, bytes)
assert remnants == b"\xFF"
def test_decode_at_least_minimum(self):
class Message(messages.Message):
fields = messages.ByteField(""),
array = messages.MessageArrayField(
"", Message, messages.MessageArrayField.at_least(5))
values, remnants = array.decode(b"\x00" * 5)
assert len(values) == 5
assert not remnants
def test_decode_at_least_more(self):
class Message(messages.Message):
fields = messages.ByteField(""),
array = messages.MessageArrayField(
"", Message, messages.MessageArrayField.at_least(5))
values, remnants = array.decode(b"\x00" * 10)
assert len(values) == 10
assert not remnants
def test_decode_at_least_too_few(self):
class Message(messages.Message):
fields = messages.ByteField(""),
array = messages.MessageArrayField(
"", Message, messages.MessageArrayField.at_least(5))
with pytest.raises(messages.BrokenMessageError):
array.decode(b"\x00" * 4)
def test_decode_at_least_remnants(self):
class Message(messages.Message):
fields = messages.ShortField(""),
array = messages.MessageArrayField(
"", Message, messages.MessageArrayField.at_least(5))
values, remnants = array.decode((b"\x00\x00" * 10) + b"\xFF")
assert len(values) == 10
assert isinstance(remnants, bytes)
assert remnants == b"\xFF"
def test_encode(self, Message):
array = messages.MessageArrayField("", Message, 3)
elements = [Message(byte=255, short=0x11AA)] * 3
encoded = array.encode(elements)
assert isinstance(encoded, bytes)
assert encoded == elements[0].encode() * 3
def test_encode_invalid_element(self):
class Element(messages.Message):
fields = ()
class Borked(messages.Message):
fields = ()
array = messages.MessageArrayField("", Element, 3)
with pytest.raises(messages.BrokenMessageError):
array.encode([Borked()])
def test_encode_too_many_elements(self, Message):
array = messages.MessageArrayField("", Message, 3)
elements = [Message(byte=255, short=0x11AA)] * 5
with pytest.raises(messages.BrokenMessageError):
array.encode(elements)
def test_encode_too_few_elements(self, Message):
array = messages.MessageArrayField("", Message, 5)
elements = [Message(byte=255, short=0x11AA)] * 3
with pytest.raises(messages.BrokenMessageError):
array.encode(elements)
def test_encode_all(self, Message):
array = messages.MessageArrayField("", Message)
elements = [Message(byte=255, short=0x11AA)] * 10
encoded = array.encode(elements)
assert isinstance(encoded, bytes)
assert encoded == elements[0].encode() * 10
def test_encode_all_none(self, Message):
array = messages.MessageArrayField("", Message)
encoded = array.encode([])
assert isinstance(encoded, bytes)
assert len(encoded) == 0
def test_encode_value_of(self, Message):
array = messages.MessageArrayField(
"", Message, messages.MessageArrayField.value_of("life"))
elements = [Message(byte=255, short=0x11AA)] * 5
encoded = array.encode(elements, {"life": 5})
assert isinstance(encoded, bytes)
assert encoded == elements[0].encode() * 5
def test_encode_at_least_minimum(self, Message):
array = messages.MessageArrayField(
"", Message, messages.MessageArrayField.at_least(3))
elements = [Message(byte=255, short=0x11AA)] * 3
encoded = array.encode(elements)
assert isinstance(encoded, bytes)
assert encoded == elements[0].encode() * 3
def test_encode_at_least_more(self, Message):
array = messages.MessageArrayField(
"", Message, messages.MessageArrayField.at_least(3))
elements = [Message(byte=255, short=0x11AA)] * 5
encoded = array.encode(elements)
assert isinstance(encoded, bytes)
assert encoded == elements[0].encode() * 5
def test_encode_at_least_too_few(self, Message):
array = messages.MessageArrayField(
"", Message, messages.MessageArrayField.at_least(5))
elements = [Message(byte=255, short=0x11AA)] * 4
with pytest.raises(messages.BrokenMessageError):
encoded = array.encode(elements)
class TestMessageDictField(object):
def test_decode(self):
ddict = messages.MessageDictField("",
messages.ByteField("key"),
messages.ByteField("value"), 5)
encoded = b""
for key in six.moves.range(5):
encoded += six.int2byte(key) + b"\xFF"
values, remnants = ddict.decode(encoded)
for key in values.keys():
assert key in set(six.moves.range(5))
assert values[key] == 255
class TestMessage(object):
def test_getitem(self):
assert messages.Message(key=":)")["key"] == ":)"
def test_setitem(self):
message = messages.Message()
message["key"] = ":)"
assert message["key"] == ":)"
def test_delitem(self):
message = messages.Message(key=":(")
del message["key"]
with pytest.raises(KeyError):
message["key"]
def test_len(self):
message = messages.Message(key1=None, key2=None, key3=None)
assert len(message) == 3
def test_iter(self):
keys = {"key1": None, "key2": None, "key3": None}
message = messages.Message(**keys)
for key in message:
keys.pop(key)
assert not keys
def test_encode_simple(self):
class Message(messages.Message):
fields = (
messages.ByteField("first_field"),
messages.ByteField("last_field")
)
encoded = Message(first_field=5).encode(last_field=10)
assert isinstance(encoded, bytes)
assert encoded == b"\x05\x0A"
def test_encode_missing_nonoptional_field(self):
class Message(messages.Message):
fields = (
messages.ByteField("first_field"),
messages.ByteField("last_field")
)
with pytest.raises(ValueError):
Message(first_field=5).encode()
def test_encode_missing_optional_field(self):
class Message(messages.Message):
fields = (
messages.ByteField("first_field"),
messages.ByteField("last_field",
optional=True, default_value=10)
)
encoded = Message(first_field=5).encode()
assert isinstance(encoded, bytes)
assert encoded == b"\x05\x0A"
def test_encode_array(self):
count = Mock(return_value=1)
count.minimum = 1
class Element(messages.Message):
fields = ()
encode = Mock(return_value=b"")
class Message(messages.Message):
fields = (
messages.ByteField("byte"),
messages.MessageArrayField("array", Element, count)
)
message = Message(byte=26, array=[Element()])
encoded = message.encode()
assert isinstance(encoded, bytes)
assert Element.encode.called
assert count.called
assert count.call_args[0][0] == message.values
# TODO: more complex structures, e.g. ArrayField and DictFields
class TestFragment(object):
def test_is_compressed(self):
assert messages.Fragment(message_id=(1 << 31) - 1).is_compressed
assert not messages.Fragment(message_id=1 << 30).is_compressed
class TestMSAddressEntry(object):
def test_decode_ip_insufficient_buffer(self):
with pytest.raises(messages.BufferExhaustedError):
messages.MSAddressEntryIPField("").decode(b"\x00\x00")
def test_decode_ip(self):
ip, remnants = messages.MSAddressEntryIPField("").decode(
b"\x00\x01\x02\x03\xFF\xFF")
assert isinstance(ip, six.text_type)
assert ip == "0.1.2.3"
assert isinstance(remnants, bytes)
assert remnants == b"\xFF\xFF"
def test_is_null(self):
assert messages.MSAddressEntry.decode(
b"\x00\x00\x00\x00\x00\x00").is_null
assert not messages.MSAddressEntry.decode(
b"\x01\x02\x03\x04\x69\x87").is_null
|
xpath/extractor/tables.py
|
r0oth3x49/xpath
| 113 |
87446
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# pylint: disable=R,W,E,C
"""
Author : <NAME> (r0ot h3x49)
Github : https://github.com/r0oth3x49
License : MIT
Copyright (c) 2016-2025 <NAME> (r0ot h3x49)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from xpath.common.session import session
from xpath.injector.request import request
from xpath.logger.colored_logger import logger
from xpath.common.payloads import PAYLOADS_TBLS_COUNT, PAYLOADS_TBLS_NAMES
from xpath.common.utils import (
to_hex,
prettifier,
prepare_payload_request,
clean_up_offset_payload,
)
from xpath.common.lib import (
binascii,
collections,
compat_urlencode,
DB_TABLES,
TBLS_STATEMENT,
)
class TablesExtractor(object):
"""
Extracts table names for a given database.
"""
def _generate_tbl_payloads(self, tbl_count, payload, index=0):
payload = clean_up_offset_payload(payload)
payloads = [payload.format(index=i) for i in range(index, tbl_count)]
return payloads
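    # Added note: e.g. with tbl_count=3 and index=0 this yields three copies of the working
    # payload with index values 0, 1 and 2 substituted in (typically an OFFSET/LIMIT-style
    # position), i.e. one payload per table name to be extracted.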
def _tbl_count(self, db=""):
_temp = []
if db:
count_payloads = []
[count_payloads.extend(v) for _, v in PAYLOADS_TBLS_COUNT.items()]
encode_string = to_hex(db, dbms=self._dbms)
if self._dbms:
count_payloads = PAYLOADS_TBLS_COUNT.get(self._dbms, count_payloads)
for entry in count_payloads:
if self._dbms and self._dbms.startswith("Microsoft") and "sysobjects" in entry:
data = entry.format(db=db.strip())
else:
data = entry.format(db=encode_string)
_temp.append(data)
payloads = self._generat_payload(payloads_list=_temp)
return self._extact(payloads=payloads)
def tbl_names(self, db=""):
index = 0
_temp = []
is_resumed = False
fetched_data = {}
_temp_payloads = []
TablesResponse = collections.namedtuple(
"TablesResponse", ["fetched", "count", "database", "tables"]
)
if db:
dump_payloads = []
[dump_payloads.extend(v) for _, v in PAYLOADS_TBLS_NAMES.items()]
encode_string = to_hex(db, dbms=self._dbms)
if self._dbms:
dump_payloads = PAYLOADS_TBLS_NAMES.get(self._dbms, dump_payloads)
for entry in dump_payloads:
if self._dbms and self._dbms.startswith("Microsoft"):
if "sysobjects" in entry:
data = entry.format(db=db.strip(), db1=db.strip())
else:
data = entry.format(db=encode_string, db1=encode_string)
else:
data = entry.format(db=encode_string)
_temp_payloads.append(data)
try:
fetched_data = session.fetch_from_table(
session_filepath=self.session_filepath,
table_name=db,
cursor=False,
)
if fetched_data:
is_resumed = True
except Exception as error:
pass
logger.info("fetching tables for database: '%s'" % (db))
retval = self._tbl_count(db=db)
if retval.is_injected:
tbl_count = int(retval.result)
if tbl_count != 0:
logger.info("used SQL query returns %d entries" % (tbl_count))
if tbl_count == 0:
logger.warning(
"used SQL query returns %d entries for database: '%s'"
% (tbl_count, db)
)
return TablesResponse(
fetched=False, count=tbl_count, database=db, tables=[]
)
if is_resumed:
for entry in fetched_data:
name = entry.get("tblname")
if name not in _temp:
_temp.append(name)
logger.info(f"resumed: '{name}'")
index += 1
should_fetch = True
if is_resumed:
if len(fetched_data) == tbl_count:
should_fetch = False
if should_fetch:
payloads = self._generat_payload(payloads_list=_temp_payloads)
retval = self._extact(payloads=payloads)
if retval.is_injected:
payload = retval.payload
payloads = self._generate_tbl_payloads(
tbl_count=tbl_count, payload=payload, index=index
)
if not is_resumed:
session.generate_table(
session_filepath=self.session_filepath,
query=DB_TABLES.format(name=db, tbl_name=db),
)
response_data = self._extract_tbls(payloads=payloads, database=db)
if response_data.is_fetched:
_temp.extend(response_data.result)
self._pprint_tables(
cursor_or_list=_temp,
field_names="Tables",
database=db,
)
return TablesResponse(
fetched=True, count=tbl_count, database=db, tables=_temp
)
if not retval.is_injected:
status_code = retval.status_code
error = retval.error
count = retval.payloads_count
if status_code not in [200, 0]:
message = f"{error} - {count} times"
logger.warning(
f"HTTP error codes detected during run:\n{message}"
)
else:
message = f"tested with '{count}' queries, unable to find working SQL query."
logger.critical(message)
else:
self._pprint_tables(
cursor_or_list=_temp,
field_names="Tables",
database=db,
)
return TablesResponse(
fetched=True, count=tbl_count, database=db, tables=_temp
)
if not retval.is_injected:
status_code = retval.status_code
error = retval.error
count = retval.payloads_count
if status_code not in [200, 0]:
message = f"{error} - {count} times"
logger.warning(f"HTTP error codes detected during run:\n{message}")
else:
message = (
f"tested with '{count}' queries, unable to find working SQL query."
)
logger.critical(message)
return TablesResponse(fetched=False, count=0, database="", tables=_temp)
def _pprint_tables(self, cursor_or_list, field_names, database=""):
obj = prettifier(cursor_or_list, field_names)
data = obj.data
entries = obj.entries
logger.success(f"Database: {database}")
logger.success(f"[{entries} tables]")
logger.success(f"{data}")
def _extract_tbls(self, payloads, database=""):
_temp, index = [], 0
Response = collections.namedtuple("Response", ["is_fetched", "result"])
while index < len(payloads):
payload = payloads[index]
payload_request = prepare_payload_request(self, payload)
url = payload_request.url
data = payload_request.data
regex = payload_request.regex
headers = payload_request.headers
try:
response = request.inject_payload(
url=url, regex=regex, data=data, headers=headers, proxy=self._proxy
)
except KeyboardInterrupt:
logger.warning(
"user aborted during enumeration. Xpath will display partial output"
)
break
else:
if response.ok:
result = response.result
logger.info("retrieved: '%s'" % (result))
_temp.append(result)
retval = session.dump(
session_filepath=self.session_filepath,
query=TBLS_STATEMENT.format(tbl_name=database, tblname=result),
)
index += 1
if _temp and len(_temp) > 0:
_temp = list(set(_temp))
resp = Response(is_fetched=True, result=_temp)
else:
resp = Response(is_fetched=True, result=_temp)
return resp
|
test/benchmark_array_creation.py
|
Yousazoe/oxBot
| 242 |
87451
|
"""
Testing the fastest way to create a 1D array with 2 values
"""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import random
import numpy as np
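# Added note: np.float used below is just an alias for the builtin float; it was deprecated in
# NumPy 1.20 and removed in 1.24, so plain float / np.float64 may be needed on newer NumPy.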
x, y = random.uniform(0, 300), random.uniform(0, 300)
def numpy_array(x, y):
# Calculate distances between each of the points
return np.array((x, y), dtype=np.float)
def numpy_array_tuple(my_tuple):
# Calculate distances between each of the points
return np.array(my_tuple, dtype=np.float)
def numpy_asarray(x, y):
# Calculate distances between each of the points
return np.asarray((x, y), dtype=np.float)
def numpy_asarray_tuple(my_tuple):
# Calculate distances between each of the points
return np.asarray(my_tuple, dtype=np.float)
def numpy_asanyarray(x, y):
# Calculate distances between each of the points
return np.asanyarray((x, y), dtype=np.float)
def numpy_asanyarray_tuple(my_tuple):
# Calculate distances between each of the points
return np.asanyarray(my_tuple, dtype=np.float)
def numpy_fromiter(x, y):
# Calculate distances between each of the points
return np.fromiter((x, y), dtype=float, count=2)
def numpy_fromiter_tuple(my_tuple):
# Calculate distances between each of the points
return np.fromiter(my_tuple, dtype=float, count=2)
def numpy_fromiter_np_float(x, y):
# Calculate distances between each of the points
return np.fromiter((x, y), dtype=np.float, count=2)
def numpy_fromiter_np_float_tuple(my_tuple):
# Calculate distances between each of the points
return np.fromiter(my_tuple, dtype=np.float, count=2)
def numpy_zeros(x, y):
# Calculate distances between each of the points
a = np.zeros(2, dtype=np.float)
a[0] = x
a[1] = y
return a
def numpy_ones(x, y):
# Calculate distances between each of the points
a = np.ones(2, dtype=np.float)
a[0] = x
a[1] = y
return a
numpy_array(x, y)
correct_array = np.array([x, y])
def test_numpy_array(benchmark):
result = benchmark(numpy_array, x, y)
assert np.array_equal(result, correct_array)
def test_numpy_array_tuple(benchmark):
result = benchmark(numpy_array_tuple, (x, y))
assert np.array_equal(result, correct_array)
def test_numpy_asarray(benchmark):
result = benchmark(numpy_asarray, x, y)
assert np.array_equal(result, correct_array)
def test_numpy_asarray_tuple(benchmark):
result = benchmark(numpy_asarray_tuple, (x, y))
assert np.array_equal(result, correct_array)
def test_numpy_asanyarray(benchmark):
result = benchmark(numpy_asanyarray, x, y)
assert np.array_equal(result, correct_array)
def test_numpy_asanyarray_tuple(benchmark):
result = benchmark(numpy_asanyarray_tuple, (x, y))
assert np.array_equal(result, correct_array)
def test_numpy_fromiter(benchmark):
result = benchmark(numpy_fromiter, x, y)
assert np.array_equal(result, correct_array)
def test_numpy_fromiter_tuple(benchmark):
result = benchmark(numpy_fromiter_tuple, (x, y))
assert np.array_equal(result, correct_array)
def test_numpy_fromiter_np_float(benchmark):
result = benchmark(numpy_fromiter_np_float, x, y)
assert np.array_equal(result, correct_array)
def test_numpy_fromiter_np_float_tuple(benchmark):
result = benchmark(numpy_fromiter_np_float_tuple, (x, y))
assert np.array_equal(result, correct_array)
def test_numpy_zeros(benchmark):
result = benchmark(numpy_zeros, x, y)
assert np.array_equal(result, correct_array)
def test_numpy_ones(benchmark):
result = benchmark(numpy_ones, x, y)
assert np.array_equal(result, correct_array)
# Run this file using
# poetry run pytest test/benchmark_array_creation.py --benchmark-compare
|
tests/test_commandline_classify_speakers.py
|
erayee/Montreal-Forced-Aligner
| 702 |
87485
|
<reponame>erayee/Montreal-Forced-Aligner<gh_stars>100-1000
import os
import pytest
from montreal_forced_aligner.command_line.classify_speakers import run_classify_speakers
from montreal_forced_aligner.command_line.mfa import parser
def test_classify(basic_corpus_dir, sick_dict_path, english_ivector_model, generated_dir, temp_dir):
output_path = os.path.join(generated_dir, 'classify_test')
command = ['classify_speakers', basic_corpus_dir, 'english_ivector',
output_path,
'-t', temp_dir, '-q', '--clean', '--debug', '-v', '--disable_mp', '-s', '1']
args, unknown = parser.parse_known_args(command)
run_classify_speakers(args)
def test_cluster(basic_corpus_dir, sick_dict_path, english_ivector_model, generated_dir,
transcription_acoustic_model, transcription_language_model, temp_dir):
output_path = os.path.join(generated_dir, 'cluster_test')
command = ['classify_speakers', basic_corpus_dir, 'english_ivector',
output_path,
'-t', temp_dir, '-q', '--clean', '--debug', '--cluster', '-s', '2', '--disable_mp']
args, unknown = parser.parse_known_args(command)
run_classify_speakers(args)
|
oracle_problems/problem_3.py
|
loftwah/Daily-Coding-Problem
| 129 |
87509
|
"""This problem was asked by Oracle.
Given a binary search tree, find the floor and ceiling of a given integer.
The floor is the highest element in the tree less than or equal to an integer,
while the ceiling is the lowest element in the tree greater than or equal to an integer.
If either value does not exist, return None.
"""
|
recipes/Python/577859_CLOSlike_aroundbeforeafter_auxiliary/recipe-577859.py
|
tdiprima/code
| 2,023 |
87531
|
#!/usr/bin/env python
#
# Copyright (c) 2011 <NAME> (zuo). All rights reserved.
# Licensed under the MIT License.
#
# Python 2.5+/3.x-compatible.
#
# The newest version of this module should be downloadable from:
# https://github.com/zuo/Zuo-s-Recipes-and-Drafts/blob/master/auxmethods.py
from __future__ import with_statement # (Py2.5 needs this)
from functools import wraps
from inspect import getmro, isfunction
__all__ = (
'ClassNameConflictError',
'aux', 'primary',
'AutoAuxBase', 'AutoAuxMeta',
)
#
# exceptions
class ClassNameConflictError(Exception):
"""
Conflict: class names are identical after stripping leading underscores.
"""
def __str__(self):
cls1, cls2 = self.args
return (
'Class names: %r and %r -- are identical after stripping leading '
'underscores, which is forbidden when using aux/primary methods.'
% (cls1.__name__, cls2.__name__))
#
# non-public stuff
_SUFFIXES = '_primary', '_before', '_after', '_around'
class _WrappedMethodPlaceholder(object):
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
raise TypeError('method placeholder is not callable '
'(forgot to apply aux() class decorator?)')
def _next_around(obj_around, self, basename, *args, **kwargs):
# try to get and call next `around` aux method
meth_around = getattr(obj_around, basename + '_around', None)
if meth_around is not None:
return meth_around(*args, **kwargs)
else:
# if there is no more `around` methods, get and call:
# `before` aux method (it can call superclasses' `before` methods)
meth_before = getattr(self, basename + '_before', None)
if meth_before is not None:
meth_before(*args, **kwargs)
# primary method (it can call superclasses' primary methods)
meth_primary = getattr(self, basename + '_primary')
pri_result = meth_primary(*args, **kwargs)
# `after` aux method (it can call superclasses' `after` methods)
meth_after = getattr(self, basename + '_after', None)
if meth_after is not None:
meth_after(*args, **kwargs)
return pri_result
def _provide_wrapper(cls, func, basename):
@wraps(func)
def wrapper(self, *args, **kwargs):
return _next_around(self, self, basename, *args, **kwargs)
added_doc = '(See: %s%s() signature).' % (basename, '_primary')
existing_doc = (getattr(wrapper, '__doc__', None) or '').rstrip()
if existing_doc:
wrapper.__doc__ = '%s\n\n%s' % (existing_doc, added_doc)
else:
wrapper.__doc__ = added_doc
setattr(cls, basename, wrapper)
def _provide_primary(cls, func, basename):
suffixed_name = basename + '_primary'
func.__name__ = suffixed_name
func.__doc__ = (
'The actual method implementation '
'(%s() is only a wrapper).' % basename)
setattr(cls, suffixed_name, func)
def _provide_wrapped_primary(cls, func):
basename = func.__name__
_provide_wrapper(cls, func, basename)
_provide_primary(cls, func, basename)
def _strip_and_check_cls_name(cls):
cls_stripped_name = cls.__name__.lstrip('_')
for supercls in getmro(cls):
if (supercls is not cls and
cls_stripped_name == supercls.__name__.lstrip('_')):
raise ClassNameConflictError(supercls, cls)
return cls_stripped_name
def _provide_call_next(cls, suffixed_name):
cls_stripped_name = _strip_and_check_cls_name(cls)
basename, qualifier = suffixed_name.rsplit('_', 1)
cn_name = '_%s__%s' % (
cls_stripped_name,
(basename if qualifier == 'primary' else suffixed_name))
if cn_name in vars(cls):
return
if qualifier == 'around':
def call_next(self, *args, **kwargs):
return _next_around(
super(cls, self), self, basename, *args, **kwargs)
else:
def call_next(self, *args, **kwargs):
super_meth = getattr(super(cls, self), suffixed_name, None)
if super_meth is not None:
return super_meth(*args, **kwargs)
call_next.__name__ = cn_name
setattr(cls, cn_name, call_next)
#
# actual decorators
def aux(cls):
"""Class decorator (for classes containing primary and/or aux methods)."""
if not isinstance(cls, type):
raise TypeError('%r is not a type' % cls)
# wrap/rename primary methods
for name, obj in tuple(vars(cls).items()): # (Py2.x/3.x-compatible way)
if isinstance(obj, _WrappedMethodPlaceholder):
_provide_wrapped_primary(cls, obj.func)
# provide `call-next-method`-like methods
for name, obj in tuple(vars(cls).items()):
if isfunction(obj) and obj.__name__.endswith(_SUFFIXES):
_provide_call_next(cls, obj.__name__)
return cls
def primary(func):
"""Method decorator (for primary methods only)."""
if not isfunction(func):
raise TypeError('%r is not a function' % func)
return _WrappedMethodPlaceholder(func)
#
# convenience classes (any of them can be used *optionally*...)
class AutoAuxMeta(type):
"""Convenience metaclass: `aux()`-decorates classes created by it."""
def __new__(mcs, name, bases, attr_dict):
return aux(type.__new__(mcs, name, bases, attr_dict))
# (here: Py2.x/3.x-compatible way to create a class with a custom metaclass)
AutoAuxBase = AutoAuxMeta('AutoAuxBase', (object,), {'__doc__':
"""`AutoAuxMeta`-created base class: `aux()`-decorates its subclasses."""})
#
# basic example
if __name__ == '__main__':
import sys
import time
class TimedAction(AutoAuxBase):
# note: AutoAuxBase automatically decorates your classes with aux()
def action_before(self, *args, **kwargs):
"""Start action timer."""
print('starting action timer...')
self.start_time = time.time()
def action_after(self, *args, **kwargs):
"""Stop action timer and report measured duration."""
self.action_duration = time.time() - self.start_time
print('action duration: %f' % self.action_duration)
class FileContentAction(AutoAuxBase):
def action_around(self, path):
"""Read file and pass its content on; report success or error."""
print('opening file %r...' % path)
try:
with open(path) as f:
content = f.read()
except EnvironmentError:
print(sys.exc_info()[1])
else:
result = self.__action_around(path, content)
print('file %r processed successfully' % path)
return result
class NewlinesCounter(FileContentAction, TimedAction):
item_descr = 'newlines'
@primary
def action(self, path, content):
"""Get number of newlines in a given string."""
return content.count('\n')
def action_before(self, path, *args):
"""Print a message and go on..."""
print('counting %s in file %r will start...' % (
self.item_descr, path))
self.__action_before(path, *args)
def action_around(self, path):
"""Start operation with given file path. Finally, show summary."""
result = self.__action_around(path)
if result is not None:
print('%s in file %r: %s\n' % (
self.item_descr, path, result))
else:
print('could not count %s in file %r\n' % (
self.item_descr, path))
return result
class SpacesAndNewlinesCounter(NewlinesCounter):
item_descr = 'spaces and newlines'
@primary
def action(self, path, content):
"""Get number of spaces and newlines in a given string."""
spaces = content.count(' ')
newlines = self.__action(path, content)
return spaces + newlines
example_file_paths = __file__, 'spam/spam/spam/non-existent'
nl_counter = NewlinesCounter()
spc_nl_counter = SpacesAndNewlinesCounter()
for path in example_file_paths:
nl_counter.action(path)
spc_nl_counter.action(path)
|
testData/typeinspection/ignoreInitArguments.py
|
alek-sun/pydantic-pycharm-plugin
| 238 |
87541
|
<reponame>alek-sun/pydantic-pycharm-plugin
from pydantic import BaseModel
class A(BaseModel):
a: int
def __init__(self, xyz: str):
super().__init__(a=xyz)
A(xyz=123<warning descr="null">)</warning>
A(a=123)
|
utils/scripts/gen_onnx_spectrogram_model.py
|
dreiss/glow
| 2,838 |
87562
|
<reponame>dreiss/glow<filename>utils/scripts/gen_onnx_spectrogram_model.py
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import tensorflow as tf
from onnx import TensorProto, helper
from tensorflow.python.ops import gen_audio_ops as audio_ops
# ONNX utility.
def make_init(name, dtype, tensor):
return helper.make_tensor(
name=name,
data_type=dtype,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
# Function to generate AudioSpectrogram ONNX test model.
def gen_spectrogram_onnx_test_model(
model_path, window_count, window_size, stride, magnitude_squared=True
):
# Tensor sizes.
input_length = window_size + (window_count - 1) * stride
fft_length = int(2 ** np.ceil(np.log2(window_size)))
input_shape = [1, input_length]
spectrogram_length = int(fft_length / 2 + 1)
spectrogram_shape = [window_count, spectrogram_length]
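    # Worked example (added comment): with window_count=1, window_size=512, stride=256 this
    # gives input_length = 512, fft_length = 2**ceil(log2(512)) = 512 and
    # spectrogram_length = 512 / 2 + 1 = 257, i.e. a (1, 257) spectrogram.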
# Generate random input data.
np.random.seed(1)
input_data = np.random.randn(*input_shape)
# ----------------------------------------- COMPUTE TensorFlow REFERENCE -------------------------------------------
# Define TensorFlow model.
tf_input = tf.constant(
input_data.reshape([input_length, 1]), name="input", dtype=tf.float32
)
tf_spectrogram = audio_ops.audio_spectrogram(
tf_input,
window_size=window_size,
stride=stride,
magnitude_squared=magnitude_squared,
)
# Run TensorFlow model and get reference output.
with tf.Session() as sess:
spectrogram_ref = sess.run(tf_spectrogram)
spectrogram_ref = np.reshape(spectrogram_ref, spectrogram_shape)
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# AudioSpectrogram node definition.
spectrogram_node_def = onnx.helper.make_node(
"AudioSpectrogram",
name="audio_spectrogram",
inputs=["input"],
outputs=["spectrogram"],
window_size=int(window_size),
stride=int(stride),
magnitude_squared=int(magnitude_squared),
)
# Error node definition.
err_node_def = onnx.helper.make_node(
"Sub",
name="error",
inputs=["spectrogram", "spectrogram_ref"],
outputs=["spectrogram_err"],
)
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# Graph inputs.
graph_input.append(
helper.make_tensor_value_info("input", TensorProto.FLOAT, input_shape)
)
graph_input.append(
helper.make_tensor_value_info(
"spectrogram_ref", TensorProto.FLOAT, spectrogram_shape
)
)
# Graph initializers.
graph_init.append(make_init("input", TensorProto.FLOAT, input_data))
graph_init.append(make_init("spectrogram_ref", TensorProto.FLOAT, spectrogram_ref))
# Graph outputs.
graph_output.append(
helper.make_tensor_value_info(
"spectrogram_err", TensorProto.FLOAT, spectrogram_shape
)
)
# Graph name.
graph_name = "audio_spectrogram_test"
# Define graph (GraphProto).
graph_def = helper.make_graph(
[spectrogram_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers.
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto).
model_def = helper.make_model(graph_def, producer_name="onnx-audio-spectrogram")
    # Write the model to file in text format.
with open(model_path, "w") as f:
f.write(str(model_def))
# One window spectrogram.
gen_spectrogram_onnx_test_model(
model_path="audioSpectrogramOneWindow.onnxtxt",
window_count=1,
window_size=512,
stride=256,
magnitude_squared=True,
)
# Two window spectrogram.
gen_spectrogram_onnx_test_model(
model_path="audioSpectrogramTwoWindow.onnxtxt",
window_count=2,
window_size=640,
stride=320,
magnitude_squared=True,
)
# Magnitude non-squared.
gen_spectrogram_onnx_test_model(
model_path="audioSpectrogramNonSquared.onnxtxt",
window_count=1,
window_size=640,
stride=320,
magnitude_squared=False,
)
|
psdaq/psdaq/pyxpm/surf/devices/ti/_UCD92xx.py
|
ZhenghengLi/lcls2
| 134 |
87564
|
<reponame>ZhenghengLi/lcls2<filename>psdaq/psdaq/pyxpm/surf/devices/ti/_UCD92xx.py
#-----------------------------------------------------------------------------
# This file is part of the 'SLAC Firmware Standard Library'. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the 'SLAC Firmware Standard Library', including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
import surf.protocols.i2c
class UCD92xx(surf.protocols.i2c.PMBus):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.add(pr.LinkVariable(
name = 'VIN',
mode = 'RO',
units = 'V',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_VIN],
))
self.add(pr.LinkVariable(
name = 'IIN',
mode = 'RO',
units = 'A',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_IIN],
))
self.add(pr.LinkVariable(
name = 'VOUT',
mode = 'RO',
units = 'V',
disp = '{:1.3f}',
linkedGet = surf.protocols.i2c.getPMbusLinearDataFormat,
            dependencies = [self.READ_VOUT],
))
self.add(pr.LinkVariable(
name = 'IOUT',
mode = 'RO',
units = 'A',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_IOUT],
))
self.add(pr.LinkVariable(
name = 'TEMPERATURE[1]',
mode = 'RO',
units = 'degC',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_TEMPERATURE_1],
))
self.add(pr.LinkVariable(
name = 'TEMPERATURE[2]',
mode = 'RO',
units = 'degC',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_TEMPERATURE_2],
))
self.add(pr.LinkVariable(
name = 'FAN_SPEED[1]',
mode = 'RO',
units = 'RPM',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_FAN_SPEED_1],
))
self.add(pr.LinkVariable(
name = 'DUTY_CYCLE',
mode = 'RO',
units = '%',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_DUTY_CYCLE],
))
self.add(pr.LinkVariable(
name = 'POUT',
mode = 'RO',
units = 'W',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_POUT],
))
self.add(pr.LinkVariable(
name = 'PIN',
mode = 'RO',
units = 'W',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_PIN],
))
@staticmethod
def getPMbusLinearDataFormat11Bit(var):
        # Get the 16-bit RAW value
raw = var.dependencies[0].value()
# V is a 16-bit unsigned binary integer mantissa,
V = 1.0*raw
# The exponent is reported in the bottom 5 bits of the VOUT_MODE parameter.
# In the UCD92xx, this exponent is a read-only parameter
# whose value is fixed at –12. This allows setting voltage-related variables
# over a range from 0 to 15.9997V, with a resolution of 0.244mV.
X = -12.0
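        # Worked example: raw = 0x0800 (2048) gives 2048 * 2**-12 = 0.5, and the
        # full-scale raw = 0xFFFF (65535) gives roughly 15.9998, consistent with the
        # 0 to 15.9997 V range and ~0.244 mV resolution described above.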
return V*(2**X)
|
bruges/models/test/models_test.py
|
EvanBianco/bruges
| 209 |
87590
|
import unittest
import numpy as np
from numpy import array
from bruges.models import reconcile, interpolate, panel
from bruges.models import wedge
class ModelTest(unittest.TestCase):
"""
Tests models.
"""
def test_reconcile(self):
a = np.array([2, 6, 7, 7, 3])
b = np.array([3, 7, 3])
A, B = reconcile(a, b, order=0)
A_, B_ = array([2, 6, 7, 7, 3]), array([3, 7, 7, 3, 3])
self.assertTrue(np.array_equal(A, A_))
self.assertTrue(np.array_equal(B, B_))
def test_interpolate(self):
a = np.array([2, 6, 7, 7, 3])
b = np.array([3, 7, 7, 3, 3])
interp = interpolate(a, b, num=10)
self.assertTrue(interp.shape == (5, 10))
def test_panel(self):
a = np.array([2, 6, 7, 7, 3])
b = np.array([3, 7, 3])
dists = (10,)
out = panel(a, b, num=15, dists=dists)
sample = out[:, 7]
self.assertTrue(np.all(sample[:4] == array([2.5, 6.5, 5., 3.])))
self.assertTrue(np.isnan(sample[-1]))
def test_wedge(self):
w, top, base, ref = wedge(depth=10, width=7, strat=(10, (20, 30), 40))
col = array([10, 10, 10, 20, 20, 30, 40, 40, 40, 40])
t = array([3., 3., 3., 3., 3., 3., 3.])
b = array([3., 3., 3.6, 4.2, 4.8, 5.4, 6. ])
self.assertTrue(np.all(w[:, -1] == col))
self.assertTrue(w.sum() == 1990)
self.assertTrue(np.allclose(top, t))
self.assertTrue(np.allclose(base, b))
self.assertTrue(ref == 6)
def test_netgross(self):
w, top, *_ = wedge(depth=10, width=7, breadth=3, strat=(10, (20, 30), 40))
self.assertTrue(w.sum() == 6003)
self.assertTrue(w.shape == (10, 7, 3))
self.assertTrue(top.sum() == 63.0)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(ModelTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
depth/self_supervised_sfm/dataset.py
|
sarthakahuja11/cvml_project
| 171 |
87612
|
import os
import numpy as np
import tensorflow as tf
from depth.self_supervised_sfm.utils import readlines
AUTOTUNE = tf.data.experimental.AUTOTUNE
########################
# Constants
#########################
KITTI_K = np.array([[0.58, 0, 0.5, 0], # fx/width
[0, 1.92, 0.5, 0],
[0, 0, 1, 0],
                    [0, 0, 0, 1]], dtype=np.float64)
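# Note: these intrinsics appear to be normalized by image size (see the fx/width
# comment); for example, with a 640x192 input, load_tfdataset() scales them to
# fx = 0.58 * 640 = 371.2 and cy = 0.5 * 192 = 96.0.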
class KittiSFMDataset:
def __init__(self, dataset_dir, load_option,
img_size, batch_size,
split='eigen_zhou',
frame_idx=(0, -1, 1)):
self.h, self.w = img_size
self.split = split
self.batch_size = batch_size
self.load_option = load_option
self.dataset_dir = dataset_dir
self.frame_idx = frame_idx
self.side_map = {"2": 2, "3": 3, "l": 2, "r": 3} # Correspond to image folder
# Check that the folder exists
assert os.path.exists(dataset_dir) and os.path.isdir(dataset_dir), f"Dataset {dataset_dir} does not exist !"
if self.split == 'eigen_zhou':
filename = os.path.join('splits', f'eigen_zhou/{load_option}_files.txt')
else:
raise NotImplementedError
print(f'Loading from: {filename}')
data_paths = readlines(filename)
self.img_paths = []
for i, line in enumerate(data_paths):
# Image files
folder, frame_idx, side = line.split()
per_sample_imgs = []
# Load sequence img
for t in self.frame_idx:
f_str = f"{int(frame_idx) + t:010d}"
image_path = os.path.join(dataset_dir, folder, f"image_0{self.side_map[side]}/data", f_str + '.png')
per_sample_imgs.append(image_path)
self.img_paths.append(per_sample_imgs)
print(f'Total Images for {load_option}: {len(self.img_paths)}')
self.num_samples = len(self.img_paths)
def load_tfdataset(self):
inputs = {}
# Intrinsic
intrinsic = KITTI_K.copy()
intrinsic[0, :] *= self.w
intrinsic[1, :] *= self.h
inputs['K'] = tf.convert_to_tensor(intrinsic, tf.float32)
inputs['K_inv'] = tf.linalg.inv(inputs['K'])
dataset = tf.data.Dataset.from_tensor_slices(self.img_paths)
dataset = dataset.shuffle(self.num_samples)
# Load data
def load_sample(img_paths):
# load the raw data from the file as a string
image_cur = tf.io.read_file(img_paths[0])
image_prev = tf.io.read_file(img_paths[1])
image_next = tf.io.read_file(img_paths[2])
image_cur = tf.image.decode_png(image_cur)
image_prev = tf.image.decode_png(image_prev)
image_next = tf.image.decode_png(image_next)
image_cur = tf.cast(tf.image.resize(image_cur, [self.h, self.w]), tf.float32) / 255.
image_prev = tf.cast(tf.image.resize(image_prev, [self.h, self.w]), tf.float32) / 255.
image_next = tf.cast(tf.image.resize(image_next, [self.h, self.w]), tf.float32) / 255.
if self.load_option == "train":
if tf.random.uniform(()) > 0.5:
image_cur = tf.image.flip_left_right(image_cur)
image_prev = tf.image.flip_left_right(image_prev)
image_next = tf.image.flip_left_right(image_next)
inputs['img'] = image_cur
inputs['img-1'] = image_prev
inputs['img1'] = image_next
return inputs
dataset = dataset.map(load_sample, num_parallel_calls=AUTOTUNE)
dataset = dataset.batch(self.batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size=AUTOTUNE)
return dataset
|
s3_credentials/cli.py
|
simonw/s3-credentials
| 106 |
87614
|
import boto3
import botocore
import click
import configparser
from csv import DictWriter
import io
import itertools
import json
import mimetypes
import os
import re
import sys
import textwrap
from . import policies
def bucket_exists(s3, bucket):
try:
s3.head_bucket(Bucket=bucket)
return True
except botocore.exceptions.ClientError:
return False
def user_exists(iam, username):
try:
iam.get_user(UserName=username)
return True
except iam.exceptions.NoSuchEntityException:
return False
def common_boto3_options(fn):
for decorator in reversed(
(
click.option(
"--access-key",
help="AWS access key ID",
),
click.option(
"--secret-key",
help="AWS secret access key",
),
click.option(
"--session-token",
help="AWS session token",
),
click.option(
"--endpoint-url",
help="Custom endpoint URL",
),
click.option(
"-a",
"--auth",
type=click.File("r"),
help="Path to JSON/INI file containing credentials",
),
)
):
fn = decorator(fn)
return fn
def common_output_options(fn):
for decorator in reversed(
(
click.option("--nl", help="Output newline-delimited JSON", is_flag=True),
click.option("--csv", help="Output CSV", is_flag=True),
click.option("--tsv", help="Output TSV", is_flag=True),
)
):
fn = decorator(fn)
return fn
@click.group()
@click.version_option()
def cli():
"A tool for creating credentials for accessing S3 buckets"
class PolicyParam(click.ParamType):
"Returns string of guaranteed well-formed JSON"
name = "policy"
def convert(self, policy, param, ctx):
if policy.strip().startswith("{"):
# Verify policy string is valid JSON
try:
json.loads(policy)
except ValueError:
self.fail("Invalid JSON string")
return policy
else:
# Assume policy is a file path or '-'
try:
with click.open_file(policy) as f:
contents = f.read()
try:
json.loads(contents)
return contents
except ValueError:
self.fail(
"{} contained invalid JSON".format(
"Input" if policy == "-" else "File"
)
)
except FileNotFoundError:
self.fail("File not found")
class DurationParam(click.ParamType):
name = "duration"
pattern = re.compile(r"^(\d+)(m|h|s)?$")
def convert(self, value, param, ctx):
match = self.pattern.match(value)
if match is None:
self.fail("Duration must be of form 3600s or 15m or 2h")
integer_string, suffix = match.groups()
integer = int(integer_string)
if suffix == "m":
integer *= 60
elif suffix == "h":
integer *= 3600
# Must be between 15 minutes and 12 hours
if not (15 * 60 <= integer <= 12 * 60 * 60):
self.fail("Duration must be between 15 minutes and 12 hours")
return integer
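    # Illustrative examples: "15m" converts to 900 seconds and "2h" to 7200 seconds,
    # both accepted; "10m" (600 seconds) fails the 15-minute lower bound.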
@cli.command()
@click.argument(
"buckets",
nargs=-1,
required=True,
)
@click.option("--read-only", help="Only allow reading from the bucket", is_flag=True)
@click.option("--write-only", help="Only allow writing to the bucket", is_flag=True)
@click.option(
"--prefix", help="Restrict to keys starting with this prefix", default="*"
)
@click.option(
"--public-bucket",
help="Bucket policy for allowing public access",
is_flag=True,
)
def policy(buckets, read_only, write_only, prefix, public_bucket):
"""
Output generated JSON policy for one or more buckets
Takes the same options as s3-credentials create
To output a read-only JSON policy for a bucket:
s3-credentials policy my-bucket --read-only
"""
"Generate JSON policy for one or more buckets"
if public_bucket:
if len(buckets) != 1:
raise click.ClickException(
"--public-bucket policy can only be generated for a single bucket"
)
click.echo(
json.dumps(policies.bucket_policy_allow_all_get(buckets[0]), indent=4)
)
return
permission = "read-write"
if read_only:
permission = "read-only"
if write_only:
permission = "write-only"
statements = []
if permission == "read-write":
for bucket in buckets:
statements.extend(policies.read_write_statements(bucket, prefix))
elif permission == "read-only":
for bucket in buckets:
statements.extend(policies.read_only_statements(bucket, prefix))
elif permission == "write-only":
for bucket in buckets:
statements.extend(policies.write_only_statements(bucket, prefix))
else:
assert False, "Unknown permission: {}".format(permission)
bucket_access_policy = policies.wrap_policy(statements)
click.echo(json.dumps(bucket_access_policy, indent=4))
@cli.command()
@click.argument(
"buckets",
nargs=-1,
required=True,
)
@click.option(
"format_",
"-f",
"--format",
type=click.Choice(["ini", "json"]),
default="json",
help="Output format for credentials",
)
@click.option(
"-d",
"--duration",
type=DurationParam(),
help="How long should these credentials work for? Default is forever, use 3600 for 3600 seconds, 15m for 15 minutes, 1h for 1 hour",
)
@click.option("--username", help="Username to create or existing user to use")
@click.option(
"-c",
"--create-bucket",
help="Create buckets if they do not already exist",
is_flag=True,
)
@click.option(
"--prefix", help="Restrict to keys starting with this prefix", default="*"
)
@click.option(
"--public",
help="Make the created bucket public: anyone will be able to download files if they know their name",
is_flag=True,
)
@click.option("--read-only", help="Only allow reading from the bucket", is_flag=True)
@click.option("--write-only", help="Only allow writing to the bucket", is_flag=True)
@click.option(
"--policy",
type=PolicyParam(),
help="Path to a policy.json file, or literal JSON string - $!BUCKET_NAME!$ will be replaced with the name of the bucket",
)
@click.option("--bucket-region", help="Region in which to create buckets")
@click.option("--silent", help="Don't show performed steps", is_flag=True)
@click.option("--dry-run", help="Show steps without executing them", is_flag=True)
@click.option(
"--user-permissions-boundary",
help=(
"Custom permissions boundary to use for created users, or 'none' to "
"create without. Defaults to limiting to S3 based on "
"--read-only and --write-only options."
),
)
@common_boto3_options
def create(
buckets,
format_,
duration,
username,
create_bucket,
prefix,
public,
read_only,
write_only,
policy,
bucket_region,
user_permissions_boundary,
silent,
dry_run,
**boto_options
):
"""
Create and return new AWS credentials for specified S3 buckets - optionally
also creating the bucket if it does not yet exist.
To create a new bucket and output read-write credentials:
s3-credentials create my-new-bucket -c
To create read-only credentials for an existing bucket:
s3-credentials create my-existing-bucket --read-only
To create write-only credentials that are only valid for 15 minutes:
s3-credentials create my-existing-bucket --write-only -d 15m
"""
if read_only and write_only:
raise click.ClickException(
"Cannot use --read-only and --write-only at the same time"
)
def log(message):
if not silent:
click.echo(message, err=True)
permission = "read-write"
if read_only:
permission = "read-only"
if write_only:
permission = "write-only"
s3 = None
iam = None
sts = None
if not dry_run:
s3 = make_client("s3", **boto_options)
iam = make_client("iam", **boto_options)
sts = make_client("sts", **boto_options)
# Verify buckets
for bucket in buckets:
# Create bucket if it doesn't exist
if dry_run or (not bucket_exists(s3, bucket)):
if (not dry_run) and (not create_bucket):
raise click.ClickException(
"Bucket does not exist: {} - try --create-bucket to create it".format(
bucket
)
)
if dry_run or create_bucket:
kwargs = {}
if bucket_region:
kwargs = {
"CreateBucketConfiguration": {
"LocationConstraint": bucket_region
}
}
bucket_policy = {}
if public:
bucket_policy = policies.bucket_policy_allow_all_get(bucket)
if dry_run:
click.echo(
"Would create bucket: '{}'{}".format(
bucket,
(
" with args {}".format(json.dumps(kwargs, indent=4))
if kwargs
else ""
),
)
)
if bucket_policy:
click.echo("... then attach the following bucket policy to it:")
click.echo(json.dumps(bucket_policy, indent=4))
else:
s3.create_bucket(Bucket=bucket, **kwargs)
info = "Created bucket: {}".format(bucket)
if bucket_region:
info += " in region: {}".format(bucket_region)
log(info)
if bucket_policy:
s3.put_bucket_policy(
Bucket=bucket, Policy=json.dumps(bucket_policy)
)
log("Attached bucket policy allowing public access")
# At this point the buckets definitely exist - create the inline policy for assume_role()
assume_role_policy = {}
bucket_access_policy = {}
if policy:
assume_role_policy = json.loads(policy.replace("$!BUCKET_NAME!$", bucket))
else:
statements = []
if permission == "read-write":
for bucket in buckets:
statements.extend(policies.read_write_statements(bucket, prefix))
elif permission == "read-only":
for bucket in buckets:
statements.extend(policies.read_only_statements(bucket, prefix))
elif permission == "write-only":
for bucket in buckets:
statements.extend(policies.write_only_statements(bucket, prefix))
else:
assert False, "Unknown permission: {}".format(permission)
assume_role_policy = policies.wrap_policy(statements)
if duration:
# We're going to use sts.assume_role() rather than creating a user
if dry_run:
click.echo("Would ensure role: 's3-credentials.AmazonS3FullAccess'")
click.echo(
"Would assume role using following policy for {} seconds:".format(
duration
)
)
click.echo(json.dumps(assume_role_policy, indent=4))
else:
s3_role_arn = ensure_s3_role_exists(iam, sts)
log("Assume role against {} for {}s".format(s3_role_arn, duration))
credentials_response = sts.assume_role(
RoleArn=s3_role_arn,
RoleSessionName="s3.{permission}.{buckets}".format(
permission="custom" if policy else permission,
buckets=",".join(buckets),
),
Policy=json.dumps(assume_role_policy),
DurationSeconds=duration,
)
if format_ == "ini":
click.echo(
(
"[default]\naws_access_key_id={}\n"
"aws_secret_access_key={}\naws_session_token={}"
).format(
credentials_response["Credentials"]["AccessKeyId"],
credentials_response["Credentials"]["SecretAccessKey"],
credentials_response["Credentials"]["SessionToken"],
)
)
else:
click.echo(
json.dumps(
credentials_response["Credentials"], indent=4, default=str
)
)
return
    # No duration, so we create a new user so we can issue non-expiring credentials
if not username:
# Default username is "s3.read-write.bucket1,bucket2"
username = "s3.{permission}.{buckets}".format(
permission="custom" if policy else permission, buckets=",".join(buckets)
)
if dry_run or (not user_exists(iam, username)):
kwargs = {"UserName": username}
if user_permissions_boundary != "none":
# This is a user-account level limitation, it does not grant
# permissions on its own but is a useful extra level of defense
# https://github.com/simonw/s3-credentials/issues/1#issuecomment-958201717
if not user_permissions_boundary:
# Pick one based on --read-only/--write-only
if read_only:
user_permissions_boundary = (
"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
)
else:
# Need full access in order to be able to write
user_permissions_boundary = (
"arn:aws:iam::aws:policy/AmazonS3FullAccess"
)
kwargs["PermissionsBoundary"] = user_permissions_boundary
info = " user: '{}'".format(username)
if user_permissions_boundary != "none":
info += " with permissions boundary: '{}'".format(user_permissions_boundary)
if dry_run:
click.echo("Would create{}".format(info))
else:
iam.create_user(**kwargs)
log("Created {}".format(info))
# Add inline policies to the user so they can access the buckets
user_policy = {}
for bucket in buckets:
policy_name = "s3.{permission}.{bucket}".format(
permission="custom" if policy else permission,
bucket=bucket,
)
if policy:
user_policy = json.loads(policy.replace("$!BUCKET_NAME!$", bucket))
else:
if permission == "read-write":
user_policy = policies.read_write(bucket, prefix)
elif permission == "read-only":
user_policy = policies.read_only(bucket, prefix)
elif permission == "write-only":
user_policy = policies.write_only(bucket, prefix)
else:
assert False, "Unknown permission: {}".format(permission)
if dry_run:
click.echo(
"Would attach policy called '{}' to user '{}', details:\n{}".format(
policy_name,
username,
json.dumps(user_policy, indent=4),
)
)
else:
iam.put_user_policy(
PolicyDocument=json.dumps(user_policy),
PolicyName=policy_name,
UserName=username,
)
log("Attached policy {} to user {}".format(policy_name, username))
# Retrieve and print out the credentials
if dry_run:
click.echo("Would call create access key for user '{}'".format(username))
else:
response = iam.create_access_key(
UserName=username,
)
log("Created access key for user: {}".format(username))
if format_ == "ini":
click.echo(
("[default]\naws_access_key_id={}\n" "aws_secret_access_key={}").format(
response["AccessKey"]["AccessKeyId"],
response["AccessKey"]["SecretAccessKey"],
)
)
elif format_ == "json":
click.echo(json.dumps(response["AccessKey"], indent=4, default=str))
@cli.command()
@common_boto3_options
def whoami(**boto_options):
"Identify currently authenticated user"
sts = make_client("sts", **boto_options)
identity = sts.get_caller_identity()
identity.pop("ResponseMetadata")
click.echo(json.dumps(identity, indent=4, default=str))
@cli.command()
@common_output_options
@common_boto3_options
def list_users(nl, csv, tsv, **boto_options):
"""
List all users for this account
s3-credentials list-users
    Add --csv or --tsv for CSV or TSV format:
s3-credentials list-users --csv
"""
iam = make_client("iam", **boto_options)
output(
paginate(iam, "list_users", "Users"),
(
"UserName",
"UserId",
"Arn",
"Path",
"CreateDate",
"PasswordLast<PASSWORD>",
"PermissionsBoundary",
"Tags",
),
nl,
csv,
tsv,
)
@cli.command()
@click.argument("role_names", nargs=-1)
@click.option("--details", help="Include attached policies (slower)", is_flag=True)
@common_output_options
@common_boto3_options
def list_roles(role_names, details, nl, csv, tsv, **boto_options):
"""
List roles
To list all roles for this AWS account:
s3-credentials list-roles
    Add --csv or --tsv for CSV or TSV format:
s3-credentials list-roles --csv
For extra details per role (much slower) add --details
s3-credentials list-roles --details
"""
iam = make_client("iam", **boto_options)
headers = (
"Path",
"RoleName",
"RoleId",
"Arn",
"CreateDate",
"AssumeRolePolicyDocument",
"Description",
"MaxSessionDuration",
"PermissionsBoundary",
"Tags",
"RoleLastUsed",
)
if details:
headers += ("inline_policies", "attached_policies")
def iterate():
for role in paginate(iam, "list_roles", "Roles"):
if role_names and role["RoleName"] not in role_names:
continue
if details:
role_name = role["RoleName"]
role["inline_policies"] = []
# Get inline policy names, then policy for each one
for policy_name in paginate(
iam, "list_role_policies", "PolicyNames", RoleName=role_name
):
role_policy_response = iam.get_role_policy(
RoleName=role_name,
PolicyName=policy_name,
)
role_policy_response.pop("ResponseMetadata", None)
role["inline_policies"].append(role_policy_response)
# Get attached managed policies
role["attached_policies"] = []
for attached in paginate(
iam,
"list_attached_role_policies",
"AttachedPolicies",
RoleName=role_name,
):
policy_arn = attached["PolicyArn"]
attached_policy_response = iam.get_policy(
PolicyArn=policy_arn,
)
policy_details = attached_policy_response["Policy"]
# Also need to fetch the policy JSON
version_id = policy_details["DefaultVersionId"]
policy_version_response = iam.get_policy_version(
PolicyArn=policy_arn,
VersionId=version_id,
)
policy_details["PolicyVersion"] = policy_version_response[
"PolicyVersion"
]
role["attached_policies"].append(policy_details)
yield role
output(iterate(), headers, nl, csv, tsv)
@cli.command()
@click.argument("usernames", nargs=-1)
@common_boto3_options
def list_user_policies(usernames, **boto_options):
"""
List inline policies for specified users
s3-credentials list-user-policies username
Returns policies for all users if no usernames are provided.
"""
iam = make_client("iam", **boto_options)
if not usernames:
usernames = [user["UserName"] for user in paginate(iam, "list_users", "Users")]
for username in usernames:
click.echo("User: {}".format(username))
for policy_name in paginate(
iam, "list_user_policies", "PolicyNames", UserName=username
):
click.echo("PolicyName: {}".format(policy_name))
policy_response = iam.get_user_policy(
UserName=username, PolicyName=policy_name
)
click.echo(
json.dumps(policy_response["PolicyDocument"], indent=4, default=str)
)
@cli.command()
@click.argument("buckets", nargs=-1)
@click.option("--details", help="Include extra bucket details (slower)", is_flag=True)
@common_output_options
@common_boto3_options
def list_buckets(buckets, details, nl, csv, tsv, **boto_options):
"""
List buckets
To list all buckets and their creation time as JSON:
s3-credentials list-buckets
    Add --csv or --tsv for CSV or TSV format:
s3-credentials list-buckets --csv
For extra details per bucket (much slower) add --details
s3-credentials list-buckets --details
"""
s3 = make_client("s3", **boto_options)
headers = ["Name", "CreationDate"]
if details:
headers += ["bucket_acl", "public_access_block", "bucket_website"]
def iterator():
for bucket in s3.list_buckets()["Buckets"]:
if buckets and (bucket["Name"] not in buckets):
continue
if details:
bucket_acl = dict(
(key, value)
for key, value in s3.get_bucket_acl(
Bucket=bucket["Name"],
).items()
if key != "ResponseMetadata"
)
try:
pab = s3.get_public_access_block(
Bucket=bucket["Name"],
)["PublicAccessBlockConfiguration"]
except s3.exceptions.ClientError:
pab = None
try:
bucket_website = dict(
(key, value)
for key, value in s3.get_bucket_website(
Bucket=bucket["Name"],
).items()
if key != "ResponseMetadata"
)
except s3.exceptions.ClientError:
bucket_website = None
bucket["bucket_acl"] = bucket_acl
bucket["public_access_block"] = pab
bucket["bucket_website"] = bucket_website
yield bucket
output(iterator(), headers, nl, csv, tsv)
@cli.command()
@click.argument("usernames", nargs=-1, required=True)
@common_boto3_options
def delete_user(usernames, **boto_options):
"""
Delete specified users, their access keys and their inline policies
s3-credentials delete-user username1 username2
"""
iam = make_client("iam", **boto_options)
for username in usernames:
click.echo("User: {}".format(username))
# Fetch and delete their policies
policy_names_to_delete = list(
paginate(iam, "list_user_policies", "PolicyNames", UserName=username)
)
for policy_name in policy_names_to_delete:
iam.delete_user_policy(
UserName=username,
PolicyName=policy_name,
)
click.echo(" Deleted policy: {}".format(policy_name))
# Fetch and delete their access keys
access_key_ids_to_delete = [
access_key["AccessKeyId"]
for access_key in paginate(
iam, "list_access_keys", "AccessKeyMetadata", UserName=username
)
]
for access_key_id in access_key_ids_to_delete:
iam.delete_access_key(
UserName=username,
AccessKeyId=access_key_id,
)
click.echo(" Deleted access key: {}".format(access_key_id))
iam.delete_user(UserName=username)
click.echo(" Deleted user")
def make_client(service, access_key, secret_key, session_token, endpoint_url, auth):
if auth:
if access_key or secret_key or session_token:
raise click.ClickException(
"--auth cannot be used with --access-key, --secret-key or --session-token"
)
auth_content = auth.read().strip()
if auth_content.startswith("{"):
# Treat as JSON
decoded = json.loads(auth_content)
access_key = decoded.get("AccessKeyId")
secret_key = decoded.get("SecretAccessKey")
session_token = decoded.get("SessionToken")
else:
# Treat as INI
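            # An INI credentials file is expected to look roughly like:
            #   [default]
            #   aws_access_key_id=...
            #   aws_secret_access_key=...
            #   aws_session_token=...   (optional)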
config = configparser.ConfigParser()
config.read_string(auth_content)
# Use the first section that has an aws_access_key_id
for section in config.sections():
if "aws_access_key_id" in config[section]:
access_key = config[section].get("aws_access_key_id")
secret_key = config[section].get("aws_secret_access_key")
session_token = config[section].get("aws_session_token")
break
kwargs = {}
if access_key:
kwargs["aws_access_key_id"] = access_key
if secret_key:
kwargs["aws_secret_access_key"] = secret_key
if session_token:
kwargs["aws_session_token"] = session_token
if endpoint_url:
kwargs["endpoint_url"] = endpoint_url
return boto3.client(service, **kwargs)
def ensure_s3_role_exists(iam, sts):
"Create s3-credentials.AmazonS3FullAccess role if not exists, return ARN"
role_name = "s3-credentials.AmazonS3FullAccess"
account_id = sts.get_caller_identity()["Account"]
try:
role = iam.get_role(RoleName=role_name)
return role["Role"]["Arn"]
except iam.exceptions.NoSuchEntityException:
create_role_response = iam.create_role(
Description=(
"Role used by the s3-credentials tool to create time-limited "
"credentials that are restricted to specific buckets"
),
RoleName=role_name,
AssumeRolePolicyDocument=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::{}:root".format(account_id)
},
"Action": "sts:AssumeRole",
}
],
}
),
)
# Attach AmazonS3FullAccess to it - note that even though we use full access
# on the role itself any time we call sts.assume_role() we attach an additional
# policy to ensure reduced access for the temporary credentials
iam.attach_role_policy(
RoleName="s3-credentials.AmazonS3FullAccess",
PolicyArn="arn:aws:iam::aws:policy/AmazonS3FullAccess",
)
return create_role_response["Role"]["Arn"]
@cli.command()
@click.argument("bucket")
@click.option("--prefix", help="List keys starting with this prefix")
@common_output_options
@common_boto3_options
def list_bucket(bucket, prefix, nl, csv, tsv, **boto_options):
"""
List contents of bucket
To list the contents of a bucket as JSON:
s3-credentials list-bucket my-bucket
    Add --csv or --tsv for CSV or TSV format:
s3-credentials list-bucket my-bucket --csv
"""
s3 = make_client("s3", **boto_options)
kwargs = {"Bucket": bucket}
if prefix:
kwargs["Prefix"] = prefix
try:
output(
paginate(s3, "list_objects_v2", "Contents", **kwargs),
("Key", "LastModified", "ETag", "Size", "StorageClass", "Owner"),
nl,
csv,
tsv,
)
except botocore.exceptions.ClientError as e:
raise click.ClickException(e)
@cli.command()
@click.argument("bucket")
@click.argument("key")
@click.argument(
"path",
type=click.Path(
exists=True, file_okay=True, dir_okay=False, readable=True, allow_dash=True
),
)
@click.option(
"--content-type",
help="Content-Type to use (default is auto-detected based on file extension)",
)
@click.option("silent", "-s", "--silent", is_flag=True, help="Don't show progress bar")
@common_boto3_options
def put_object(bucket, key, path, content_type, silent, **boto_options):
"""
Upload an object to an S3 bucket
To upload a file to /my-key.txt in the my-bucket bucket:
s3-credentials put-object my-bucket my-key.txt /path/to/file.txt
Use - to upload content from standard input:
echo "Hello" | s3-credentials put-object my-bucket hello.txt -
"""
s3 = make_client("s3", **boto_options)
size = None
extra_args = {}
if path == "-":
# boto needs to be able to seek
fp = io.BytesIO(sys.stdin.buffer.read())
if not silent:
size = fp.getbuffer().nbytes
else:
if not content_type:
content_type = mimetypes.guess_type(path)[0]
fp = click.open_file(path, "rb")
if not silent:
size = os.path.getsize(path)
if content_type is not None:
extra_args["ContentType"] = content_type
if not silent:
# Show progress bar
with click.progressbar(length=size, label="Uploading") as bar:
s3.upload_fileobj(
fp, bucket, key, Callback=bar.update, ExtraArgs=extra_args
)
else:
s3.upload_fileobj(fp, bucket, key, ExtraArgs=extra_args)
@cli.command()
@click.argument("bucket")
@click.argument("key")
@click.option(
"output",
"-o",
"--output",
type=click.Path(file_okay=True, dir_okay=False, writable=True, allow_dash=False),
help="Write to this file instead of stdout",
)
@common_boto3_options
def get_object(bucket, key, output, **boto_options):
"""
Download an object from an S3 bucket
To see the contents of the bucket on standard output:
s3-credentials get-object my-bucket hello.txt
To save to a file:
s3-credentials get-object my-bucket hello.txt -o hello.txt
"""
s3 = make_client("s3", **boto_options)
if not output:
fp = sys.stdout.buffer
else:
fp = click.open_file(output, "wb")
s3.download_fileobj(bucket, key, fp)
@cli.command()
@click.argument("bucket")
@click.option(
"allowed_methods",
"-m",
"--allowed-method",
multiple=True,
help="Allowed method e.g. GET",
)
@click.option(
"allowed_headers",
"-h",
"--allowed-header",
multiple=True,
help="Allowed header e.g. Authorization",
)
@click.option(
"allowed_origins",
"-o",
"--allowed-origin",
multiple=True,
help="Allowed origin e.g. https://www.example.com/",
)
@click.option(
"expose_headers",
"-e",
"--expose-header",
multiple=True,
help="Header to expose e.g. ETag",
)
@click.option(
"max_age_seconds",
"--max-age-seconds",
type=int,
help="How long to cache preflight requests",
)
@common_boto3_options
def set_cors_policy(
bucket,
allowed_methods,
allowed_headers,
allowed_origins,
expose_headers,
max_age_seconds,
**boto_options
):
"""
Set CORS policy for a bucket
To allow GET requests from any origin:
s3-credentials set-cors-policy my-bucket
To allow GET and PUT from a specific origin and expose ETag headers:
\b
s3-credentials set-cors-policy my-bucket \\
--allowed-method GET \\
--allowed-method PUT \\
--allowed-origin https://www.example.com/ \\
--expose-header ETag
"""
s3 = make_client("s3", **boto_options)
if not bucket_exists(s3, bucket):
raise click.ClickException("Bucket {} does not exists".format(bucket))
cors_rule = {
"ID": "set-by-s3-credentials",
"AllowedOrigins": allowed_origins or ["*"],
"AllowedHeaders": allowed_headers,
"AllowedMethods": allowed_methods or ["GET"],
"ExposeHeaders": expose_headers,
}
if max_age_seconds:
cors_rule["MaxAgeSeconds"] = max_age_seconds
try:
s3.put_bucket_cors(Bucket=bucket, CORSConfiguration={"CORSRules": [cors_rule]})
except botocore.exceptions.ClientError as e:
raise click.ClickException(e)
@cli.command()
@click.argument("bucket")
@common_boto3_options
def get_cors_policy(bucket, **boto_options):
"""
Get CORS policy for a bucket
s3-credentials get-cors-policy my-bucket
Returns the CORS policy for this bucket, if set, as JSON
"""
s3 = make_client("s3", **boto_options)
try:
response = s3.get_bucket_cors(Bucket=bucket)
except botocore.exceptions.ClientError as e:
raise click.ClickException(e)
click.echo(json.dumps(response["CORSRules"], indent=4, default=str))
def output(iterator, headers, nl, csv, tsv):
if nl:
for item in iterator:
click.echo(json.dumps(item, default=str))
elif csv or tsv:
writer = DictWriter(
sys.stdout, headers, dialect="excel-tab" if tsv else "excel"
)
writer.writeheader()
writer.writerows(fix_json(row) for row in iterator)
else:
for line in stream_indented_json(iterator):
click.echo(line)
def stream_indented_json(iterator, indent=2):
# We have to iterate two-at-a-time so we can know if we
# should output a trailing comma or if we have reached
# the last item.
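    # Illustrative output: for the two items {"a": 1} and {"b": 2} the generator
    # yields '[\n  {\n    "a": 1\n  },' and then '  {\n    "b": 2\n  }\n]', which
    # concatenate into a single pretty-printed JSON array.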
current_iter, next_iter = itertools.tee(iterator, 2)
next(next_iter, None)
first = True
for item, next_item in itertools.zip_longest(current_iter, next_iter):
is_last = next_item is None
data = item
line = "{first}{serialized}{separator}{last}".format(
first="[\n" if first else "",
serialized=textwrap.indent(
json.dumps(data, indent=indent, default=str), " " * indent
),
separator="," if not is_last else "",
last="\n]" if is_last else "",
)
yield line
first = False
if first:
# We didn't output anything, so yield the empty list
yield "[]"
def paginate(service, method, list_key, **kwargs):
paginator = service.get_paginator(method)
for response in paginator.paginate(**kwargs):
yield from response[list_key]
def fix_json(row):
# If a key value is list or dict, json encode it
return dict(
[
(
key,
json.dumps(value, indent=2, default=str)
if isinstance(value, (dict, list, tuple))
else value,
)
for key, value in row.items()
]
)
|
test/programytest/extensions/test_base.py
|
cdoebler1/AIML2
| 345 |
87615
|
import unittest
import unittest.mock
from programy.extensions.base import Extension
class MockExtension(Extension):
def execute(self,context, data):
raise NotImplementedError()
class ExtensionTests(unittest.TestCase):
def test_ensure_not_implemented(self):
bot = unittest.mock.Mock()
extension = MockExtension()
self.assertIsNotNone(extension)
with self.assertRaises(Exception):
extension.execute(bot, "testid", "Some Data")
|
docs/examples/use_cases/video_superres/tools/split_scenes.py
|
cyyever/DALI
| 3,967 |
87641
|
<gh_stars>1000+
import argparse
import os
import subprocess
def split_scenes(raw_data_path, out_data_path):
out_data_path = os.path.join(out_data_path,'orig','scenes')
if not os.path.isdir(os.path.join(out_data_path,'train')):
os.makedirs(os.path.join(out_data_path,'train'))
if not os.path.isdir(os.path.join(out_data_path,'val')):
os.makedirs(os.path.join(out_data_path,'val'))
start = "00:00:00.0"
with open("./data/timestamps") as f:
for i, line in enumerate(f.readlines()):
m, s = divmod(float(line), 60)
h, m = divmod(m, 60)
end = "%02d:%02d:%02d" %(h, m, s)
if i < 53:
subset = 'train'
else:
subset = 'val'
filepath = os.path.join(out_data_path, subset)
filename = os.path.join(filepath, 'scene_' + str(i) + '.mp4')
cmd = ["ffmpeg", "-i", raw_data_path, "-ss", start, "-to", end,
"-c:v", "copy", "-an", filename]
print("Running: ", ' '.join(cmd))
subprocess.run(cmd)
start = end
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--raw_data', type=str, default=None)
parser.add_argument('--out_data', type=str, default=None)
args = parser.parse_args()
assert args.raw_data is not None, 'Provide --raw_data path to Myanmar 4K mp4'
    assert args.out_data is not None, 'Provide --out_data output path for the split scenes'
split_scenes(args.raw_data, args.out_data)
|
uq360/utils/transformers/one_class_svm.py
|
Sclare87/UQ360
| 148 |
87648
|
from sklearn.svm import OneClassSVM
from uq360.utils.transformers.feature_transformer import FeatureTransformer
class OneClassSVMTransformer(FeatureTransformer):
"""One-class SVM outlier-classifier based derived feature.
This transformer fits an SVM decision boundary enclosing the
full training set. This is then the decision boundary to identify
outliers in production data at inference time. """
def __init__(self):
super(OneClassSVMTransformer, self).__init__()
self.one_class_classifier = OneClassSVM(nu=0.1, kernel="rbf", gamma='auto')
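        # nu=0.1 upper-bounds the fraction of training points treated as margin
        # errors (roughly at most ~10% may fall outside the learned boundary);
        # gamma='auto' sets the RBF kernel coefficient to 1 / n_features.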
self.fit_status = False
@classmethod
def name(cls):
return ('one_class_svm')
def fit(self, x, y):
self.one_class_classifier.fit(x)
self.fit_status = True
def transform(self, x, predictions):
return self.one_class_classifier.decision_function(x)
def save(self, output_location=None):
self.register_pkl_object(self.one_class_classifier, 'one_class_classifier')
super(OneClassSVMTransformer, self)._save(output_location)
def load(self, input_location=None):
self._load(input_location)
self.one_class_classifier = self.pkl_registry[0][0]
assert type(self.one_class_classifier) == OneClassSVM
self.fit_status = True
|