max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
llvm/utils/lit/tests/shtest-format.py | medismailben/llvm-project | 158 | 65269 |
<gh_stars>100-1000
# Check the various features of the ShTest format.
#
# RUN: rm -f %t.xml
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-format --xunit-xml-output %t.xml > %t.out
# RUN: FileCheck < %t.out %s
# RUN: FileCheck --check-prefix=XUNIT < %t.xml %s
# END.
# CHECK: -- Testing:
# CHECK: PASS: shtest-format :: argv0.txt
# CHECK: FAIL: shtest-format :: external_shell/fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: Command Output (stderr):
# CHECK-NEXT: --
# CHECK-NEXT: cat{{(\.exe)?}}: {{cannot open does-not-exist|does-not-exist: No such file or directory}}
# CHECK: --
# CHECK: FAIL: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail_with_bad_encoding.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: a line with bad encoding:
# CHECK: --
# CHECK: PASS: shtest-format :: external_shell/pass.txt
# CHECK: FAIL: shtest-format :: fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: fail.txt' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: printf "line 1
# CHECK-NEXT: false
# CHECK-NEXT: --
# CHECK-NEXT: Exit Code: 1
#
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: $ ":" "RUN: at line 1"
# CHECK-NEXT: $ "printf"
# CHECK-NEXT: # command output:
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: UNRESOLVED: shtest-format :: no-test-line.txt
# CHECK: PASS: shtest-format :: pass.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-missing.txt
# CHECK: PASS: shtest-format :: requires-present.txt
# CHECK: UNRESOLVED: shtest-format :: requires-star.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-triple.txt
# CHECK: PASS: shtest-format :: unsupported-expr-false.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported-expr-true.txt
# CHECK: UNRESOLVED: shtest-format :: unsupported-star.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported_dir/some-test.txt
# CHECK: PASS: shtest-format :: xfail-expr-false.txt
# CHECK: XFAIL: shtest-format :: xfail-expr-true.txt
# CHECK: XFAIL: shtest-format :: xfail-feature.txt
# CHECK: XFAIL: shtest-format :: xfail-target.txt
# CHECK: XFAIL: shtest-format :: xfail.txt
# CHECK: XPASS: shtest-format :: xpass.txt
# CHECK-NEXT: *** TEST 'shtest-format :: xpass.txt' FAILED ***
# CHECK-NEXT: Script
# CHECK-NEXT: --
# CHECK-NEXT: true
# CHECK-NEXT: --
# CHECK: Testing Time
# CHECK: Unexpected Passing Tests (1)
# CHECK: shtest-format :: xpass.txt
# CHECK: Failing Tests (3)
# CHECK: shtest-format :: external_shell/fail.txt
# CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK: shtest-format :: fail.txt
# CHECK: Expected Passes : 7
# CHECK: Expected Failures : 4
# CHECK: Unsupported Tests : 4
# CHECK: Unresolved Tests : 3
# CHECK: Unexpected Passes : 1
# CHECK: Unexpected Failures: 3
# XUNIT: <?xml version="1.0" encoding="UTF-8" ?>
# XUNIT-NEXT: <testsuites>
# XUNIT-NEXT: <testsuite name="shtest-format" tests="22" failures="7" skipped="4">
# XUNIT: <testcase classname="shtest-format.shtest-format" name="argv0.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.external_shell" name="fail.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.external_shell" name="fail_with_bad_encoding.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.external_shell" name="pass.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="fail.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="no-test-line.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="pass.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-missing.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Skipping because of: a-missing-feature" />
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-present.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-star.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-triple.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Skipping because of: x86_64" />
# XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-expr-false.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-expr-true.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Skipping because of configuration." />
# XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-star.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.unsupported_dir" name="some-test.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Skipping because of configuration." />
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-expr-false.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-expr-true.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-feature.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-target.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xpass.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: </testsuite>
# XUNIT-NEXT: </testsuites>
|
.jenkins/pytorch/print_sccache_log.py | Hacky-DH/pytorch | 60,067 | 65303 |
import sys
log_file_path = sys.argv[1]
with open(log_file_path) as f:
lines = f.readlines()
for line in lines:
# Ignore errors from CPU instruction set, symbol existing testing,
# or compilation error formatting
ignored_keywords = [
'src.c',
'CheckSymbolExists.c',
'test_compilation_error_formatting',
]
if all([keyword not in line for keyword in ignored_keywords]):
print(line)
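# Usage note (added for clarity, not part of the original script): invoked as
#   python .jenkins/pytorch/print_sccache_log.py <path-to-sccache-log>
# it prints every line of the log except those containing one of the ignored keywords.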
|
tests/fixers/test_commit_strategy.py | jsoref/lint-review | 271 | 65306 |
<reponame>jsoref/lint-review<filename>tests/fixers/test_commit_strategy.py
from unittest import TestCase
from lintreview.fixers.commit_strategy import CommitStrategy
from lintreview.fixers.error import WorkflowError
from mock import patch, Mock, sentinel
from ..test_git import setup_repo, teardown_repo, clone_path
class TestCommitStrategy(TestCase):
def setUp(self):
setup_repo()
def tearDown(self):
teardown_repo()
def test_init_key_requirements(self):
keys = ('repo_path', 'author_email', 'author_name',
'pull_request')
values = ('some/path', 'lintbot', '<EMAIL>',
'pull#1')
for key in keys:
context = dict(zip(keys, values))
del context[key]
self.assertRaises(KeyError,
CommitStrategy,
context)
@patch('lintreview.git.commit')
@patch('lintreview.git.push')
@patch('lintreview.git.apply_cached')
def test_execute__push_error(self, mock_apply, mock_push, mock_commit):
mock_push.side_effect = IOError(
'! [remote rejected] stylefixes -> add_date_to_obs '
'(permission denied)\nerror: failed to push some refs to')
mock_pull = Mock(
head_branch='patch-1',
from_private_fork=False,
maintainer_can_modify=True)
context = {
'repo_path': clone_path,
'author_name': 'lintbot',
'author_email': '<EMAIL>',
'pull_request': mock_pull
}
strategy = CommitStrategy(context)
diff = Mock()
diff.as_diff.return_value = sentinel.diff
self.assertRaises(WorkflowError,
strategy.execute,
[diff])
@patch('lintreview.git.commit')
@patch('lintreview.git.push')
@patch('lintreview.git.apply_cached')
def test_execute__git_flow(self, mock_apply, mock_push, mock_commit):
mock_pull = Mock(
head_branch='patch-1',
from_private_fork=False,
maintainer_can_modify=True)
context = {
'repo_path': clone_path,
'author_name': 'lintbot',
'author_email': '<EMAIL>',
'pull_request': mock_pull
}
strategy = CommitStrategy(context)
diff = Mock()
diff.as_diff.return_value = sentinel.diff
out = strategy.execute([diff])
self.assertIsNone(out)
mock_commit.assert_called_with(
clone_path,
'lintbot <<EMAIL>>',
'Fixing style errors.')
mock_push.assert_called_with(
clone_path,
'origin',
'stylefixes:patch-1')
mock_apply.assert_called_with(
clone_path,
sentinel.diff)
@patch('lintreview.git.commit')
def test_execute__no_maintainer_modify(self, mock_commit):
mock_pull = Mock(
head_branch='patch-1',
maintainer_can_modify=False,
from_private_fork=False)
context = {
'repo_path': clone_path,
'author_name': 'lintbot',
'author_email': '<EMAIL>',
'pull_request': mock_pull
}
strategy = CommitStrategy(context)
diff = Mock()
diff.as_diff.return_value = sentinel.diff
with self.assertRaises(WorkflowError) as err:
strategy.execute([diff])
self.assertIn('Cannot apply automatic fixing', str(err.exception))
self.assertIn('modified by maintainers', str(err.exception))
self.assertEqual(0, mock_commit.call_count)
@patch('lintreview.git.commit')
def test_execute__private_fork(self, mock_commit):
mock_pull = Mock(
head_branch='patch-1',
maintainer_can_modify=True,
from_private_fork=True)
context = {
'repo_path': clone_path,
'author_name': 'lintbot',
'author_email': '<EMAIL>',
'pull_request': mock_pull
}
strategy = CommitStrategy(context)
diff = Mock()
diff.as_diff.return_value = sentinel.diff
with self.assertRaises(WorkflowError) as err:
strategy.execute([diff])
self.assertIn('Cannot apply automatic fixing', str(err.exception))
self.assertIn('private fork', str(err.exception))
self.assertEqual(0, mock_commit.call_count)
|
acouchbase/tests/py34only.py | couchbase/couchbase-python-client | 189 | 65314 |
import asyncio
from couchbase.asynchronous import AsyncSearchResult
from couchbase.asynchronous import AsyncAnalyticsResult
from .fixtures import asynct, AioTestCase
from couchbase.exceptions import CouchbaseException, SearchException, NotSupportedException
from unittest import SkipTest
import couchbase.search as SEARCH
class CouchbaseBeerTest(AioTestCase):
def setUp(self, **kwargs):
try:
return super(CouchbaseBeerTest, self).setUp(
bucket='beer-sample', **kwargs)
except CouchbaseException:
raise SkipTest("Need 'beer-sample' bucket for this")
class CouchbaseBeerKVTest(CouchbaseBeerTest):
def setUp(self):
super(CouchbaseBeerKVTest, self).setUp()
@asynct
@asyncio.coroutine
def test_get_data(self):
connargs = self.make_connargs(bucket='beer-sample')
beer_default_collection = self.gen_collection(**connargs)
yield from (beer_default_collection.on_connect() or asyncio.sleep(0.01))
data = yield from beer_default_collection.get('21st_amendment_brewery_cafe')
self.assertEqual("21st Amendment Brewery Cafe", data.content["name"])
class CouchbaseBeerViewTest(CouchbaseBeerTest):
def setUp(self):
super(CouchbaseBeerViewTest, self).setUp(type='Bucket')
@asynct
@asyncio.coroutine
def test_query(self):
beer_bucket = self.gen_cluster(
**self.make_connargs()).bucket('beer-sample')
yield from (beer_bucket.on_connect() or asyncio.sleep(0.01))
viewiter = beer_bucket.view_query("beer", "brewery_beers", limit=10)
yield from viewiter.future
count = len(list(viewiter))
self.assertEqual(count, 10)
class CouchbaseDefaultTestKV(AioTestCase):
@asynct
@asyncio.coroutine
def test_upsert(self):
import uuid
expected = str(uuid.uuid4())
default_collection = self.gen_collection(**self.make_connargs())
yield from (default_collection.on_connect() or asyncio.sleep(0.01))
yield from default_collection.upsert('hello', {"key": expected})
obtained = yield from default_collection.get('hello')
self.assertEqual({"key": expected}, obtained.content)
class AIOClusterTest(AioTestCase):
def setUp(self, **kwargs):
super(AIOClusterTest, self).setUp(**kwargs)
@asynct
@asyncio.coroutine
def test_n1ql(self):
cluster = self.gen_cluster(**self.make_connargs())
yield from (cluster.on_connect() or asyncio.sleep(0.01))
it = cluster.query(self.query_props.statement)
yield from it.future
data = list(it)
self.assertEqual(self.query_props.rowcount, len(data))
@asynct
@asyncio.coroutine
def test_search(self # type: Base
):
cluster = self.gen_cluster(**self.make_connargs())
yield from (cluster.on_connect() or asyncio.sleep(0.01))
try:
it = cluster.search_query("beer-search", SEARCH.TermQuery("category"),
facets={'fred': SEARCH.TermFacet('category', 10)})
yield from it.future
data = list(it)
self.assertIsInstance(it, AsyncSearchResult)
self.assertEqual(10, len(data))
except SearchException as e:
if isinstance(e.inner_cause,
NotSupportedException) and self.is_mock:
raise SkipTest("Not supported")
class AnalyticsTest(AioTestCase):
def testBatchedAnalytics(self # type: Base
):
cluster = self.gen_cluster(**self.make_connargs())
yield from (cluster.on_connect() or asyncio.sleep(0.01))
it = cluster.analytics_query(
"SELECT * FROM `{}` LIMIT 1".format(self.dataset_name))
yield from it.future
self.assertIsInstance(it, AsyncAnalyticsResult)
self.assertEqual(1, len(it.rows()))
|
baseline_tokenization/javalang/test/test_java_8_syntax.py | greenmonn/code2seq | 2,151 | 65365 |
<filename>baseline_tokenization/javalang/test/test_java_8_syntax.py
import unittest
from pkg_resources import resource_string
from .. import parse, parser, tree
def setup_java_class(content_to_add):
""" returns an example java class with the
given content_to_add contained within a method.
"""
template = """
public class Lambda {
public static void main(String args[]) {
%s
}
}
"""
return template % content_to_add
def filter_type_in_method(clazz, the_type, method_name):
""" yields the result of filtering the given class for the given
type inside the given method identified by its name.
"""
for path, node in clazz.filter(the_type):
for p in reversed(path):
if isinstance(p, tree.MethodDeclaration):
if p.name == method_name:
yield path, node
class LambdaSupportTest(unittest.TestCase):
""" Contains tests for java 8 lambda syntax. """
def assert_contains_lambda_expression_in_m(
self, clazz, method_name='main'):
""" asserts that the given tree contains a method with the supplied
method name containing a lambda expression.
"""
matches = list(filter_type_in_method(
clazz, tree.LambdaExpression, method_name))
if not matches:
self.fail('No matching lambda expression found.')
return matches
def test_lambda_support_no_parameters_no_body(self):
""" tests support for lambda with no parameters and no body. """
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class("() -> {};")))
def test_lambda_support_no_parameters_expression_body(self):
""" tests support for lambda with no parameters and an
expression body.
"""
test_classes = [
setup_java_class("() -> 3;"),
setup_java_class("() -> null;"),
setup_java_class("() -> { return 21; };"),
setup_java_class("() -> { System.exit(1); };"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_lambda_support_no_parameters_complex_expression(self):
""" tests support for lambda with no parameters and a
complex expression body.
"""
code = """
() -> {
if (true) return 21;
else
{
int result = 21;
return result / 2;
}
};"""
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class(code)))
def test_parameter_no_type_expression_body(self):
""" tests support for lambda with parameters with inferred types. """
test_classes = [
setup_java_class("(bar) -> bar + 1;"),
setup_java_class("bar -> bar + 1;"),
setup_java_class("x -> x.length();"),
setup_java_class("y -> { y.boom(); };"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_parameter_with_type_expression_body(self):
""" tests support for lambda with parameters with formal types. """
test_classes = [
setup_java_class("(int foo) -> { return foo + 2; };"),
setup_java_class("(String s) -> s.length();"),
setup_java_class("(int foo) -> foo + 1;"),
setup_java_class("(Thread th) -> { th.start(); };"),
setup_java_class("(String foo, String bar) -> "
"foo + bar;"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_parameters_with_no_type_expression_body(self):
""" tests support for multiple lambda parameters
that are specified without their types.
"""
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class("(x, y) -> x + y;")))
def test_parameters_with_mixed_inferred_and_declared_types(self):
""" this tests that lambda type specification mixing is considered
invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(x, int y) -> x+y;"))
def test_parameters_inferred_types_with_modifiers(self):
""" this tests that lambda inferred type parameters with modifiers are
considered invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(x, final y) -> x+y;"))
def test_invalid_parameters_are_invalid(self):
""" this tests that invalid lambda parameters are
considered invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(a b c) -> {};"))
def test_cast_works(self):
""" this tests that a cast expression works as expected. """
parse.parse(setup_java_class("String x = (String) A.x() ;"))
class MethodReferenceSyntaxTest(unittest.TestCase):
""" Contains tests for java 8 method reference syntax. """
def assert_contains_method_reference_expression_in_m(
self, clazz, method_name='main'):
""" asserts that the given class contains a method with the supplied
method name containing a method reference.
"""
matches = list(filter_type_in_method(
clazz, tree.MethodReference, method_name))
if not matches:
self.fail('No matching method reference found.')
return matches
def test_method_reference(self):
""" tests that method references are supported. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::length;")))
def test_method_reference_to_the_new_method(self):
""" test support for method references to 'new'. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::new;")))
def test_method_reference_to_the_new_method_with_explict_type(self):
""" test support for method references to 'new' with an
explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::<String> new;")))
def test_method_reference_from_super(self):
""" test support for method references from 'super'. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("super::toString;")))
def test_method_reference_from_super_with_identifier(self):
""" test support for method references from Identifier.super. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String.super::toString;")))
@unittest.expectedFailure
def test_method_reference_explicit_type_arguments_for_generic_type(self):
""" currently there is no support for method references
for an explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("List<String>::size;")))
def test_method_reference_explicit_type_arguments(self):
""" test support for method references with an explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("Arrays::<String> sort;")))
@unittest.expectedFailure
def test_method_reference_from_array_type(self):
""" currently there is no support for method references
from a primary type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("int[]::new;")))
class InterfaceSupportTest(unittest.TestCase):
""" Contains tests for java 8 interface extensions. """
def test_interface_support_static_methods(self):
parse.parse("""
interface Foo {
void foo();
static Foo create() {
return new Foo() {
@Override
void foo() {
System.out.println("foo");
}
};
}
}
""")
def test_interface_support_default_methods(self):
parse.parse("""
interface Foo {
default void foo() {
System.out.println("foo");
}
}
""")
def main():
unittest.main()
if __name__ == '__main__':
main()
|
auctioning_platform/shipping/shipping/domain/value_objects/__init__.py | nhdinh/smp-modulith | 299 | 65393 |
<reponame>nhdinh/smp-modulith
__all__ = ["ConsigneeId", "PackageStatus"]
from shipping.domain.value_objects.package_status import PackageStatus
ConsigneeId = int
|
navec/vocab.py | FreedomSlow/navec | 115 | 65395 |
from gzip import (
compress,
GzipFile
)
import numpy as np
from .record import Record
UNK = '<unk>'
PAD = '<pad>'
class Vocab(Record):
__attributes__ = ['words', 'counts']
def __init__(self, words, counts):
self.words = words
self.counts = counts
self.word_ids = {
word: id
for id, word in enumerate(self.words)
}
self.unk_id = self.word_ids.get(UNK)
self.pad_id = self.word_ids.get(PAD)
def __getitem__(self, word):
return self.word_ids[word]
def __contains__(self, word):
return word in self.word_ids
def get(self, word, default=None):
if word in self:
return self[word]
return default
def count(self, word):
return self.counts[self.word_ids[word]]
def top(self, count=None):
return sorted(
self.words,
key=self.count,
reverse=True
)[:count]
def sampled(self, words):
words = list(words)
counts = [
self.counts[self.word_ids[_]]
for _ in words
]
return Vocab(words, counts)
def __repr__(self):
return '{name}(words=[...], counts=[...])'.format(
name=self.__class__.__name__
)
def _repr_pretty_(self, printer, cycle):
printer.text(repr(self))
@classmethod
def from_glove(cls, words, counts):
# for some reason glove vocab may have words with broken
# unicode
words = [_.decode('utf8', errors='ignore') for _ in words]
# emb has unk in the end
for word in (UNK, PAD):
words.append(word)
counts.append(0)
return cls(words, counts)
@property
def as_glove(self):
for word, count in zip(self.words, self.counts):
if word in (UNK, PAD):
continue
word = word.encode('utf8')
yield word, count
@property
def as_bytes(self):
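# Binary layout (added comment): a uint32 vocab size, followed by that many
# uint32 counts, followed by the words joined with '\n' as UTF-8, all gzip
# compressed; from_file() below reads the fields back in the same order.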
meta = [len(self.counts)]
meta = np.array(meta).astype(np.uint32).tobytes()
words = '\n'.join(self.words)
words = words.encode('utf8')
counts = np.array(self.counts, dtype=np.uint32).tobytes()
return compress(meta + counts + words)
@classmethod
def from_file(cls, file):
file = GzipFile(mode='rb', fileobj=file)
buffer = file.read(4)
size, = np.frombuffer(buffer, np.uint32)
buffer = file.read(4 * size)
counts = np.frombuffer(buffer, np.uint32).tolist()
text = file.read().decode('utf8')
words = text.splitlines()
return cls(words, counts)
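# A minimal round-trip sketch (illustrative only, not part of navec): build a
# small Vocab, serialize it with as_bytes, and restore it with from_file.
# Run as a module (e.g. python -m navec.vocab) so the relative import works.
if __name__ == '__main__':
    import io
    _vocab = Vocab(words=['the', 'cat', UNK, PAD], counts=[10, 3, 0, 0])
    _blob = _vocab.as_bytes                       # gzip(meta + counts + words)
    _restored = Vocab.from_file(io.BytesIO(_blob))
    assert _restored.words == _vocab.words
    assert _restored.counts == _vocab.counts
    assert _restored['cat'] == 1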
|
scripts/nick/import_pyEPR.py | mkxia57/pyEPR | 109 | 65429 |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 22 11:21:01 2017
@author: Zlatko
"""
from pyEPR import *
if 0:
# Specify the HFSS project to be analyzed
project_info = ProjectInfo(r"X:\Simulation\\hfss\\KC\\")
project_info.project_name = '2013-12-03_9GHzCavity' # Name of the project file (string). "None" will get the current active one.
project_info.design_name = '9GHz_EM_center_SNAIL' # Name of the design file (string). "None" will get the current active one.
project_info.setup_name = None # Name of the setup (string). "None" will get the current active one.
## Describe the junctions in the HFSS design
project_info.junctions['snail'] = {'rect':'qubit', 'line': 'JunctionLine', 'Lj_variable':'LJ', 'length':0.0001}
# project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001}
# Dissipative elements EPR
project_info.dissipative['dielectric_surfaces'] = None # supply names here, there are more options in project_info.dissipative.
# Run analysis
epr_hfss = DistributedAnalysis(project_info)
epr_hfss.do_EPR_analysis() #variations = ['1', '70']
if 1: # Hamiltonian analysis
# filename = epr_hfss.data_filename
filename = r'X:\Simulation\hfss\KC\pyEPR_results_2018\2013-12-03_9GHzCavity\9GHz_EM_center_SNAIL\9GHz_EM_center_SNAIL_20180726_170049.hdf5'
#filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5'
epr = QuantumAnalysis(filename)
#result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
epr.analyze_all_variations(cos_trunc = None, fock_trunc = 4) # only quadratic part
epr.plot_hamiltonian_results()
if 1:
from pyEPR.toolbox_plotting import cmap_discrete
f0 = epr.results.get_frequencies_HFSS()
f1 = epr.results.get_frequencies_O1()
chi = epr.results.get_chi_O1()
mode_idx = list(f0.index)
nmodes = len(mode_idx)
cmap = cmap_discrete(nmodes)
|
NX/catkin_ws/src/enet_ros/src/utils/transforms/data_augmentation.py | dlfdn9392/autonomous_driving_car_project | 199 | 65457 |
import random
import numpy as np
import cv2
from utils.transforms.transforms import CustomTransform
class RandomFlip(CustomTransform):
def __init__(self, prob_x=0, prob_y=0):
"""
Arguments:
----------
prob_x: range [0, 1], probability to use horizontal flip, setting to 0 means disabling flip
prob_y: range [0, 1], probability to use vertical flip
"""
self.prob_x = prob_x
self.prob_y = prob_y
def __call__(self, sample):
img = sample.get('img').copy()
segLabel = sample.get('segLabel', None)
if segLabel is not None:
segLabel = segLabel.copy()
flip_x = np.random.choice([False, True], p=(1 - self.prob_x, self.prob_x))
flip_y = np.random.choice([False, True], p=(1 - self.prob_y, self.prob_y))
if flip_x:
img = np.ascontiguousarray(np.flip(img, axis=1))
if segLabel is not None:
segLabel = np.ascontiguousarray(np.flip(segLabel, axis=1))
if flip_y:
img = np.ascontiguousarray(np.flip(img, axis=0))
if segLabel is not None:
segLabel = np.ascontiguousarray(np.flip(segLabel, axis=0))
_sample = sample.copy()
_sample['img'] = img
_sample['segLabel'] = segLabel
return _sample
class Darkness(CustomTransform):
def __init__(self, coeff):
assert coeff >= 1., "Darkness coefficient must be greater than 1"
self.coeff = coeff
def __call__(self, sample):
img = sample.get('img')
coeff = np.random.uniform(1., self.coeff)
img = (img.astype('float32') / coeff).astype('uint8')
_sample = sample.copy()
_sample['img'] = img
return _sample
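# A small usage sketch (illustrative only, not part of the original module):
# compose the two transforms on a dummy HWC uint8 sample dict.
if __name__ == '__main__':
    _sample = {
        'img': np.random.randint(0, 256, (256, 512, 3), dtype=np.uint8),
        'segLabel': np.zeros((256, 512), dtype=np.uint8),
    }
    _sample = RandomFlip(prob_x=0.5, prob_y=0.0)(_sample)
    _sample = Darkness(coeff=2.0)(_sample)
    print(_sample['img'].shape, _sample['img'].dtype)  # (256, 512, 3) uint8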
|
tests/manage/z_cluster/test_osd_heap_profile.py | annagitel/ocs-ci | 130 | 65465 |
<filename>tests/manage/z_cluster/test_osd_heap_profile.py
import logging
import pytest
import time
import random
from ocs_ci.framework.testlib import (
ManageTest,
tier2,
skipif_ocs_version,
bugzilla,
skipif_external_mode,
)
from ocs_ci.ocs.resources.pod import get_ceph_tools_pod, get_osd_pods, get_osd_pod_id
from ocs_ci.utility.utils import TimeoutSampler
from ocs_ci.ocs.exceptions import CommandFailed
log = logging.getLogger(__name__)
@tier2
@bugzilla("1938049")
@skipif_ocs_version("<4.6")
@pytest.mark.polarion_id("OCS-2512")
@skipif_external_mode
class TestOSDHeapProfile(ManageTest):
"""
1.Start heap profiler for osd
$ oc exec rook-ceph-tools-85ccf9f7c5-v7bgk ceph tell osd.0 heap start_profiler
2.Dump heap profile
$ oc exec rook-ceph-tools-85ccf9f7c5-v7bgk ceph tell osd.0 heap dump
3.Get heap profile in /var/log/ceph dir on osd node
$ oc rsh rook-ceph-osd-0-959dbdc6d-pddd4
sh-4.4# ls -ltr /var/log/ceph/
-rw-r--r--. 1 ceph ceph 295891 Apr 11 14:33 osd.0.profile.0001.heap
"""
def test_osd_heap_profile(self):
"""
Generate heap profile dump file for OSDs and verify whether the file
is created on '/var/log/ceph/'
"""
strings_err = ["error", "fail"]
osd_pods = get_osd_pods()
osd_id = str(random.randint(0, len(osd_pods) - 1))
log.info(f"Start heap profiler for osd-{osd_id}")
pod_tool = get_ceph_tools_pod()
out = pod_tool.exec_cmd_on_pod(
command=f"ceph tell osd.{osd_id} heap start_profiler", out_yaml_format=False
)
logging.info(f"command output:{out}")
for string_err in strings_err:
assert (
string_err not in out.lower()
), f"{string_err} on the output command {out}"
logging.info("Sleep 10 sec, for running heap profiler")
time.sleep(10)
log.info("Dump heap profile")
out = pod_tool.exec_sh_cmd_on_pod(command=f"ceph tell osd.{osd_id} heap dump")
logging.info(out)
for string_err in strings_err:
assert (
string_err not in out.lower()
), f"{string_err} on the output command {out}"
log.info(f"Get osd-{osd_id} pod object")
for osd_pod in osd_pods:
if get_osd_pod_id(osd_pod) == osd_id:
osd_pod_profile = osd_pod
osd_profile_str = f"osd.{osd_id}.profile"
log.info(f"Verify {osd_profile_str} log exist on /var/log/ceph/")
sample = TimeoutSampler(
timeout=100,
sleep=10,
func=self.verify_output_command_osd_pod,
command="ls -ltr /var/log/ceph/",
pod_obj=osd_pod_profile,
str_to_check=osd_profile_str,
)
if not sample.wait_for_func_status(result=True):
log.error(f"{osd_profile_str} log does not exist on /var/log/ceph")
raise ValueError(f"{osd_profile_str} log does not exist on /var/log/ceph")
log.info(f"osd.{osd_id}.profile log exist on /var/log/ceph")
def verify_output_command_osd_pod(self, command, pod_obj, str_to_check):
"""
Check the output of the command (from osd pod)
Args:
command (str): command run on osd pod
pod_obj (obj): pod object
str_to_check (str): check if the string is contained on output command
Returns:
bool: True if we find the string in output, False otherwise
"""
try:
out = pod_obj.exec_cmd_on_pod(command=command)
log.info(f"the output of the command {command}: {out}")
return True if str_to_check in out else False
except CommandFailed as e:
log.error(e)
return False
|
head/__init__.py | NotMorven/cavaface.pytorch | 329 | 65467 |
from head.metrics import *
from head.metrics_parallel import *
HEAD_DICT = {
"Softmax": Softmax,
"ArcFace": ArcFace,
"Combined": Combined,
"CosFace": CosFace,
"SphereFace": SphereFace,
"Am_softmax": Am_softmax,
"CurricularFace": CurricularFace,
"ArcNegFace": ArcNegFace,
"SVX": SVXSoftmax,
"AirFace": AirFace,
"QAMFace": QAMFace,
"CircleLoss": CircleLoss,
"ParallelArcFace": ParallelArcFace,
}
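# Added note: HEAD_DICT is a simple name -> class registry, so a config string
# such as "ArcFace" can select the classification head via HEAD_DICT["ArcFace"];
# constructor arguments depend on the definitions in head/metrics.py and are
# not repeated here.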
|
spartan/examples/ssvd/ssvd.py | GabrielWen/spartan | 156 | 65507 |
import spartan
from spartan import core, expr, util, blob_ctx
import numpy as np
from .qr import qr
def svd(A, k=None):
"""
Stochastic SVD.
Parameters
----------
A : spartan matrix
Array to compute the SVD on, of shape (M, N)
k : int, optional
Number of singular values and vectors to compute.
The operations include matrix multiplication and QR decomposition.
We parallelize both of them.
Returns
--------
U : Spartan array of shape (M, k)
S : numpy array of shape (k,)
V : numpy array of shape (k, k)
"""
if k is None: k = A.shape[1]
Omega = expr.randn(A.shape[1], k)
Y = expr.dot(A, Omega)
Q, R = qr(Y)
B = expr.dot(expr.transpose(Q), A)
BTB = expr.dot(B, expr.transpose(B)).optimized().glom()
S, U_ = np.linalg.eig(BTB)
S = np.sqrt(S)
# Sort by eigen values from large to small
si = np.argsort(S)[::-1]
S = S[si]
U_ = U_[:, si]
U = expr.dot(Q, U_).optimized().evaluate()
V = np.dot(np.dot(expr.transpose(B).optimized().glom(), U_), np.diag(np.ones(S.shape[0]) / S))
return U, S, V.T
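# For intuition, a plain-NumPy sketch of the same randomized-SVD pipeline
# (an illustrative translation of the distributed version above, not part of Spartan):
def _svd_numpy_sketch(A, k):
    Omega = np.random.randn(A.shape[1], k)
    Y = A.dot(Omega)                     # sample the range of A
    Q, _ = np.linalg.qr(Y)               # orthonormal basis for that range
    B = Q.T.dot(A)                       # small k x N projection of A
    S, U_ = np.linalg.eig(B.dot(B.T))    # eigen-decompose B B^T, as above
    S = np.sqrt(S)
    si = np.argsort(S)[::-1]             # sort singular values from large to small
    S, U_ = S[si], U_[:, si]
    U = Q.dot(U_)
    V = B.T.dot(U_).dot(np.diag(1.0 / S))
    return U, S, V.T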
|
setup/setup_database.py | Gui-Luz/Empire | 5,720 | 65515 |
#!/usr/bin/env python
import sqlite3, os, string, hashlib, random
###################################################
#
# Default values for the config
#
###################################################
# Staging Key is set up via environmental variable
# or via command line. By setting RANDOM a randomly
# selected password will automatically be selected
# or it can be set to any bash acceptable character
# set for a password.
STAGING_KEY = os.getenv('STAGING_KEY', "BLANK")
punctuation = '!#%&()*+,-./:;<=>?@[]^_{|}~'
# otherwise prompt the user for a set value to hash for the negotiation password
if STAGING_KEY == "BLANK":
choice = raw_input("\n [>] Enter server negotiation password, enter for random generation: ")
if choice == "":
# if no password is entered, generate something random
STAGING_KEY = ''.join(random.sample(string.ascii_letters + string.digits + punctuation, 32))
else:
STAGING_KEY = hashlib.md5(choice).hexdigest()
elif STAGING_KEY == "RANDOM":
STAGING_KEY = ''.join(random.sample(string.ascii_letters + string.digits + punctuation, 32))
# Calculate the install path. We know the project directory will always be the parent of the current directory. Any modifications of the folder structure will
# need to be applied here.
INSTALL_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + "/"
# an IP white list to ONLY accept clients from
# format is "192.168.1.1,192.168.1.10-192.168.1.100,10.0.0.0/8"
IP_WHITELIST = ""
# an IP black list to reject clients from
# format is "192.168.1.1,192.168.1.10-192.168.1.100,10.0.0.0/8"
IP_BLACKLIST = ""
# default credentials used to log into the RESTful API
API_USERNAME = "empireadmin"
API_PASSWORD = ''.join(random.sample(string.ascii_letters + string.digits + punctuation, 32))
# the 'permanent' API token (doesn't change)
API_PERMANENT_TOKEN = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(40))
# default obfuscation setting
OBFUSCATE = 0
# default obfuscation command
OBFUSCATE_COMMAND = r'Token\All\1'
###################################################
#
# Database setup.
#
###################################################
conn = sqlite3.connect('%s/data/empire.db'%INSTALL_PATH)
c = conn.cursor()
# try to prevent some of the weird sqlite I/O errors
c.execute('PRAGMA journal_mode = OFF')
c.execute('DROP TABLE IF EXISTS config')
c.execute('''CREATE TABLE config (
"staging_key" text,
"install_path" text,
"ip_whitelist" text,
"ip_blacklist" text,
"autorun_command" text,
"autorun_data" text,
"rootuser" boolean,
"api_username" text,
"api_password" text,
"api_current_token" text,
"api_permanent_token" text,
"obfuscate" integer,
"obfuscate_command" text
)''')
# kick off the config component of the database
c.execute("INSERT INTO config VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)", (STAGING_KEY, INSTALL_PATH, IP_WHITELIST, IP_BLACKLIST, '', '', False, API_USERNAME, API_PASSWORD, '', API_PERMANENT_TOKEN, OBFUSCATE, OBFUSCATE_COMMAND))
c.execute('''CREATE TABLE "agents" (
"id" integer PRIMARY KEY,
"session_id" text,
"listener" text,
"name" text,
"language" text,
"language_version" text,
"delay" integer,
"jitter" real,
"external_ip" text,
"internal_ip" text,
"username" text,
"high_integrity" integer,
"process_name" text,
"process_id" text,
"hostname" text,
"os_details" text,
"session_key" text,
"nonce" text,
"checkin_time" text,
"lastseen_time" text,
"parent" text,
"children" text,
"servers" text,
"profile" text,
"functions" text,
"kill_date" text,
"working_hours" text,
"lost_limit" integer,
"taskings" text,
"results" text
)''')
# the 'options' field contains a pickled version of all
# currently set listener options
c.execute('''CREATE TABLE "listeners" (
"id" integer PRIMARY KEY,
"name" text,
"module" text,
"listener_type" text,
"listener_category" text,
"enabled" boolean,
"options" blob
)''')
# type = hash, plaintext, token
# for krbtgt, the domain SID is stored in misc
# for tokens, the data is base64'ed and stored in pass
c.execute('''CREATE TABLE "credentials" (
"id" integer PRIMARY KEY,
"credtype" text,
"domain" text,
"username" text,
"password" <PASSWORD>,
"host" text,
"os" text,
"sid" text,
"notes" text
)''')
c.execute( '''CREATE TABLE "taskings" (
"id" integer,
"data" text,
"agent" text,
PRIMARY KEY(id, agent)
)''')
c.execute( '''CREATE TABLE "results" (
"id" integer,
"data" text,
"agent" text,
PRIMARY KEY(id, agent)
)''')
# event_types -> checkin, task, result, rename
c.execute('''CREATE TABLE "reporting" (
"id" integer PRIMARY KEY,
"name" text,
"event_type" text,
"message" text,
"time_stamp" text,
"taskID" integer,
FOREIGN KEY(taskID) REFERENCES results(id)
)''')
# commit the changes and close everything off
conn.commit()
conn.close()
print "\n [*] Database setup completed!\n"
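# Optional sanity check (appended for illustration, not part of the original
# setup script): re-open the database and confirm the config row was written.
verify = sqlite3.connect('%s/data/empire.db' % INSTALL_PATH)
row = verify.execute("SELECT staging_key, api_username FROM config").fetchone()
assert row[0] == STAGING_KEY and row[1] == API_USERNAME
verify.close()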
|
egs/lj/local/preprocess_scripts/text/parse_pronounce.py | entn-at/efficient_tts | 111 | 65518 |
<reponame>entn-at/efficient_tts<filename>egs/lj/local/preprocess_scripts/text/parse_pronounce.py
import os
from util import register
mandarin_initial_list = ["b", "ch", "c", "d", "f", "g", "h", "j", "k", "l",
"m", "n", "p", "q", "r", "sh", "s", "t", "x", "zh",
"z"]
# fuse rear case to avoid OOV
special_phone_map = {}
# punc list
punc_list = ['_FH', '_MH', '_DUN', '_DH', '_WH', '_OPUNC']
special_phn_list = ['_WORD_SEG#1', '_WORD_SEG#2', '_WORD_SEG#3', '_WORD_SEG#4', '_HEAD', '_SPS_SEG', '_JH_E', '_WH_E', '_TH_E']
# func puncs
punc_map = {
'_FH': '_FH',
'_MH': '_MH',
'_DUN': '_DUN',
'_DH': '_DH',
'_WH': '_WH',
'_TH': '_TH',
'_DYH': '_OPUNC',
'_KH': '_OPUNC',
'_PZH': '_OPUNC',
'_SLH': '_OPUNC',
'_SMH': '_OPUNC',
'_SYH': '_OPUNC',
'_YD': '_OPUNC'}
final_punc_map = {
'_DH_E': '_JH_E',
'_JH': '_DH',
'_OPUNC_E': '_JH_E'}
parse_pinyin_methods = {}
parse_line_methods = {}
parse_sent_methods = {}
def split_phone_tone(s):
head = s.rstrip('0123456')
if len(head) == len(s):
phn_tone = [s]
else:
tail = s[len(head):]
phn_tone = [head, tail]
return phn_tone
@register.register('PHN_TONE_SEP', parse_pinyin_methods)
def parse_pinyin_phn_tone_sep(py):
phns = py.split('-')
phns_tone = []
for i in phns:
if i in special_phone_map:
i = special_phone_map[i]
phns_tone.extend(split_phone_tone(i))
outputs = []
if py.islower():
outputs.extend(phns_tone)
else:
outputs.extend(phns_tone)
return outputs
@register.register('PHN_TONE', parse_pinyin_methods)
def parse_pinyin_phn_tone(py):
phns = py.split('-')
outputs = []
if py.islower():
if len(phns) == 1:
outputs.extend([phns[0]])
else:
yun_tail = phns[-1]
if yun_tail in special_phone_map:
yun_tail = special_phone_map[yun_tail]
outputs.extend(phns[:-1] + [yun_tail])
else:
for phn in phns:
if phn in special_phone_map:
outputs.append(special_phone_map[phn])
else:
outputs.append(phn)
return outputs
def parse_pinyin(pronoun_line, py_type):
parts = pronoun_line.split()
pinyin_str = parts[-1]
pinyins = [py for py in pinyin_str.split("|")
if py != ""]
try:
outputs = []
for py in pinyins:
outputs.extend(['_SPS_SEG'])
outputs.extend(parse_pinyin_methods[py_type](py))
except KeyError:
raise ValueError('parse_pinyin for [{}] is not implemented'.format(py_type))
return outputs
def parse_punct(pronoun_line):
parts = pronoun_line.split()
punct_part = parts[3]
prosody_word_seg_sign = parts[-2]
if prosody_word_seg_sign == '#0':
suffix = []
else:
if punct_part != '0':
punc = '_' + punct_part.upper()
if punc in punc_map:
punc = punc_map[punc]
suffix = ['_WORD_SEG' + prosody_word_seg_sign] + [punc]
else:
suffix = ['_WORD_SEG' + prosody_word_seg_sign]
return suffix
def parse_pos(pronoun_line):
parts = pronoun_line.split()
pos_part = parts[1]
pos = '~' + pos_part
return pos
@register.register(['PHN', 'PHN_TONE', 'PHN_TONE_SEP', 'SHENGYUN'], parse_line_methods)
def parse_line_default(pronoun_line, py_type):
pinyins = parse_pinyin(pronoun_line, py_type)
punc = parse_punct(pronoun_line)
return pinyins + punc
def parse_line(pronoun_line, py_type):
try:
return parse_line_methods[py_type](pronoun_line, py_type)
except KeyError:
raise ValueError('parse_line for [{}] is not implemented'.format(py_type))
@register.register(['PHN', 'PHN_TONE', 'PHN_TONE_SEP', 'SHENGYUN'], parse_sent_methods)
def parse_sent_default(pronoun_lines, py_type, use_head, use_tail):
if use_head:
sent_outputs = ['_HEAD']
else:
sent_outputs = []
for line_idx, pronoun_line in enumerate(pronoun_lines):
if pronoun_line == '' or pronoun_line.startswith('#') or pronoun_line.startswith('['):
continue
else:
line_outputs = parse_line(pronoun_line, py_type)
if line_idx == len(pronoun_lines) - 1 and line_outputs[-1].startswith('_'):
line_outputs[-1] += '_E'
sent_outputs.extend(line_outputs)
for phn_idx, phn_item in enumerate(sent_outputs):
try:
sent_outputs[phn_idx] = final_punc_map[phn_item]
except KeyError as e:
pass
if use_tail:
sent_outputs.append('_TAIL')
return sent_outputs
def parse_sent(pronoun_lines, py_type, use_head=True, use_tail=True):
try:
return parse_sent_methods[py_type](pronoun_lines, py_type, use_head, use_tail)
except KeyError:
raise ValueError('parse_sent for [{}] is not implemented'.format(py_type))
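# Illustrative checks (added for clarity; a sketch runnable alongside this module):
if __name__ == '__main__':
    assert split_phone_tone('ang4') == ['ang', '4']       # phone + tone digit
    assert split_phone_tone('_DH') == ['_DH']             # punctuation symbol, no tone
    assert parse_pinyin_phn_tone_sep('zh-ang4') == ['zh', 'ang', '4']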
|
python/dgllife/data/pcba.py | padr31/dgl-lifesci | 390 | 65533 |
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# PCBA from MoleculeNet for the prediction of biological activities
import pandas as pd
from dgl.data.utils import get_download_dir, download, _get_dgl_url, extract_archive
from .csv_dataset import MoleculeCSVDataset
from ..utils.mol_to_graph import smiles_to_bigraph
__all__ = ['PCBA']
class PCBA(MoleculeCSVDataset):
r"""PCBA from MoleculeNet for the prediction of biological activities
PubChem BioAssay (PCBA) is a database consisting of biological activities of small molecules
generated by high-throughput screening. This dataset is a subset of PCBA, containing 128
bioassays measured over 400 thousand compounds.
References:
* [1] MoleculeNet: A Benchmark for Molecular Machine Learning.
* [2] Massively Multitask Networks for Drug Discovery.
Parameters
----------
smiles_to_graph: callable, str -> DGLGraph
A function turning a SMILES string into a DGLGraph.
Default to :func:`dgllife.utils.smiles_to_bigraph`.
node_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for nodes like atoms in a molecule, which can be used to update
ndata for a DGLGraph. Default to None.
edge_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for edges like bonds in a molecule, which can be used to update
edata for a DGLGraph. Default to None.
load : bool
Whether to load the previously pre-processed dataset or pre-process from scratch.
``load`` should be False when we want to try different graph construction and
featurization methods and need to preprocess from scratch. Default to False.
log_every : bool
Print a message every time ``log_every`` molecules are processed. Default to 1000.
cache_file_path : str
Path to the cached DGLGraphs, default to 'pcba_dglgraph.bin'.
n_jobs : int
The maximum number of concurrently running jobs for graph construction and featurization,
using joblib backend. Default to 1.
Examples
--------
>>> import torch
>>> from dgllife.data import PCBA
>>> from dgllife.utils import smiles_to_bigraph, CanonicalAtomFeaturizer
>>> dataset = PCBA(smiles_to_bigraph, CanonicalAtomFeaturizer())
>>> # Get size of the dataset
>>> len(dataset)
437929
>>> # Get the 0th datapoint, consisting of SMILES, DGLGraph, labels, and masks
>>> dataset[0]
('CC(=O)N1CCC2(CC1)NC(=O)N(c1ccccc1)N2',
DGLGraph(num_nodes=20, num_edges=44,
ndata_schemes={'h': Scheme(shape=(74,), dtype=torch.float32)}
edata_schemes={}),
tensor([0., ..., 0.]),
tensor([1., ..., 0.]))
The dataset instance also contains information about molecule ids.
>>> dataset.ids[i]
We can also get the id along with SMILES, DGLGraph, labels, and masks at once.
>>> dataset.load_full = True
>>> dataset[0]
('CC(=O)N1CCC2(CC1)NC(=O)N(c1ccccc1)N2',
DGLGraph(num_nodes=20, num_edges=44,
ndata_schemes={'h': Scheme(shape=(74,), dtype=torch.float32)}
edata_schemes={}),
tensor([0., ..., 0.]),
tensor([1., ..., 0.]),
'CID1511280')
To address the imbalance between positive and negative samples, we can re-weight
positive samples for each task based on the training datapoints.
>>> train_ids = torch.arange(1000)
>>> dataset.task_pos_weights(train_ids)
tensor([7.3400, 489.0000, ..., 1.0000])
"""
def __init__(self,
smiles_to_graph=smiles_to_bigraph,
node_featurizer=None,
edge_featurizer=None,
load=False,
log_every=1000,
cache_file_path='./pcba_dglgraph.bin',
n_jobs=1):
self._url = 'dataset/pcba.zip'
data_path = get_download_dir() + '/pcba.zip'
dir_path = get_download_dir() + '/pcba'
download(_get_dgl_url(self._url), path=data_path, overwrite=False)
extract_archive(data_path, dir_path)
df = pd.read_csv(dir_path + '/pcba.csv')
self.ids = df['mol_id'].tolist()
self.load_full = False
df = df.drop(columns=['mol_id'])
super(PCBA, self).__init__(df=df,
smiles_to_graph=smiles_to_graph,
node_featurizer=node_featurizer,
edge_featurizer=edge_featurizer,
smiles_column='smiles',
cache_file_path=cache_file_path,
load=load,
log_every=log_every,
init_mask=True,
n_jobs=n_jobs)
self.ids = [self.ids[i] for i in self.valid_ids]
def __getitem__(self, item):
"""Get datapoint with index
Parameters
----------
item : int
Datapoint index
Returns
-------
str
SMILES for the ith datapoint
DGLGraph
DGLGraph for the ith datapoint
Tensor of dtype float32 and shape (T)
Labels of the ith datapoint for all tasks. T for the number of tasks.
Tensor of dtype float32 and shape (T)
Binary masks of the ith datapoint indicating the existence of labels for all tasks.
str, optional
Id for the ith datapoint, returned only when ``self.load_full`` is True.
"""
if self.load_full:
return self.smiles[item], self.graphs[item], self.labels[item], \
self.mask[item], self.ids[item]
else:
return self.smiles[item], self.graphs[item], self.labels[item], self.mask[item]
|
rl/agents/a2c/agent_test.py | jaejaywoo/pysc2-rl-agents | 138 | 65534 |
from collections import namedtuple
import tensorflow as tf
import numpy as np
from rl.agents.a2c.agent import A2CAgent
TestArgType = namedtuple('ArgType', ['name'])
arg_type = TestArgType('arg')
A = np.array
class A2CAgentTest(tf.test.TestCase):
def test_compute_policy_log_probs(self):
from rl.agents.a2c.agent import compute_policy_log_probs
available_actions = A([[1, 0, 1],
[1, 0, 0],
[1, 1, 1]], dtype=np.float32)
fn_pi = A([[0.2, 0.0, 0.8],
[1.0, 0.0, 0.0],
[0.2, 0.7, 0.1]], dtype=np.float32)
fn_ids = A([2, 0, 1], dtype=np.int32)
arg_pi = {arg_type: A([[0.8, 0.2],
[0.0, 1.0],
[0.5, 0.5]], dtype=np.float32)}
arg_ids = {arg_type: A([0, 1, -1], dtype=np.int32)}
log_probs = compute_policy_log_probs(
available_actions, (fn_pi, arg_pi), (fn_ids, arg_ids)
)
expected_log_probs = np.log([0.8, 1.0, 0.7]) + A([np.log(0.8), np.log(1.0), 0])
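# (Added note) Derivation of the expected values: the chosen fn_ids [2, 0, 1]
# pick probabilities [0.8, 1.0, 0.7] from fn_pi (masking with available_actions
# leaves these rows unchanged because each masked row already sums to 1), and
# the chosen arg ids [0, 1, -1] contribute [log 0.8, log 1.0, 0], the -1 entry
# being excluded from the arg log-prob.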
with self.test_session() as sess:
log_probs_out = sess.run(log_probs)
self.assertAllClose(log_probs_out, expected_log_probs)
def test_compute_policy_entropy(self):
from rl.agents.a2c.agent import compute_policy_entropy
available_actions = A([[1, 0, 1],
[1, 0, 0],
[1, 1, 1]], dtype=np.float32)
fn_pi = A([[0.2, 0.0, 0.8],
[1.0, 0.0, 0.0],
[0.2, 0.7, 0.1]], dtype=np.float32)
fn_ids = A([2, 0, 1], dtype=np.int32)
arg_pi = {arg_type: A([[0.8, 0.2],
[0.0, 1.0],
[0.5, 0.5]], dtype=np.float32)}
arg_ids = {arg_type: A([0, 1, -1], dtype=np.int32)}
entropy = compute_policy_entropy(
available_actions, (fn_pi, arg_pi), (fn_ids, arg_ids)
)
expected_entropy = (0.50040245 + 0.80181855) / 3.0 + (0.50040245) / 2
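# (Added note) The fn term averages the per-row entropies of fn_pi
# ([0.50040245, 0.0, 0.80181855] nats) over the batch of 3; the arg term
# averages the per-row arg_pi entropies ([0.50040245, 0.0]) over the 2 samples
# whose arg id is not -1.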
with self.test_session() as sess:
entropy_out = sess.run(entropy)
self.assertAllClose(entropy_out, expected_entropy)
if __name__ == '__main__':
tf.test.main()
|
python/tvm/topi/cuda/nms.py | XiaoSong9905/tvm | 4,640 | 65541 |
<reponame>XiaoSong9905/tvm
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison
# pylint: disable=bad-continuation, unused-argument
"""Non-maximum suppression operator"""
import tvm
from tvm import te
from tvm.contrib import nvcc
from tvm.contrib.thrust import can_use_thrust, can_use_rocthrust
from tvm.ir import register_intrin_lowering
from tvm.tir import if_then_else
from .sort import argsort, argsort_thrust
from .scan import exclusive_scan
from ..utils import ceil_div
from ..math import cast
from ..transform import reshape
from ..vision.nms_util import (
calculate_overlap,
binary_search,
collect_selected_indices,
collect_selected_indices_and_scores,
run_all_class_nms,
)
def cuda_atomic_add_rule(op):
if op.dtype == "float32":
return tvm.tir.call_pure_extern("float32", "atomicAdd", op.args[0], op.args[1])
if op.dtype == "float64":
return tvm.tir.call_pure_extern("float64", "atomicAdd", op.args[0], op.args[1])
if op.dtype == "int32":
return tvm.tir.call_pure_extern("int32", "atomicAdd", op.args[0], op.args[1])
raise RuntimeError("only support int32, float32 and float64")
def opencl_atomic_add_rule(op):
if op.dtype == "int32":
return tvm.tir.call_pure_extern("int32", "atomic_add", op.args[0], op.args[1])
raise RuntimeError("only support int32")
register_intrin_lowering("tir.atomic_add", target="cuda", f=cuda_atomic_add_rule, level=99)
register_intrin_lowering("tir.atomic_add", target="opencl", f=opencl_atomic_add_rule, level=99)
def atomic_add(x, y):
return tvm.tir.call_intrin(y.dtype, "tir.atomic_add", x, y)
def get_valid_boxes_ir(data, valid_boxes, score_threshold, id_index, score_index):
"""Low level IR to identify bounding boxes given a score threshold.
Parameters
----------
data : Buffer
Input data. 3-D Buffer with shape [batch_size, num_anchors, elem_length].
score_threshold : Buffer or float32
Lower limit of score for valid bounding boxes.
id_index : optional, int
index of the class categories, -1 to disable.
score_index: optional, int
Index of the scores/confidence of boxes.
Returns
-------
valid_boxes: Buffer
2D Buffer indicating valid boxes with shape [batch_size, num_anchors].
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
elem_length = data.shape[2]
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data)
valid_boxes = ib.buffer_ptr(valid_boxes)
if isinstance(score_threshold, float):
score_threshold = tvm.tir.FloatImm("float32", score_threshold)
id_index = tvm.tir.IntImm("int32", id_index)
score_index = tvm.tir.IntImm("int32", score_index)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(num_anchors, max_threads)
nthread_by = batch_size
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
tid = bx * max_threads + tx
with ib.if_scope(tid < num_anchors):
i = by
j = tid
score = data[(i * num_anchors + j) * elem_length + score_index]
with ib.if_scope(
tvm.tir.all(
score > score_threshold,
tvm.tir.any(
id_index < 0, data[(i * num_anchors + j) * elem_length + id_index] >= 0
),
)
):
valid_boxes[i * num_anchors + j] = 1
with ib.else_scope():
valid_boxes[i * num_anchors + j] = 0
return ib.get()
def get_valid_counts_ir(data, valid_indices, valid_boxes, out, out_indices):
"""Low level IR to get valid count of bounding boxes
given a score threshold. Also prepares to move valid boxes to the
top of input data.
Parameters
----------
data : Buffer
Input data. 3-D Buffer with shape [batch_size, num_anchors, elem_length].
valid_indices: Buffer
2D Buffer of flag indicating valid data with shape [batch_size, num_anchors].
Returns
-------
out : Buffer
Sorted valid boxes
out_indices : Buffer
Indices of valid boxes in original data
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
elem_length = data.shape[2]
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data)
valid_indices = ib.buffer_ptr(valid_indices)
valid_boxes = ib.buffer_ptr(valid_boxes)
out = ib.buffer_ptr(out)
out_indices = ib.buffer_ptr(out_indices)
one = tvm.tir.const(1, dtype=out.dtype)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = num_anchors // max_threads + 1
nthread_by = batch_size
with ib.new_scope():
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
tid = bx * max_threads + tx
with ib.if_scope(tid < num_anchors):
i = by
j = tid
with ib.for_range(0, elem_length) as k:
out[(i * num_anchors + j) * elem_length + k] = -one
out_indices[i * num_anchors + j] = -1
with ib.new_scope():
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
tid = bx * max_threads + tx
with ib.if_scope(tid < num_anchors):
i = by
j = tid
with ib.if_scope(valid_boxes[i, tid] > 0):
with ib.for_range(0, elem_length) as k:
out[(i * num_anchors + valid_indices[i, tid]) * elem_length + k] = data[
(i * num_anchors + j) * elem_length + k
]
out_indices[i * num_anchors + valid_indices[i, tid]] = j
return ib.get()
def get_valid_counts(data, score_threshold=0, id_index=0, score_index=1):
"""Get valid count of bounding boxes given a score threshold.
Also moves valid boxes to the top of input data.
Parameters
----------
data : tvm.te.Tensor
Input data. 3-D tensor with shape [batch_size, num_anchors, elem_length].
score_threshold : optional, tvm.te.Tensor or float
Lower limit of score for valid bounding boxes.
id_index : optional, int
index of the class categories, -1 to disable.
score_index: optional, int
Index of the scores/confidence of boxes.
Returns
-------
valid_count : tvm.te.Tensor
1-D tensor for valid number of boxes.
out_tensor : tvm.te.Tensor
Rearranged data tensor.
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
valid_boxes_buf = tvm.tir.decl_buffer(
(batch_size, num_anchors), "int32", "valid_boxes_buf", data_alignment=8
)
valid_boxes = te.extern(
[(batch_size, num_anchors)],
[data],
lambda ins, outs: get_valid_boxes_ir(
ins[0], outs[0], score_threshold, id_index, score_index
),
dtype=["int32"],
in_buffers=[data_buf],
out_buffers=[valid_boxes_buf],
name="get_valid_boxes",
tag="get_valid_boxes_gpu",
)
valid_indices_buf = tvm.tir.decl_buffer(
(batch_size, num_anchors), "int32", "valid_indices_buf", data_alignment=8
)
valid_indices, valid_count = exclusive_scan(valid_boxes, axis=1, return_reduction=True)
out_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf", data_alignment=8)
out_indices_buf = tvm.tir.decl_buffer(
(batch_size, num_anchors), "int32", "out_buf", data_alignment=8
)
out, out_indices = te.extern(
[data.shape, (batch_size, num_anchors)],
[data, valid_indices, valid_boxes],
lambda ins, outs: get_valid_counts_ir(ins[0], ins[1], ins[2], outs[0], outs[1]),
dtype=["int32", data.dtype],
in_buffers=[data_buf, valid_indices_buf, valid_boxes_buf],
out_buffers=[out_buf, out_indices_buf],
name="get_valid_counts",
tag="get_valid_counts_gpu",
)
return [valid_count, out, out_indices]
def _nms_loop(
ib,
batch_size,
top_k,
iou_threshold,
max_output_size,
valid_count,
on_new_valid_box_func,
on_new_invalidated_box_func,
needs_bbox_check_func,
calc_overlap_func,
out_scores,
num_valid_boxes,
):
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.new_scope():
nthread_by = batch_size
nthread_tx = max_threads
# Some cuda architectures have smaller limit of 32K for cudaDevAttrMaxRegistersPerBlock
# vs 64K for most GPUs. Since this kernel uses many registers (around 35), the limit will
# be exceeded with 1024 threads.
target = tvm.target.Target.current(allow_none=False)
if target.kind.name == "cuda":
if nvcc.get_target_compute_version(target) in ["3.2", "5.3", "6.2"]:
nthread_tx = 512
by = te.thread_axis("blockIdx.y")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(by, "thread_extent", nthread_by)
ib.scope_attr(tx, "thread_extent", nthread_tx)
num_valid_boxes_local = ib.allocate(
"int32", (1,), name="num_valid_boxes_local", scope="local"
)
num_valid_boxes_local[0] = 0
def nms_inner_loop(ib, i, j, nkeep):
# The box j is valid, invalidate other boxes that overlap with j above iou_threshold
on_new_valid_box_func(ib, tx, num_valid_boxes_local[0], i, j)
num_valid_boxes_local[0] += 1
num_iter_per_thread = ceil_div(nkeep - (j + 1), nthread_tx)
with ib.for_range(0, num_iter_per_thread, name="_k") as _k:
k = j + 1 + _k * nthread_tx + tx
with ib.if_scope(
tvm.tir.all(
k < nkeep,
out_scores[i, k] > 0, # is the box k still valid?
needs_bbox_check_func(i, j, k),
)
):
iou = calc_overlap_func(i, j, k)
with ib.if_scope(iou >= iou_threshold):
# invalidate the box k
out_scores[i, k] = -1.0
on_new_invalidated_box_func(i, k)
ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
i = by
nkeep = if_then_else(tvm.tir.all(top_k > 0, top_k < valid_count[i]), top_k, valid_count[i])
max_output_size = if_then_else(max_output_size > 0, max_output_size, nkeep)
with ib.if_scope(tvm.tir.all(iou_threshold > 0, valid_count[i] > 0)):
# Apply nms
# No need to do more iteration if we have already reached max_output_size boxes
box_idx = ib.allocate("int32", (1,), name="box_idx", scope="local")
box_idx[0] = 0
with ib.while_loop(
tvm.tir.all(box_idx[0] < nkeep, num_valid_boxes_local[0] < max_output_size)
):
# Proceed to the inner loop if the box with id box_idx is still valid
with ib.if_scope(out_scores[i, box_idx[0]] > -1.0):
nms_inner_loop(ib, i, box_idx[0], nkeep)
box_idx[0] += 1
with ib.if_scope(tx + 0 == 0):
num_valid_boxes[i] = num_valid_boxes_local[0]
with ib.else_scope():
num_valid_boxes[i] = 0
return ib.get()
def nms_ir(
data,
sorted_index,
valid_count,
indices,
out_bboxes,
out_scores,
out_class_ids,
out_features,
box_indices,
num_valid_boxes,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
id_index,
score_index,
return_indices,
):
"""Low level IR routing for transform location in multibox_detection operator.
Parameters
----------
data : Buffer
Buffer of output boxes with class and score.
sorted_index : Buffer
Buffer of output box indexes sorted by score.
valid_count : Buffer
Buffer of number of valid output boxes.
indices : Buffer
indices in original tensor, with shape [batch_size, num_anchors],
represents the index of box in original data. It could be the third
output out_indices of get_valid_counts. The values in the second
dimension are like the output of arange(num_anchors) if get_valid_counts
is not used before non_max_suppression.
out_bboxes : Buffer
Output buffer, to be filled with sorted box coordinates.
out_scores : Buffer
Output buffer, to be filled with sorted scores.
out_class_ids : Buffer
Output buffer, to be filled with sorted class ids.
box_indices : Buffer
A indices tensor mapping sorted indices to original indices
This is the first output of NMS when return_indices=True.
num_valid_boxes : Buffer
Record the number of boxes that have survived IOU tests.
This is the second output of NMS when return_indices=True.
max_output_size : int
Max number of output valid boxes for each instance.
By default all valid boxes are returned.
iou_threshold : float
Overlapping (IoU) threshold used to suppress objects with a smaller score.
force_suppress : boolean
Whether to suppress all detections regardless of class_id.
top_k : int
Keep maximum top k detections before nms, -1 for no limit.
coord_start : int
Start index of the consecutive 4 coordinates.
id_index : int
index of the class categories, -1 to disable.
score_index : optional, int
Index of the scores/confidence of boxes.
return_indices : boolean
Whether to return box indices in input data.
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
box_data_length = data.shape[2]
num_features = out_features.shape[2]
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data)
sorted_index = ib.buffer_ptr(sorted_index)
valid_count = ib.buffer_ptr(valid_count)
indices = ib.buffer_ptr(indices)
# outputs
out_bboxes = ib.buffer_ptr(out_bboxes)
out_scores = ib.buffer_ptr(out_scores)
out_class_ids = ib.buffer_ptr(out_class_ids)
out_features = ib.buffer_ptr(out_features)
box_indices = ib.buffer_ptr(box_indices)
num_valid_boxes = ib.buffer_ptr(num_valid_boxes)
if isinstance(iou_threshold, float):
iou_threshold = tvm.tir.FloatImm("float32", iou_threshold)
top_k = tvm.tir.IntImm("int32", top_k)
coord_start = tvm.tir.IntImm("int32", coord_start)
id_index = tvm.tir.IntImm("int32", id_index)
score_index = tvm.tir.IntImm("int32", score_index)
force_suppress = tvm.tir.IntImm("int32", 1 if force_suppress else 0)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(num_anchors, max_threads)
nthread_by = batch_size
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(by, "thread_extent", nthread_by)
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
i = by
base_src_idx = i * num_anchors * box_data_length
base_bbox_idx = i * num_anchors * 4
base_features_idx = i * num_anchors * num_features
with ib.if_scope(tvm.tir.all(iou_threshold > 0, valid_count[i] > 0)):
# Reorder output
nkeep = if_then_else(
tvm.tir.all(top_k > 0, top_k < valid_count[i]), top_k, valid_count[i]
)
j = bx * max_threads + tx
with ib.if_scope(j < nkeep):
src_idx = base_src_idx + sorted_index[i * num_anchors + j] * box_data_length
with ib.for_range(0, 4, kind="unroll") as k:
out_bboxes[(base_bbox_idx + j * 4 + k)] = data[src_idx + coord_start + k]
with ib.for_range(0, num_features, kind="unroll") as k:
out_features[(base_features_idx + j * num_features + k)] = data[
src_idx + coord_start + 4 + k
]
out_scores[i * num_anchors + j] = data[src_idx + score_index]
if id_index >= 0:
out_class_ids[i * num_anchors + j] = data[src_idx + id_index]
with ib.else_scope():
# Indices > nkeep are discarded
# Only needed for return_indices = False case
if return_indices is False:
with ib.if_scope(j < num_anchors):
with ib.for_range(0, 4, kind="unroll") as k:
out_bboxes[(base_bbox_idx + j * 4 + k)] = -1.0
with ib.for_range(0, num_features, kind="unroll") as k:
out_features[(base_features_idx + j * num_features + k)] = -1.0
out_scores[i, j] = -1.0
if id_index >= 0:
out_class_ids[i, j] = -1.0
if return_indices:
with ib.if_scope(j < num_anchors):
box_indices[i * num_anchors + j] = -1
with ib.else_scope():
# Need to copy all boxes if not using return_indices
bounds = valid_count[i] if return_indices else num_anchors
with ib.if_scope(j < bounds):
src_offset = base_src_idx + j * box_data_length
with ib.for_range(0, 4, kind="unroll") as k:
out_bboxes[base_bbox_idx + j * 4 + k] = data[src_offset + coord_start + k]
with ib.for_range(0, num_features, kind="unroll") as k:
out_features[(base_features_idx + j * num_features + k)] = data[
src_offset + coord_start + 4 + k
]
out_scores[i * num_anchors + j] = data[src_offset + score_index]
if id_index >= 0:
out_class_ids[i * num_anchors + j] = data[src_offset + id_index]
box_indices[i * num_anchors + j] = j
if isinstance(max_output_size, int):
max_output_size = tvm.tir.const(max_output_size)
def calc_overlap(i, j, k):
offset_j = j * 4
offset_k = k * 4
base_bbox_idx = i * num_anchors * 4
return calculate_overlap(
out_bboxes,
base_bbox_idx + offset_j,
base_bbox_idx + offset_k,
)
def on_new_valid_box(ib, tid, num_current_valid_box, i, j):
# When return_indices is False, no need to populate box_indices
if return_indices:
with ib.if_scope(tid + 0 == 0):
orig_idx = sorted_index[i * num_anchors + j]
box_indices[i, num_current_valid_box] = indices[i, orig_idx]
def on_new_invalidated_box(i, k):
if return_indices is False and id_index >= 0:
out_class_ids[i, k] = -1.0
def needs_bbox_check(i, j, k):
return tvm.tir.any(
force_suppress > 0,
id_index < 0,
out_class_ids[i, k] == out_class_ids[i, j],
)
return _nms_loop(
ib,
batch_size,
top_k,
iou_threshold,
max_output_size,
valid_count,
on_new_valid_box,
on_new_invalidated_box,
needs_bbox_check,
calc_overlap,
out_scores,
num_valid_boxes,
)
def _fetch_score_ir(data, score, axis):
"""
Fetch score from data.
This routine is required for dynamic shape nms.
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
elem_length = data.shape[2]
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data)
score = ib.buffer_ptr(score)
with ib.if_scope(num_anchors > 0):
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = batch_size * num_anchors // max_threads + 1
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size * num_anchors):
score[tid] = data[tid * elem_length + axis]
return ib.get()
def _dispatch_sort(scores, ret_type="indices"):
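"""Argsort scores along axis 1 in descending order, dispatching to Thrust/rocThrust when the current target supports it."""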
target = tvm.target.Target.current()
if target and (
can_use_thrust(target, "tvm.contrib.thrust.sort")
or can_use_rocthrust(target, "tvm.contrib.thrust.sort")
):
return argsort_thrust(scores, axis=1, is_ascend=False, dtype="int32", ret_type=ret_type)
return argsort(scores, axis=1, is_ascend=False, dtype="int32", ret_type=ret_type)
def _get_sorted_indices(data, data_buf, score_index, score_shape):
"""Extract a 1D score tensor from the packed input and do argsort on it."""
score_buf = tvm.tir.decl_buffer(score_shape, data.dtype, "score_buf", data_alignment=8)
score_tensor = te.extern(
[score_shape],
[data],
lambda ins, outs: _fetch_score_ir(
ins[0],
outs[0],
score_index,
),
dtype=[data.dtype],
in_buffers=[data_buf],
out_buffers=[score_buf],
name="fetch_score",
tag="fetch_score",
)
return _dispatch_sort(score_tensor)
def _run_nms(
data,
data_buf,
sort_tensor,
valid_count,
indices,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
id_index,
score_index,
return_indices,
):
"""Run NMS using sorted scores."""
sort_tensor_buf = tvm.tir.decl_buffer(
sort_tensor.shape, sort_tensor.dtype, "sort_tensor_buf", data_alignment=8
)
valid_count_dtype = "int32"
valid_count_buf = tvm.tir.decl_buffer(
valid_count.shape, valid_count_dtype, "valid_count_buf", data_alignment=4
)
indices_buf = tvm.tir.decl_buffer(indices.shape, indices.dtype, "indices_buf", data_alignment=8)
batch_size = data.shape[0]
num_anchors = data.shape[1]
# Number of extra features per box beyond coords, score, and id.
num_features = data.shape[2] - 6 if id_index >= 0 else data.shape[2] - 5
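# (6 values per box - class id, score, and 4 coordinates - when id_index >= 0, otherwise 5, so the remainder are treated as extra features)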
# output shapes
bbox_shape = (batch_size, num_anchors, 4)
score_shape = (batch_size, num_anchors)
class_id_shape = score_shape
out_features_shape = (batch_size, num_anchors, num_features)
box_indices_shape = score_shape
num_valid_boxes_shape = (batch_size, 1)
return te.extern(
[
bbox_shape,
score_shape,
class_id_shape,
out_features_shape,
box_indices_shape,
num_valid_boxes_shape,
],
[data, sort_tensor, valid_count, indices],
lambda ins, outs: nms_ir(
ins[0],
ins[1],
ins[2],
ins[3],
outs[0], # sorted bbox
outs[1], # sorted scores
outs[2], # sorted class ids
outs[3], # sorted box feats
outs[4], # box_indices
outs[5], # num_valid_boxes
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
id_index,
score_index,
return_indices,
),
dtype=[data.dtype, "float32", "float32", "float32", "int32", "int32"],
in_buffers=[data_buf, sort_tensor_buf, valid_count_buf, indices_buf],
name="nms",
tag="nms",
)
def _concatenate_outputs(
out_bboxes,
out_scores,
out_class_ids,
out_features,
out_shape,
coord_start,
score_index,
id_index,
):
"""Pack the results from NMS into a single 5D or 6D tensor."""
batch_size = out_bboxes.shape[0]
num_anchors = out_bboxes.shape[1]
num_features = out_features.shape[2]
def ir(out_bboxes, out_scores, out_class_ids, out):
ib = tvm.tir.ir_builder.create()
out_bboxes = ib.buffer_ptr(out_bboxes)
out_scores = ib.buffer_ptr(out_scores)
out_class_ids = ib.buffer_ptr(out_class_ids)
out = ib.buffer_ptr(out)
with ib.if_scope(num_anchors > 0):
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = ceil_div(num_anchors, nthread_tx)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", batch_size)
tid = bx * nthread_tx + tx
i = by
with ib.if_scope(tid < num_anchors):
with ib.for_range(0, 4, kind="unroll") as j:
out[i, tid, coord_start + j] = out_bboxes[i, tid, j]
with ib.for_range(0, num_features, kind="unroll") as j:
out[i, tid, coord_start + 4 + j] = out_features[i, tid, j]
out[i, tid, score_index] = out_scores[i, tid]
if id_index >= 0:
out[i, tid, id_index] = out_class_ids[i, tid]
return ib.get()
return te.extern(
[out_shape],
[out_bboxes, out_scores, out_class_ids],
lambda ins, outs: ir(ins[0], ins[1], ins[2], outs[0]),
dtype=["float32"],
name="nms_output_concat",
tag="nms_output_concat",
)
def non_max_suppression(
data,
valid_count,
indices,
max_output_size=-1,
iou_threshold=0.5,
force_suppress=False,
top_k=-1,
coord_start=2,
score_index=1,
id_index=0,
return_indices=True,
invalid_to_bottom=False,
):
"""Non-maximum suppression operator for object detection.
Parameters
----------
data : tvm.te.Tensor
3-D tensor with shape [batch_size, num_anchors, elem_length].
The last dimension should be in format of
[class_id, score, box_left, box_top, box_right, box_bottom].
It could be the second output out_tensor of get_valid_counts.
valid_count : tvm.te.Tensor
1-D tensor for valid number of boxes. It could be the output
valid_count of get_valid_counts.
indices : tvm.te.Tensor
2-D tensor with shape [batch_size, num_anchors], represents
the index of box in original data. It could be the third
output out_indices of get_valid_counts. The values in the
second dimension are like the output of arange(num_anchors)
if get_valid_counts is not used before non_max_suppression.
max_output_size : optional, tvm.te.Tensor or int
Max number of output valid boxes for each instance.
By default all valid boxes are returned.
iou_threshold : optional, tvm.te.Tensor or float
Non-maximum suppression threshold.
force_suppress : optional, boolean
Whether to suppress all detections regardless of class_id.
top_k : optional, int
Keep maximum top k detections before nms, -1 for no limit.
coord_start : required, int
Start index of the consecutive 4 coordinates.
score_index : optional, int
Index of the scores/confidence of boxes.
id_index : optional, int
index of the class categories, -1 to disable.
return_indices : boolean
Whether to return box indices in input data.
invalid_to_bottom : optional, boolean
Whether to move all valid bounding boxes to the top.
Returns
-------
out : tvm.te.Tensor
3-D tensor with shape [batch_size, num_anchors, elem_length].
Example
--------
.. code-block:: python
# An example to use nms
dshape = (1, 5, 6)
data = te.placeholder(dshape, name="data")
valid_count = te.placeholder((dshape[0],), dtype="int32", name="valid_count")
iou_threshold = 0.7
force_suppress = True
top_k = -1
out = non_max_suppression(data=data, valid_count=valid_count, iou_threshold=iou_threshold,
force_suppress=force_suppress, top_k=top_k, return_indices=False)
np_data = np.random.uniform(size=dshape).astype(data.dtype)
np_valid_count = np.array([4])
s = topi.generic.schedule_nms(out)
f = tvm.build(s, [data, valid_count, out], "cuda")
dev = tvm.cuda(0)
tvm_data = tvm.nd.array(np_data, dev)
tvm_valid_count = tvm.nd.array(np_valid_count, dev)
tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), dev)
f(tvm_data, tvm_valid_count, tvm_out)
"""
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
sort_tensor = _get_sorted_indices(data, data_buf, score_index, (data.shape[0], data.shape[1]))
out_bboxes, out_scores, out_class_ids, out_features, box_indices, num_valid_boxes = _run_nms(
data,
data_buf,
sort_tensor,
valid_count,
indices,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
id_index,
score_index,
return_indices,
)
if return_indices:
return [box_indices, num_valid_boxes]
return _concatenate_outputs(
out_bboxes,
out_scores,
out_class_ids,
out_features,
data.shape,
coord_start,
score_index,
id_index,
)
def _get_valid_box_count(scores, score_threshold):
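"""Count, for each row of the (descending) sorted scores, how many boxes pass score_threshold, via a per-row binary search."""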
batch_classes, num_boxes = scores.shape
def searchsorted_ir(scores, valid_count):
ib = tvm.tir.ir_builder.create()
scores = ib.buffer_ptr(scores)
valid_count = ib.buffer_ptr(valid_count)
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.new_scope():
ib.scope_attr(bx, "thread_extent", ceil_div(batch_classes, max_threads))
ib.scope_attr(tx, "thread_extent", max_threads)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_classes):
binary_search(ib, tid, num_boxes, scores, score_threshold, valid_count)
return ib.get()
scores_buf = tvm.tir.decl_buffer(scores.shape, scores.dtype, "scores_buf", data_alignment=8)
return te.extern(
[(batch_classes,)],
[scores],
lambda ins, outs: searchsorted_ir(ins[0], outs[0]),
dtype=["int32"],
in_buffers=[scores_buf],
name="searchsorted",
tag="searchsorted",
)
def _collect_selected_indices_ir(num_class, selected_indices, num_detections, row_offsets, out):
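"""Scatter the per-(batch, class) selected box indices into the flat ONNX-style output of (batch_id, class_id, box_index) rows, using row_offsets to locate each row's slot."""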
batch_classes, num_boxes = selected_indices.shape
ib = tvm.tir.ir_builder.create()
selected_indices = ib.buffer_ptr(selected_indices)
num_detections = ib.buffer_ptr(num_detections)
row_offsets = ib.buffer_ptr(row_offsets)
out = ib.buffer_ptr(out)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = ceil_div(num_boxes, nthread_tx)
nthread_by = batch_classes
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
with ib.new_scope():
idx = bx * nthread_tx + tx
idy = cast(by, "int64")
batch_id = idy // num_class
class_id = idy % num_class
with ib.if_scope(idx < num_detections[idy]):
out[row_offsets[idy] + idx, 0] = batch_id
out[row_offsets[idy] + idx, 1] = class_id
out[row_offsets[idy] + idx, 2] = cast(selected_indices[idy, idx], "int64")
return ib.get()
def _collect_selected_indices_and_scores_ir(
selected_indices,
selected_scores,
num_detections,
row_offsets,
num_total_detections,
collected_indices,
collected_scores,
):
batch_size, num_class = row_offsets.shape
num_boxes = selected_indices.shape[1]
ib = tvm.tir.ir_builder.create()
selected_indices = ib.buffer_ptr(selected_indices)
selected_scores = ib.buffer_ptr(selected_scores)
num_detections = ib.buffer_ptr(num_detections)
row_offsets = ib.buffer_ptr(row_offsets)
num_total_detections = ib.buffer_ptr(num_total_detections)
collected_indices = ib.buffer_ptr(collected_indices)
collected_scores = ib.buffer_ptr(collected_scores)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = ceil_div(num_boxes, nthread_tx)
nthread_by = batch_size * num_class
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
zero = cast(0, "int64")
with ib.new_scope():
idx = bx * nthread_tx + tx
idy = cast(by, "int64")
batch_id = idy // num_class
class_id = idy % num_class
with ib.if_scope(idx < num_detections[batch_id, class_id]):
offset = row_offsets[batch_id, class_id] + idx
collected_indices[batch_id, offset, 0] = class_id
collected_indices[batch_id, offset, 1] = cast(selected_indices[idy, idx], "int64")
collected_scores[batch_id, offset] = selected_scores[idy, idx]
with ib.else_scope():
with ib.if_scope(idx < num_boxes):
offset = (
num_total_detections[batch_id]
+ class_id * num_boxes
- row_offsets[batch_id, class_id]
+ idx
- num_detections[batch_id, class_id]
)
collected_indices[batch_id, offset, 0] = zero
collected_indices[batch_id, offset, 1] = zero
collected_scores[batch_id, offset] = 0.0
return ib.get()
def all_class_non_max_suppression(
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
output_format="onnx",
):
"""Non-maximum suppression operator for object detection, corresponding to ONNX
NonMaxSuppression and TensorFlow combined_non_max_suppression.
NMS is performed for each class separately.
Parameters
----------
boxes : tvm.te.Tensor
3-D tensor with shape (batch_size, num_boxes, 4)
scores: tvm.te.Tensor
3-D tensor with shape (batch_size, num_classes, num_boxes)
max_output_boxes_per_class : int or tvm.te.Tensor, optional
The maximum number of output selected boxes per class
iou_threshold : float or tvm.te.Tensor, optional
IoU test threshold
score_threshold : float or tvm.te.Tensor, optional
Score threshold to filter out low score boxes early
output_format : str, optional
"onnx" or "tensorflow", see below
Returns
-------
out : list of tvm.te.Tensor
If `output_format` is "onnx", the output is two tensors. The first is `indices` of size
`(batch_size * num_class * num_boxes, 3)` and the second is a scalar tensor
`num_total_detection` of shape `(1,)` representing the total number of selected
boxes. The three values in `indices` encode batch, class, and box indices.
Rows of `indices` are ordered such that selected boxes from batch 0, class 0 come
first, in descending order of scores, followed by boxes from batch 0, class 1, and so on. Out
of `batch_size * num_class * num_boxes` rows of indices, only the first `num_total_detection`
rows are valid.
If `output_format` is "tensorflow", the output is three tensors. The first
is `indices` of size `(batch_size, num_class * num_boxes, 2)`, the second is `scores` of
size `(batch_size, num_class * num_boxes)`, and the third is `num_total_detection` of size
`(batch_size,)` representing the total number of selected boxes per batch. The two values
in `indices` encode class and box indices. Of the `num_class * num_boxes` boxes in `indices` at
batch b, only the first `num_total_detection[b]` entries are valid. The second axis of
`indices` and `scores` is sorted within each class by box score, but not across classes.
So the box indices and scores for class 0 come first in sorted order, followed by
class 1, and so on.
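Example
-------
A minimal sketch of calling this operator with the ONNX output format (all
shapes and thresholds below are illustrative only):
.. code-block:: python
boxes = te.placeholder((1, 100, 4), name="boxes", dtype="float32")
scores = te.placeholder((1, 80, 100), name="scores", dtype="float32")
selected_indices, num_total_detection = all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class=10, iou_threshold=0.5,
score_threshold=0.1, output_format="onnx"
)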
"""
batch, num_class, num_boxes = scores.shape
scores = reshape(scores, (batch * num_class, num_boxes))
sorted_scores, sorted_indices = _dispatch_sort(scores, ret_type="both")
valid_count = _get_valid_box_count(sorted_scores, score_threshold)
selected_indices, selected_scores, num_detections = run_all_class_nms(
boxes,
sorted_scores,
sorted_indices,
valid_count,
max_output_boxes_per_class,
iou_threshold,
_nms_loop,
return_scores=(output_format == "tensorflow"),
)
if output_format == "onnx":
row_offsets, num_total_detections = exclusive_scan(
num_detections, return_reduction=True, output_dtype="int64"
)
selected_indices = collect_selected_indices(
num_class, selected_indices, num_detections, row_offsets, _collect_selected_indices_ir
)
return [selected_indices, num_total_detections]
num_detections_per_batch = reshape(num_detections, (batch, num_class))
row_offsets, num_total_detections = exclusive_scan(
num_detections_per_batch, return_reduction=True, output_dtype="int64", axis=1
)
selected_indices, selected_scores = collect_selected_indices_and_scores(
selected_indices,
selected_scores,
num_detections_per_batch,
row_offsets,
num_total_detections,
_collect_selected_indices_and_scores_ir,
)
return [selected_indices, selected_scores, num_total_detections]
|
languages/python/sqlalchemy-oso/sqlalchemy_oso/oso.py
|
connec/oso
| 2,167 |
65548
|
from oso import Oso
from .auth import register_models
class SQLAlchemyOso(Oso):
"""The central object to manage application policy state, e.g.
the policy data, and verify requests when using Oso with SQLAlchemy.
Supports SQLAlchemy-specific functionality, including data filtering.
Accepts a SQLAlchemy declarative_base on initialization, which is used to register
all relevant SQLAlchemy models with Oso.
>>> from sqlalchemy_oso import SQLAlchemyOso
>>> from sqlalchemy.ext.declarative import declarative_base
>>> Base = declarative_base(name="MyBaseModel")
>>> SQLAlchemyOso(Base)
<sqlalchemy_oso.oso.SQLAlchemyOso object at 0x...>
"""
def __init__(self, sqlalchemy_base):
super().__init__()
# Register all sqlalchemy models on sqlalchemy_base
register_models(self, sqlalchemy_base)
self.base = sqlalchemy_base
|
stevedore/enabled.py
|
jaraco/stevedore
| 133 |
65561
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from .extension import ExtensionManager
LOG = logging.getLogger(__name__)
class EnabledExtensionManager(ExtensionManager):
"""Loads only plugins that pass a check function.
The check_func argument should return a boolean, with ``True``
indicating that the extension should be loaded and made available
and ``False`` indicating that the extension should be ignored.
:param namespace: The namespace for the entry points.
:type namespace: str
:param check_func: Function to determine which extensions to load.
:type check_func: callable, taking an :class:`Extension`
instance as argument
:param invoke_on_load: Boolean controlling whether to invoke the
object returned by the entry point after the driver is loaded.
:type invoke_on_load: bool
:param invoke_args: Positional arguments to pass when invoking
the object returned by the entry point. Only used if invoke_on_load
is True.
:type invoke_args: tuple
:param invoke_kwds: Named arguments to pass when invoking
the object returned by the entry point. Only used if invoke_on_load
is True.
:type invoke_kwds: dict
:param propagate_map_exceptions: Boolean controlling whether exceptions
are propagated up through the map call or whether they are logged and
then ignored
:type propagate_map_exceptions: bool
:param on_load_failure_callback: Callback function that will be called when
an entrypoint can not be loaded. The arguments that will be provided
when this is called (when an entrypoint fails to load) are
(manager, entrypoint, exception)
:type on_load_failure_callback: function
:param verify_requirements: Use setuptools to enforce the
dependencies of the plugin(s) being loaded. Defaults to False.
:type verify_requirements: bool
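A minimal usage sketch (the namespace and check function below are purely
illustrative)::
mgr = EnabledExtensionManager(
namespace='example.plugins',
check_func=lambda ext: not ext.name.startswith('disabled_'),
invoke_on_load=True,
)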
"""
def __init__(self, namespace, check_func, invoke_on_load=False,
invoke_args=(), invoke_kwds={},
propagate_map_exceptions=False,
on_load_failure_callback=None,
verify_requirements=False,):
self.check_func = check_func
super(EnabledExtensionManager, self).__init__(
namespace,
invoke_on_load=invoke_on_load,
invoke_args=invoke_args,
invoke_kwds=invoke_kwds,
propagate_map_exceptions=propagate_map_exceptions,
on_load_failure_callback=on_load_failure_callback,
verify_requirements=verify_requirements,
)
def _load_one_plugin(self, ep, invoke_on_load, invoke_args, invoke_kwds,
verify_requirements):
ext = super(EnabledExtensionManager, self)._load_one_plugin(
ep, invoke_on_load, invoke_args, invoke_kwds,
verify_requirements,
)
if ext and not self.check_func(ext):
LOG.debug('ignoring extension %r', ep.name)
return None
return ext
|
se/vendor/kindleunpack/mobi_k8proc.py
|
vr8hub/tools
| 985 |
65565
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals, division, absolute_import, print_function
from .compatibility_utils import PY2, bstr, utf8_str
if PY2:
range = xrange
import os
import struct
# note: struct pack, unpack, unpack_from all require bytestring format
# data all the way up to at least python 2.7.5; python 3 is okay with bytestrings
import re
# note: re requires the pattern to be the exact same type as the data to be searched in python3
# but u"" is not allowed for the pattern itself only b""
from .mobi_index import MobiIndex
from .mobi_utils import fromBase32
from .unipath import pathof
_guide_types = [b'cover',b'title-page',b'toc',b'index',b'glossary',b'acknowledgements',
b'bibliography',b'colophon',b'copyright-page',b'dedication',
b'epigraph',b'foreword',b'loi',b'lot',b'notes',b'preface',b'text']
# locate beginning and ending positions of tag with specific aid attribute
def locate_beg_end_of_tag(ml, aid):
pattern = utf8_str(r'''<[^>]*\said\s*=\s*['"]%s['"][^>]*>''' % aid)
aid_pattern = re.compile(pattern,re.IGNORECASE)
for m in re.finditer(aid_pattern, ml):
plt = m.start()
pgt = ml.find(b'>',plt+1)
return plt, pgt
return 0, 0
# iterate over all tags in block in reverse order, i.e. last tag to first tag
def reverse_tag_iter(block):
end = len(block)
while True:
pgt = block.rfind(b'>', 0, end)
if pgt == -1:
break
plt = block.rfind(b'<', 0, pgt)
if plt == -1:
break
yield block[plt:pgt+1]
end = plt
class K8Processor:
def __init__(self, mh, sect, files, debug=False):
self.sect = sect
self.files = files
self.mi = MobiIndex(sect)
self.mh = mh
self.skelidx = mh.skelidx
self.fragidx = mh.fragidx
self.guideidx = mh.guideidx
self.fdst = mh.fdst
self.flowmap = {}
self.flows = None
self.flowinfo = []
self.parts = None
self.partinfo = []
self.linked_aids = set()
self.fdsttbl = [0, 0xffffffff]
self.DEBUG = debug
# read in and parse the FDST info which is very similar in format to the Palm DB section
# parsing except it provides offsets into rawML file and not the Palm DB file
# this is needed to split up the final css, svg, etc flow section
# that can exist at the end of the rawML file
if self.fdst != 0xffffffff:
header = self.sect.loadSection(self.fdst)
if header[0:4] == b"FDST":
num_sections, = struct.unpack_from(b'>L', header, 0x08)
self.fdsttbl = struct.unpack_from(bstr('>%dL' % (num_sections*2)), header, 12)[::2] + (mh.rawSize, )
sect.setsectiondescription(self.fdst,"KF8 FDST INDX")
if self.DEBUG:
print("\nFDST Section Map: %d sections" % num_sections)
for j in range(num_sections):
print("Section %d: 0x%08X - 0x%08X" % (j, self.fdsttbl[j],self.fdsttbl[j+1]))
else:
print("\nError: K8 Mobi with Missing FDST info")
# read/process skeleton index info to create the skeleton table
skeltbl = []
if self.skelidx != 0xffffffff:
# for i in range(2):
# fname = 'skel%04d.dat' % i
# data = self.sect.loadSection(self.skelidx + i)
# with open(pathof(fname), 'wb') as f:
# f.write(data)
outtbl, ctoc_text = self.mi.getIndexData(self.skelidx, "KF8 Skeleton")
fileptr = 0
for [text, tagMap] in outtbl:
# file number, skeleton name, fragtbl record count, start position, length
skeltbl.append([fileptr, text, tagMap[1][0], tagMap[6][0], tagMap[6][1]])
fileptr += 1
self.skeltbl = skeltbl
if self.DEBUG:
print("\nSkel Table: %d entries" % len(self.skeltbl))
print("table: filenum, skeleton name, frag tbl record count, start position, length")
for j in range(len(self.skeltbl)):
print(self.skeltbl[j])
# read/process the fragment index to create the fragment table
fragtbl = []
if self.fragidx != 0xffffffff:
# for i in range(3):
# fname = 'frag%04d.dat' % i
# data = self.sect.loadSection(self.fragidx + i)
# with open(pathof(fname), 'wb') as f:
# f.write(data)
outtbl, ctoc_text = self.mi.getIndexData(self.fragidx, "KF8 Fragment")
for [text, tagMap] in outtbl:
# insert position, ctoc offset (aidtext), file number, sequence number, start position, length
ctocoffset = tagMap[2][0]
ctocdata = ctoc_text[ctocoffset]
fragtbl.append([int(text), ctocdata, tagMap[3][0], tagMap[4][0], tagMap[6][0], tagMap[6][1]])
self.fragtbl = fragtbl
if self.DEBUG:
print("\nFragment Table: %d entries" % len(self.fragtbl))
print("table: file position, link id text, file num, sequence number, start position, length")
for j in range(len(self.fragtbl)):
print(self.fragtbl[j])
# read / process guide index for guide elements of opf
guidetbl = []
if self.guideidx != 0xffffffff:
# for i in range(3):
# fname = 'guide%04d.dat' % i
# data = self.sect.loadSection(self.guideidx + i)
# with open(pathof(fname), 'wb') as f:
# f.write(data)
outtbl, ctoc_text = self.mi.getIndexData(self.guideidx, "KF8 Guide elements")
for [text, tagMap] in outtbl:
# ref_type, ref_title, frag number
ctocoffset = tagMap[1][0]
ref_title = ctoc_text[ctocoffset]
ref_type = text
fileno = None
if 3 in tagMap:
fileno = tagMap[3][0]
if 6 in tagMap:
fileno = tagMap[6][0]
guidetbl.append([ref_type, ref_title, fileno])
self.guidetbl = guidetbl
if self.DEBUG:
print("\nGuide Table: %d entries" % len(self.guidetbl))
print("table: ref_type, ref_title, fragtbl entry number")
for j in range(len(self.guidetbl)):
print(self.guidetbl[j])
def buildParts(self, rawML):
# now split the rawML into its flow pieces
self.flows = []
for j in range(0, len(self.fdsttbl)-1):
start = self.fdsttbl[j]
end = self.fdsttbl[j+1]
self.flows.append(rawML[start:end])
# the first piece represents the xhtml text
text = self.flows[0]
self.flows[0] = b''
# walk the <skeleton> and fragment tables to build original source xhtml files
# *without* destroying any file position information needed for later href processing
# and create final list of file separation start: stop points and etc in partinfo
if self.DEBUG:
print("\nRebuilding flow piece 0: the main body of the ebook")
self.parts = []
self.partinfo = []
fragptr = 0
baseptr = 0
cnt = 0
for [skelnum, skelname, fragcnt, skelpos, skellen] in self.skeltbl:
baseptr = skelpos + skellen
skeleton = text[skelpos: baseptr]
for i in range(fragcnt):
[insertpos, idtext, filenum, seqnum, startpos, length] = self.fragtbl[fragptr]
aidtext = idtext[12:-2]
if i == 0:
filename = 'part%04d.xhtml' % filenum
slice = text[baseptr: baseptr + length]
insertpos = insertpos - skelpos
head = skeleton[:insertpos]
tail = skeleton[insertpos:]
actual_inspos = insertpos
if (tail.find(b'>') < tail.find(b'<') or head.rfind(b'>') < head.rfind(b'<')):
# There is an incomplete tag in either the head or tail.
# This can happen for some badly formed KF8 files
print('The fragment table for %s has incorrect insert position. Calculating manually.' % skelname)
bp, ep = locate_beg_end_of_tag(skeleton, aidtext)
if bp != ep:
actual_inspos = ep + 1 + startpos
if insertpos != actual_inspos:
print("fixed corrupt fragment table insert position", insertpos+skelpos, actual_inspos+skelpos)
insertpos = actual_inspos
self.fragtbl[fragptr][0] = actual_inspos + skelpos
skeleton = skeleton[0:insertpos] + slice + skeleton[insertpos:]
baseptr = baseptr + length
fragptr += 1
cnt += 1
self.parts.append(skeleton)
self.partinfo.append([skelnum, 'Text', filename, skelpos, baseptr, aidtext])
assembled_text = b''.join(self.parts)
if self.DEBUG:
outassembled = os.path.join(self.files.k8dir, 'assembled_text.dat')
with open(pathof(outassembled),'wb') as f:
f.write(assembled_text)
# The primary css style sheet is typically stored next followed by any
# snippets of code that were previously inlined in the
# original xhtml but have been stripped out and placed here.
# This can include local CDATA snippets and svg sections.
# The problem is that for most browsers and ereaders, you can not
# use <img src="imageXXXX.svg" /> to import any svg image that itself
# properly uses an <image/> tag to import some raster image - it
# should work according to the spec but does not for almost all browsers
# and ereaders and causes epub validation issues because those raster
# images are in the manifest but not in the xhtml text - since they are only
# referenced from an svg image
# So we need to check the remaining flow pieces to see if they are css
# or svg images. if svg images, we must check if they have an <image />
# and if so inline them into the xhtml text pieces.
# there may be other sorts of pieces stored here but until we see one
# in the wild to reverse engineer we won't be able to tell
self.flowinfo.append([None, None, None, None])
svg_tag_pattern = re.compile(br'''(<svg[^>]*>)''', re.IGNORECASE)
image_tag_pattern = re.compile(br'''(<image[^>]*>)''', re.IGNORECASE)
for j in range(1,len(self.flows)):
flowpart = self.flows[j]
nstr = '%04d' % j
m = re.search(svg_tag_pattern, flowpart)
if m is not None:
# svg
ptype = b'svg'
start = m.start()
m2 = re.search(image_tag_pattern, flowpart)
if m2 is not None:
pformat = b'inline'
pdir = None
fname = None
# strip off anything before <svg if inlining
flowpart = flowpart[start:]
else:
pformat = b'file'
pdir = "Images"
fname = 'svgimg' + nstr + '.svg'
else:
# search for CDATA and if exists inline it
if flowpart.find(b'[CDATA[') >= 0:
ptype = b'css'
flowpart = b'<style type="text/css">\n' + flowpart + b'\n</style>\n'
pformat = b'inline'
pdir = None
fname = None
else:
# css - assume as standalone css file
ptype = b'css'
pformat = b'file'
pdir = "Styles"
fname = 'style' + nstr + '.css'
self.flows[j] = flowpart
self.flowinfo.append([ptype, pformat, pdir, fname])
if self.DEBUG:
print("\nFlow Map: %d entries" % len(self.flowinfo))
for fi in self.flowinfo:
print(fi)
print("\n")
print("\nXHTML File Part Position Information: %d entries" % len(self.partinfo))
for pi in self.partinfo:
print(pi)
if False: # self.Debug:
# dump all of the locations of the aid tags used in TEXT
# find id links only inside of tags
# inside any < > pair find all "aid=' and return whatever is inside the quotes
# [^>]* means match any amount of chars except for '>' char
# [^'"] match any amount of chars except for the quote character
# \s* means match any amount of whitespace
print("\npositions of all aid= pieces")
id_pattern = re.compile(br'''<[^>]*\said\s*=\s*['"]([^'"]*)['"][^>]*>''',re.IGNORECASE)
for m in re.finditer(id_pattern, rawML):
[filename, partnum, start, end] = self.getFileInfo(m.start())
[seqnum, idtext] = self.getFragTblInfo(m.start())
value = fromBase32(m.group(1))
print(" aid: %s value: %d at: %d -> part: %d, start: %d, end: %d" % (m.group(1), value, m.start(), partnum, start, end))
print(" %s fragtbl entry %d" % (idtext, seqnum))
return
# get information fragment table entry by pos
def getFragTblInfo(self, pos):
for j in range(len(self.fragtbl)):
[insertpos, idtext, filenum, seqnum, startpos, length] = self.fragtbl[j]
if pos >= insertpos and pos < (insertpos + length):
# why are these "in:" and "before:" prefixes added here?
return seqnum, b'in: ' + idtext
if pos < insertpos:
return seqnum, b'before: ' + idtext
return None, None
# get information about the part (file) that exists at pos in original rawML
def getFileInfo(self, pos):
for [partnum, pdir, filename, start, end, aidtext] in self.partinfo:
if pos >= start and pos < end:
return filename, partnum, start, end
return None, None, None, None
# accessor functions to properly protect the internal structure
def getNumberOfParts(self):
return len(self.parts)
def getPart(self,i):
if i >= 0 and i < len(self.parts):
return self.parts[i]
return None
def getPartInfo(self, i):
if i >= 0 and i < len(self.partinfo):
return self.partinfo[i]
return None
def getNumberOfFlows(self):
return len(self.flows)
def getFlow(self,i):
# note flows[0] is empty - it was all of the original text
if i > 0 and i < len(self.flows):
return self.flows[i]
return None
def getFlowInfo(self,i):
# note flowinfo[0] is empty - it was all of the original text
if i > 0 and i < len(self.flowinfo):
return self.flowinfo[i]
return None
def getIDTagByPosFid(self, posfid, offset):
# first convert kindle:pos:fid and offset info to position in file
# (fromBase32 can handle both string types on input)
row = fromBase32(posfid)
off = fromBase32(offset)
[insertpos, idtext, filenum, seqnm, startpos, length] = self.fragtbl[row]
pos = insertpos + off
fname, pn, skelpos, skelend = self.getFileInfo(pos)
if fname is None:
# pos does not exist
# default to skeleton pos instead
print("Link To Position", pos, "does not exist, retargeting to top of target")
pos = self.skeltbl[filenum][3]
fname, pn, skelpos, skelend = self.getFileInfo(pos)
# an existing "id=" or "name=" attribute must exist in original xhtml otherwise it would not have worked for linking.
# Amazon seems to have added its own additional "aid=" inside tags whose contents seem to represent
# some position information encoded as a Base32 name.
# so find the closest "id=" before the position in the file by actually searching in that file
idtext = self.getIDTag(pos)
return fname, idtext
def getIDTag(self, pos):
# find the first tag with a named anchor (name or id attribute) before pos
fname, pn, skelpos, skelend = self.getFileInfo(pos)
if pn is None and skelpos is None:
print("Error: getIDTag - no file contains ", pos)
textblock = self.parts[pn]
npos = pos - skelpos
# if npos is inside a tag then search all text before its end of tag marker
pgt = textblock.find(b'>',npos)
plt = textblock.find(b'<',npos)
if plt == npos or pgt < plt:
npos = pgt + 1
# find id and name attributes only inside of tags
# use a reverse tag search since that is faster
# inside any < > pair find "id=" and "name=" attributes return it
# [^>]* means match any amount of chars except for '>' char
# [^'"] match any amount of chars except for the quote character
# \s* means match any amount of whitespace
textblock = textblock[0:npos]
id_pattern = re.compile(br'''<[^>]*\sid\s*=\s*['"]([^'"]*)['"]''',re.IGNORECASE)
name_pattern = re.compile(br'''<[^>]*\sname\s*=\s*['"]([^'"]*)['"]''',re.IGNORECASE)
aid_pattern = re.compile(br'''<[^>]+\s(?:aid|AID)\s*=\s*['"]([^'"]+)['"]''')
for tag in reverse_tag_iter(textblock):
# any ids in the body should default to top of file
if tag[0:6] == b'<body ':
return b''
if tag[0:6] != b'<meta ':
m = id_pattern.match(tag) or name_pattern.match(tag)
if m is not None:
return m.group(1)
m = aid_pattern.match(tag)
if m is not None:
self.linked_aids.add(m.group(1))
return b'aid-' + m.group(1)
return b''
# do we need to do deep copying
def setParts(self, parts):
assert(len(parts) == len(self.parts))
for i in range(len(parts)):
self.parts[i] = parts[i]
# do we need to do deep copying
def setFlows(self, flows):
assert(len(flows) == len(self.flows))
for i in range(len(flows)):
self.flows[i] = flows[i]
# get information about the part (file) that exists at pos in original rawML
def getSkelInfo(self, pos):
for [partnum, pdir, filename, start, end, aidtext] in self.partinfo:
if pos >= start and pos < end:
return [partnum, pdir, filename, start, end, aidtext]
return [None, None, None, None, None, None]
# fileno is actually a reference into fragtbl (a fragment)
def getGuideText(self):
guidetext = b''
for [ref_type, ref_title, fileno] in self.guidetbl:
if ref_type == b'thumbimagestandard':
continue
if ref_type not in _guide_types and not ref_type.startswith(b'other.'):
if ref_type == b'start':
ref_type = b'text'
else:
ref_type = b'other.' + ref_type
[pos, idtext, filenum, seqnm, startpos, length] = self.fragtbl[fileno]
[pn, pdir, filename, skelpos, skelend, aidtext] = self.getSkelInfo(pos)
idtext = self.getIDTag(pos)
linktgt = filename.encode('utf-8')
if idtext != b'':
linktgt += b'#' + idtext
guidetext += b'<reference type="'+ref_type+b'" title="'+ref_title+b'" href="'+utf8_str(pdir)+b'/'+linktgt+b'" />\n'
# opf is encoded utf-8 so must convert any titles properly
guidetext = (guidetext.decode(self.mh.codec)).encode("utf-8")
return guidetext
def getPageIDTag(self, pos):
# find the first tag with a named anchor (name or id attribute) before pos
# but page map offsets need a little more leeway so if the offset points
# into a tag look for the next ending tag "/>" or "</" and start your search from there.
fname, pn, skelpos, skelend = self.getFileInfo(pos)
if pn is None and skelpos is None:
print("Error: getIDTag - no file contains ", pos)
textblock = self.parts[pn]
npos = pos - skelpos
# if npos is inside a tag then search all text before the next ending tag
pgt = textblock.find(b'>',npos)
plt = textblock.find(b'<',npos)
if plt == npos or pgt < plt:
# we are in a tag
# so find first ending tag
pend1 = textblock.find(b'/>', npos)
pend2 = textblock.find(b'</', npos)
if pend1 != -1 and pend2 != -1:
pend = min(pend1, pend2)
else:
pend = max(pend1, pend2)
if pend != -1:
npos = pend
else:
npos = pgt + 1
# find id and name attributes only inside of tags
# use a reverse tag search since that is faster
# inside any < > pair find "id=" and "name=" attributes return it
# [^>]* means match any amount of chars except for '>' char
# [^'"] match any amount of chars except for the quote character
# \s* means match any amount of whitespace
textblock = textblock[0:npos]
id_pattern = re.compile(br'''<[^>]*\sid\s*=\s*['"]([^'"]*)['"]''',re.IGNORECASE)
name_pattern = re.compile(br'''<[^>]*\sname\s*=\s*['"]([^'"]*)['"]''',re.IGNORECASE)
for tag in reverse_tag_iter(textblock):
# any ids in the body should default to top of file
if tag[0:6] == b'<body ':
return b''
if tag[0:6] != b'<meta ':
m = id_pattern.match(tag) or name_pattern.match(tag)
if m is not None:
return m.group(1)
return b''
|
third_party/WebKit/Source/devtools/scripts/optimize_png_images.py
|
google-ar/chromium
| 777 |
65572
|
<reponame>google-ar/chromium<filename>third_party/WebKit/Source/devtools/scripts/optimize_png_images.py
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import subprocess
import sys
from build import devtools_file_hashes
try:
import json
except ImportError:
import simplejson as json
scripts_path = os.path.dirname(os.path.abspath(__file__))
devtools_path = os.path.dirname(scripts_path)
blink_source_path = os.path.dirname(devtools_path)
blink_path = os.path.dirname(blink_source_path)
chromium_src_path = os.path.dirname(os.path.dirname(blink_path))
devtools_frontend_path = os.path.join(devtools_path, "front_end")
images_path = os.path.join(devtools_frontend_path, "Images")
image_sources_path = os.path.join(images_path, "src")
hashes_file_name = "optimize_png.hashes"
hashes_file_path = os.path.join(image_sources_path, hashes_file_name)
file_names = os.listdir(image_sources_path)
svg_file_paths = [os.path.join(image_sources_path, file_name) for file_name in file_names if file_name.endswith(".svg")]
svg_file_paths_to_optimize = devtools_file_hashes.files_with_invalid_hashes(hashes_file_path, svg_file_paths)
svg_file_names = [os.path.basename(file_path) for file_path in svg_file_paths_to_optimize]
def check_installed(app_name):
proc = subprocess.Popen("which %s" % app_name, stdout=subprocess.PIPE, shell=True)
proc.communicate()
if proc.returncode != 0:
print "This script needs \"%s\" to be installed." % app_name
print "Run sudo gem install image_optim image_optim_pack"
sys.exit(1)
check_installed("image_optim")
def optimize_png(file_name):
png_full_path = os.path.join(images_path, file_name + ".png")
optimize_command = "image_optim %s" % png_full_path
proc = subprocess.Popen(optimize_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=chromium_src_path)
return proc
if len(svg_file_names):
print "%d unoptimized png files found." % len(svg_file_names)
else:
print "All png files are already optimized."
sys.exit()
processes = {}
for file_name in svg_file_names:
name = os.path.splitext(file_name)[0]
name2x = name + "_2x"
processes[name] = optimize_png(name)
processes[name2x] = optimize_png(name2x)
for file_name, proc in processes.items():
(optimize_out, _) = proc.communicate()
print("Optimization of %s finished: %s" % (file_name, optimize_out))
devtools_file_hashes.update_file_hashes(hashes_file_path, svg_file_paths)
|
tests/integration/actions/run/base.py
|
ekmixon/ansible-navigator
| 134 |
65583
|
<filename>tests/integration/actions/run/base.py
"""Base class for run interactive/stdout tests.
"""
import difflib
import json
import os
from typing import Optional
import pytest
from ..._interactions import SearchFor
from ..._interactions import Step
from ....defaults import FIXTURES_DIR
from ..._common import fixture_path_from_request
from ..._common import update_fixtures
from ..._tmux_session import TmuxSession
# run playbook
run_fixture_dir = os.path.join(FIXTURES_DIR, "integration", "actions", "run")
inventory_path = os.path.join(run_fixture_dir, "inventory")
playbook_path = os.path.join(run_fixture_dir, "site.yaml")
base_steps = (
Step(user_input=":0", comment="play-1 details"),
Step(user_input=":0", comment="task-1 details"),
Step(user_input=":back", comment="play-1 details"),
Step(user_input=":1", comment="play-1 task-2 details"),
Step(user_input=":back", comment="play-1 details"),
Step(user_input=":back", comment="all play details"),
Step(user_input=":1", comment="play-2 details"),
Step(user_input=":0", comment="play-2 task-1 details"),
Step(user_input=":back", comment="play-2 details"),
Step(user_input=":1", comment="play-2 task-2 details"),
Step(user_input=":back", comment="play-2 details"),
Step(user_input=":back", comment="all play details"),
Step(user_input=":st", comment="display stream"),
)
class BaseClass:
"""Base class for run interactive/stdout tests."""
UPDATE_FIXTURES = False
TEST_FOR_MODE: Optional[str] = None
@staticmethod
@pytest.fixture(scope="module", name="tmux_session")
def fixture_tmux_session(request):
"""tmux fixture for this module"""
params = {
"pane_height": "1000",
"pane_width": "500",
"setup_commands": [
"export ANSIBLE_DEVEL_WARNING=False",
"export ANSIBLE_DEPRECATION_WARNINGS=False",
],
"unique_test_id": request.node.nodeid,
}
with TmuxSession(**params) as tmux_session:
yield tmux_session
def test(self, request, tmux_session, step):
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
"""Run the tests for run, mode and ``ee`` set in child class."""
if step.search_within_response is SearchFor.HELP:
search_within_response = ":help help"
elif step.search_within_response is SearchFor.PROMPT:
search_within_response = tmux_session.cli_prompt
else:
search_within_response = step.search_within_response
received_output = tmux_session.interaction(
value=step.user_input,
search_within_response=search_within_response,
)
if step.mask:
# mask out some configuration that is subject to change each run
mask = "X" * 50
for idx, line in enumerate(received_output):
if tmux_session.cli_prompt in line:
received_output[idx] = mask
else:
for out in ["duration:", "playbook:", "start:", "end:", "task_path:"]:
if out in line:
received_output[idx] = mask
fixtures_update_requested = (
self.UPDATE_FIXTURES
or os.environ.get("ANSIBLE_NAVIGATOR_UPDATE_TEST_FIXTURES") == "true"
and not any((step.look_fors, step.look_nots))
)
if fixtures_update_requested:
update_fixtures(
request,
step.step_index,
received_output,
step.comment,
additional_information={
"look_fors": step.look_fors,
"look_nots": step.look_nots,
"compared_fixture": not any((step.look_fors, step.look_nots)),
},
)
page = " ".join(received_output)
if step.look_fors:
assert all(look_for in page for look_for in step.look_fors)
if step.look_nots:
assert not any(look_not in page for look_not in step.look_nots)
if not any((step.look_fors, step.look_nots)):
dir_path, file_name = fixture_path_from_request(request, step.step_index)
with open(file=os.path.join(dir_path, file_name), encoding="utf-8") as infile:
expected_output = json.load(infile)["output"]
assert expected_output == received_output, "\n" + "\n".join(
difflib.unified_diff(expected_output, received_output, "expected", "received")
)
|
sdk/servicefabric/azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/_models.py
|
rsdoherty/azure-sdk-for-python
| 2,728 |
65588
|
<filename>sdk/servicefabric/azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/_models.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ApplicationDeltaHealthPolicy(msrest.serialization.Model):
"""Defines a delta health policy used to evaluate the health of an application or one of its child entities when upgrading the cluster.
:param default_service_type_delta_health_policy: The delta health policy used by default to
evaluate the health of a service type when upgrading the cluster.
:type default_service_type_delta_health_policy:
~azure.mgmt.servicefabric.models.ServiceTypeDeltaHealthPolicy
:param service_type_delta_health_policies: The map with service type delta health policy per
service type name. The map is empty by default.
:type service_type_delta_health_policies: dict[str,
~azure.mgmt.servicefabric.models.ServiceTypeDeltaHealthPolicy]
"""
_attribute_map = {
'default_service_type_delta_health_policy': {'key': 'defaultServiceTypeDeltaHealthPolicy', 'type': 'ServiceTypeDeltaHealthPolicy'},
'service_type_delta_health_policies': {'key': 'serviceTypeDeltaHealthPolicies', 'type': '{ServiceTypeDeltaHealthPolicy}'},
}
def __init__(
self,
**kwargs
):
super(ApplicationDeltaHealthPolicy, self).__init__(**kwargs)
self.default_service_type_delta_health_policy = kwargs.get('default_service_type_delta_health_policy', None)
self.service_type_delta_health_policies = kwargs.get('service_type_delta_health_policies', None)
class ApplicationHealthPolicy(msrest.serialization.Model):
"""Defines a health policy used to evaluate the health of an application or one of its children entities.
:param default_service_type_health_policy: The health policy used by default to evaluate the
health of a service type.
:type default_service_type_health_policy:
~azure.mgmt.servicefabric.models.ServiceTypeHealthPolicy
:param service_type_health_policies: The map with service type health policy per service type
name. The map is empty by default.
:type service_type_health_policies: dict[str,
~azure.mgmt.servicefabric.models.ServiceTypeHealthPolicy]
"""
_attribute_map = {
'default_service_type_health_policy': {'key': 'defaultServiceTypeHealthPolicy', 'type': 'ServiceTypeHealthPolicy'},
'service_type_health_policies': {'key': 'serviceTypeHealthPolicies', 'type': '{ServiceTypeHealthPolicy}'},
}
def __init__(
self,
**kwargs
):
super(ApplicationHealthPolicy, self).__init__(**kwargs)
self.default_service_type_health_policy = kwargs.get('default_service_type_health_policy', None)
self.service_type_health_policies = kwargs.get('service_type_health_policies', None)
class ApplicationMetricDescription(msrest.serialization.Model):
"""Describes capacity information for a custom resource balancing metric. This can be used to limit the total consumption of this metric by the services of this application.
:param name: The name of the metric.
:type name: str
:param maximum_capacity: The maximum node capacity for Service Fabric application.
This is the maximum Load for an instance of this application on a single node. Even if the
capacity of node is greater than this value, Service Fabric will limit the total load of
services within the application on each node to this value.
If set to zero, capacity for this metric is unlimited on each node.
When creating a new application with application capacity defined, the product of MaximumNodes
and this value must always be smaller than or equal to TotalApplicationCapacity.
When updating existing application with application capacity, the product of MaximumNodes and
this value must always be smaller than or equal to TotalApplicationCapacity.
:type maximum_capacity: long
:param reservation_capacity: The node reservation capacity for Service Fabric application.
This is the amount of load which is reserved on nodes which have instances of this
application.
If MinimumNodes is specified, then the product of these values will be the capacity reserved
in the cluster for the application.
If set to zero, no capacity is reserved for this metric.
When setting application capacity or when updating application capacity, this value must be
smaller than or equal to MaximumCapacity for each metric.
:type reservation_capacity: long
:param total_application_capacity: The total metric capacity for Service Fabric application.
This is the total metric capacity for this application in the cluster. Service Fabric will try
to limit the sum of loads of services within the application to this value.
When creating a new application with application capacity defined, the product of MaximumNodes
and MaximumCapacity must always be smaller than or equal to this value.
:type total_application_capacity: long
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'maximum_capacity': {'key': 'maximumCapacity', 'type': 'long'},
'reservation_capacity': {'key': 'reservationCapacity', 'type': 'long'},
'total_application_capacity': {'key': 'totalApplicationCapacity', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(ApplicationMetricDescription, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.maximum_capacity = kwargs.get('maximum_capacity', None)
self.reservation_capacity = kwargs.get('reservation_capacity', None)
self.total_application_capacity = kwargs.get('total_application_capacity', None)
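# Illustrative sketch: an ApplicationMetricDescription that satisfies the constraint
# documented above, MaximumNodes * MaximumCapacity <= TotalApplicationCapacity.
# The metric name and numbers are hypothetical: with 5 maximum nodes, 5 * 4 = 20,
# which does not exceed the total application capacity of 20.
def _example_application_metric_description():
    return ApplicationMetricDescription(
        name='CpuCores',
        maximum_capacity=4,             # per-node cap for this metric
        reservation_capacity=1,         # load reserved on each node hosting the application
        total_application_capacity=20,  # >= MaximumNodes (5) * MaximumCapacity (4)
    )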
class ProxyResource(msrest.serialization.Model):
"""The resource model definition for proxy-only resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: It will be deprecated in the new API; resource location depends on the parent
 resource.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
self.etag = None
self.system_data = None
class ApplicationResource(ProxyResource):
"""The application resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: It will be deprecated in the new API; resource location depends on the parent
 resource.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
:param identity: Describes the managed identities for an Azure resource.
:type identity: ~azure.mgmt.servicefabric.models.ManagedIdentity
:param type_version: The version of the application type as defined in the application
manifest.
:type type_version: str
:param parameters: List of application parameters with overridden values from their default
values specified in the application manifest.
:type parameters: dict[str, str]
:param upgrade_policy: Describes the policy for a monitored application upgrade.
:type upgrade_policy: ~azure.mgmt.servicefabric.models.ApplicationUpgradePolicy
:param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity
for this application. Note that this does not mean that the services of this application will
be placed on all of those nodes. If this property is set to zero, no capacity will be reserved.
The value of this property cannot be more than the value of the MaximumNodes property.
:type minimum_nodes: long
:param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity
for this application. Note that this does not mean that the services of this application will
be placed on all of those nodes. By default, the value of this property is zero and it means
that the services can be placed on any node.
:type maximum_nodes: long
:param remove_application_capacity: Remove the current application capacity settings.
:type remove_application_capacity: bool
:param metrics: List of application capacity metric descriptions.
:type metrics: list[~azure.mgmt.servicefabric.models.ApplicationMetricDescription]
:param managed_identities: List of user assigned identities for the application, each mapped to
a friendly name.
:type managed_identities:
list[~azure.mgmt.servicefabric.models.ApplicationUserAssignedIdentity]
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param type_name: The application type name as defined in the application manifest.
:type type_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
'minimum_nodes': {'minimum': 0},
'maximum_nodes': {'minimum': 0},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
'type_version': {'key': 'properties.typeVersion', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '{str}'},
'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'ApplicationUpgradePolicy'},
'minimum_nodes': {'key': 'properties.minimumNodes', 'type': 'long'},
'maximum_nodes': {'key': 'properties.maximumNodes', 'type': 'long'},
'remove_application_capacity': {'key': 'properties.removeApplicationCapacity', 'type': 'bool'},
'metrics': {'key': 'properties.metrics', 'type': '[ApplicationMetricDescription]'},
'managed_identities': {'key': 'properties.managedIdentities', 'type': '[ApplicationUserAssignedIdentity]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'type_name': {'key': 'properties.typeName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApplicationResource, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.type_version = kwargs.get('type_version', None)
self.parameters = kwargs.get('parameters', None)
self.upgrade_policy = kwargs.get('upgrade_policy', None)
self.minimum_nodes = kwargs.get('minimum_nodes', None)
self.maximum_nodes = kwargs.get('maximum_nodes', 0)
self.remove_application_capacity = kwargs.get('remove_application_capacity', None)
self.metrics = kwargs.get('metrics', None)
self.managed_identities = kwargs.get('managed_identities', None)
self.provisioning_state = None
self.type_name = kwargs.get('type_name', None)
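# Illustrative sketch: a minimal ApplicationResource using the properties documented
# above. The type name/version, parameter values, and node counts are hypothetical;
# minimum_nodes must not exceed maximum_nodes, and the metric reuses the example
# helper defined after ApplicationMetricDescription.
def _example_application_resource():
    return ApplicationResource(
        type_name='MyAppType',
        type_version='1.0.0',
        parameters={'InstanceCount': '3'},
        minimum_nodes=2,
        maximum_nodes=5,
        metrics=[_example_application_metric_description()],
    )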
class ApplicationResourceList(msrest.serialization.Model):
"""The list of application resources.
Variables are only populated by the server, and will be ignored when sending a request.
:param value:
:type value: list[~azure.mgmt.servicefabric.models.ApplicationResource]
:ivar next_link: URL to get the next set of application list results if there are any.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ApplicationResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApplicationResourceList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class ApplicationResourceUpdateProperties(msrest.serialization.Model):
"""The application resource properties for patch operations.
:param type_version: The version of the application type as defined in the application
manifest.
:type type_version: str
:param parameters: List of application parameters with overridden values from their default
values specified in the application manifest.
:type parameters: dict[str, str]
:param upgrade_policy: Describes the policy for a monitored application upgrade.
:type upgrade_policy: ~azure.mgmt.servicefabric.models.ApplicationUpgradePolicy
:param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity
for this application. Note that this does not mean that the services of this application will
be placed on all of those nodes. If this property is set to zero, no capacity will be reserved.
The value of this property cannot be more than the value of the MaximumNodes property.
:type minimum_nodes: long
:param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity
for this application. Note that this does not mean that the services of this application will
be placed on all of those nodes. By default, the value of this property is zero and it means
that the services can be placed on any node.
:type maximum_nodes: long
:param remove_application_capacity: Remove the current application capacity settings.
:type remove_application_capacity: bool
:param metrics: List of application capacity metric descriptions.
:type metrics: list[~azure.mgmt.servicefabric.models.ApplicationMetricDescription]
:param managed_identities: List of user assigned identities for the application, each mapped to
a friendly name.
:type managed_identities:
list[~azure.mgmt.servicefabric.models.ApplicationUserAssignedIdentity]
"""
_validation = {
'minimum_nodes': {'minimum': 0},
'maximum_nodes': {'minimum': 0},
}
_attribute_map = {
'type_version': {'key': 'typeVersion', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{str}'},
'upgrade_policy': {'key': 'upgradePolicy', 'type': 'ApplicationUpgradePolicy'},
'minimum_nodes': {'key': 'minimumNodes', 'type': 'long'},
'maximum_nodes': {'key': 'maximumNodes', 'type': 'long'},
'remove_application_capacity': {'key': 'removeApplicationCapacity', 'type': 'bool'},
'metrics': {'key': 'metrics', 'type': '[ApplicationMetricDescription]'},
'managed_identities': {'key': 'managedIdentities', 'type': '[ApplicationUserAssignedIdentity]'},
}
def __init__(
self,
**kwargs
):
super(ApplicationResourceUpdateProperties, self).__init__(**kwargs)
self.type_version = kwargs.get('type_version', None)
self.parameters = kwargs.get('parameters', None)
self.upgrade_policy = kwargs.get('upgrade_policy', None)
self.minimum_nodes = kwargs.get('minimum_nodes', None)
self.maximum_nodes = kwargs.get('maximum_nodes', 0)
self.remove_application_capacity = kwargs.get('remove_application_capacity', None)
self.metrics = kwargs.get('metrics', None)
self.managed_identities = kwargs.get('managed_identities', None)
class ApplicationResourceProperties(ApplicationResourceUpdateProperties):
"""The application resource properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param type_version: The version of the application type as defined in the application
manifest.
:type type_version: str
:param parameters: List of application parameters with overridden values from their default
values specified in the application manifest.
:type parameters: dict[str, str]
:param upgrade_policy: Describes the policy for a monitored application upgrade.
:type upgrade_policy: ~azure.mgmt.servicefabric.models.ApplicationUpgradePolicy
:param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity
for this application. Note that this does not mean that the services of this application will
be placed on all of those nodes. If this property is set to zero, no capacity will be reserved.
The value of this property cannot be more than the value of the MaximumNodes property.
:type minimum_nodes: long
:param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity
for this application. Note that this does not mean that the services of this application will
be placed on all of those nodes. By default, the value of this property is zero and it means
that the services can be placed on any node.
:type maximum_nodes: long
:param remove_application_capacity: Remove the current application capacity settings.
:type remove_application_capacity: bool
:param metrics: List of application capacity metric descriptions.
:type metrics: list[~azure.mgmt.servicefabric.models.ApplicationMetricDescription]
:param managed_identities: List of user assigned identities for the application, each mapped to
a friendly name.
:type managed_identities:
list[~azure.mgmt.servicefabric.models.ApplicationUserAssignedIdentity]
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param type_name: The application type name as defined in the application manifest.
:type type_name: str
"""
_validation = {
'minimum_nodes': {'minimum': 0},
'maximum_nodes': {'minimum': 0},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'type_version': {'key': 'typeVersion', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{str}'},
'upgrade_policy': {'key': 'upgradePolicy', 'type': 'ApplicationUpgradePolicy'},
'minimum_nodes': {'key': 'minimumNodes', 'type': 'long'},
'maximum_nodes': {'key': 'maximumNodes', 'type': 'long'},
'remove_application_capacity': {'key': 'removeApplicationCapacity', 'type': 'bool'},
'metrics': {'key': 'metrics', 'type': '[ApplicationMetricDescription]'},
'managed_identities': {'key': 'managedIdentities', 'type': '[ApplicationUserAssignedIdentity]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'type_name': {'key': 'typeName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApplicationResourceProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.type_name = kwargs.get('type_name', None)
class ApplicationResourceUpdate(ProxyResource):
"""The application resource for patch operations.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: It will be deprecated in the new API; resource location depends on the parent
 resource.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
:param type_version: The version of the application type as defined in the application
manifest.
:type type_version: str
:param parameters: List of application parameters with overridden values from their default
values specified in the application manifest.
:type parameters: dict[str, str]
:param upgrade_policy: Describes the policy for a monitored application upgrade.
:type upgrade_policy: ~azure.mgmt.servicefabric.models.ApplicationUpgradePolicy
:param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity
for this application. Note that this does not mean that the services of this application will
be placed on all of those nodes. If this property is set to zero, no capacity will be reserved.
The value of this property cannot be more than the value of the MaximumNodes property.
:type minimum_nodes: long
:param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity
for this application. Note that this does not mean that the services of this application will
be placed on all of those nodes. By default, the value of this property is zero and it means
that the services can be placed on any node.
:type maximum_nodes: long
:param remove_application_capacity: Remove the current application capacity settings.
:type remove_application_capacity: bool
:param metrics: List of application capacity metric descriptions.
:type metrics: list[~azure.mgmt.servicefabric.models.ApplicationMetricDescription]
:param managed_identities: List of user assigned identities for the application, each mapped to
a friendly name.
:type managed_identities:
list[~azure.mgmt.servicefabric.models.ApplicationUserAssignedIdentity]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
'minimum_nodes': {'minimum': 0},
'maximum_nodes': {'minimum': 0},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'type_version': {'key': 'properties.typeVersion', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '{str}'},
'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'ApplicationUpgradePolicy'},
'minimum_nodes': {'key': 'properties.minimumNodes', 'type': 'long'},
'maximum_nodes': {'key': 'properties.maximumNodes', 'type': 'long'},
'remove_application_capacity': {'key': 'properties.removeApplicationCapacity', 'type': 'bool'},
'metrics': {'key': 'properties.metrics', 'type': '[ApplicationMetricDescription]'},
'managed_identities': {'key': 'properties.managedIdentities', 'type': '[ApplicationUserAssignedIdentity]'},
}
def __init__(
self,
**kwargs
):
super(ApplicationResourceUpdate, self).__init__(**kwargs)
self.type_version = kwargs.get('type_version', None)
self.parameters = kwargs.get('parameters', None)
self.upgrade_policy = kwargs.get('upgrade_policy', None)
self.minimum_nodes = kwargs.get('minimum_nodes', None)
self.maximum_nodes = kwargs.get('maximum_nodes', 0)
self.remove_application_capacity = kwargs.get('remove_application_capacity', None)
self.metrics = kwargs.get('metrics', None)
self.managed_identities = kwargs.get('managed_identities', None)
class ApplicationTypeResource(ProxyResource):
"""The application type name resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: It will be deprecated in the new API; resource location depends on the parent
 resource.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApplicationTypeResource, self).__init__(**kwargs)
self.provisioning_state = None
class ApplicationTypeResourceList(msrest.serialization.Model):
"""The list of application type names.
Variables are only populated by the server, and will be ignored when sending a request.
:param value:
:type value: list[~azure.mgmt.servicefabric.models.ApplicationTypeResource]
:ivar next_link: URL to get the next set of application type list results if there are any.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ApplicationTypeResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApplicationTypeResourceList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class ApplicationTypeVersionResource(ProxyResource):
"""An application type version resource for the specified application type name resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: It will be deprecated in the new API; resource location depends on the parent
 resource.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param app_package_url: The URL to the application package.
:type app_package_url: str
:ivar default_parameter_list: List of application type parameters that can be overridden when
creating or updating the application.
:vartype default_parameter_list: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'default_parameter_list': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'app_package_url': {'key': 'properties.appPackageUrl', 'type': 'str'},
'default_parameter_list': {'key': 'properties.defaultParameterList', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(ApplicationTypeVersionResource, self).__init__(**kwargs)
self.provisioning_state = None
self.app_package_url = kwargs.get('app_package_url', None)
self.default_parameter_list = None
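# Illustrative sketch: registering an application type version only requires the
# application package URL; provisioning_state and default_parameter_list are
# read-only and populated by the server. The .sfpkg URL below is hypothetical.
def _example_application_type_version_resource():
    return ApplicationTypeVersionResource(
        app_package_url='https://example.blob.core.windows.net/packages/MyAppType.1.0.0.sfpkg',
    )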
class ApplicationTypeVersionResourceList(msrest.serialization.Model):
"""The list of application type version resources for the specified application type name resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param value:
:type value: list[~azure.mgmt.servicefabric.models.ApplicationTypeVersionResource]
:ivar next_link: URL to get the next set of application type version list results if there are
any.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ApplicationTypeVersionResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApplicationTypeVersionResourceList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class ApplicationTypeVersionsCleanupPolicy(msrest.serialization.Model):
"""ApplicationTypeVersionsCleanupPolicy.
All required parameters must be populated in order to send to Azure.
:param max_unused_versions_to_keep: Required. Number of unused versions per application type to
keep.
:type max_unused_versions_to_keep: long
"""
_validation = {
'max_unused_versions_to_keep': {'required': True, 'minimum': 0},
}
_attribute_map = {
'max_unused_versions_to_keep': {'key': 'maxUnusedVersionsToKeep', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(ApplicationTypeVersionsCleanupPolicy, self).__init__(**kwargs)
self.max_unused_versions_to_keep = kwargs['max_unused_versions_to_keep']
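# Illustrative sketch: max_unused_versions_to_keep is read with kwargs[...] above, so
# it is required and omitting it raises KeyError at construction time. The retention
# count below is hypothetical.
def _example_cleanup_policy():
    return ApplicationTypeVersionsCleanupPolicy(max_unused_versions_to_keep=3)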
class ApplicationUpgradePolicy(msrest.serialization.Model):
"""Describes the policy for a monitored application upgrade.
:param upgrade_replica_set_check_timeout: The maximum amount of time to block processing of an
upgrade domain and prevent loss of availability when there are unexpected issues. When this
timeout expires, processing of the upgrade domain will proceed regardless of availability loss
issues. The timeout is reset at the start of each upgrade domain. Valid values are between 0
and 4294967295 inclusive (unsigned 32-bit integer).
:type upgrade_replica_set_check_timeout: str
:param force_restart: If true, then processes are forcefully restarted during upgrade even when
the code version has not changed (the upgrade only changes configuration or data).
:type force_restart: bool
:param rolling_upgrade_monitoring_policy: The policy used for monitoring the application
upgrade.
:type rolling_upgrade_monitoring_policy:
~azure.mgmt.servicefabric.models.ArmRollingUpgradeMonitoringPolicy
:param application_health_policy: Defines a health policy used to evaluate the health of an
application or one of its children entities.
:type application_health_policy: ~azure.mgmt.servicefabric.models.ArmApplicationHealthPolicy
:param upgrade_mode: The mode used to monitor health during a rolling upgrade. The values are
UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: "Invalid",
"UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: "Monitored".
:type upgrade_mode: str or ~azure.mgmt.servicefabric.models.RollingUpgradeMode
:param recreate_application: Determines whether the application should be recreated on update.
If value=true, the rest of the upgrade policy parameters are not allowed and it will result in
availability loss.
:type recreate_application: bool
"""
_attribute_map = {
'upgrade_replica_set_check_timeout': {'key': 'upgradeReplicaSetCheckTimeout', 'type': 'str'},
'force_restart': {'key': 'forceRestart', 'type': 'bool'},
'rolling_upgrade_monitoring_policy': {'key': 'rollingUpgradeMonitoringPolicy', 'type': 'ArmRollingUpgradeMonitoringPolicy'},
'application_health_policy': {'key': 'applicationHealthPolicy', 'type': 'ArmApplicationHealthPolicy'},
'upgrade_mode': {'key': 'upgradeMode', 'type': 'str'},
'recreate_application': {'key': 'recreateApplication', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ApplicationUpgradePolicy, self).__init__(**kwargs)
self.upgrade_replica_set_check_timeout = kwargs.get('upgrade_replica_set_check_timeout', None)
self.force_restart = kwargs.get('force_restart', False)
self.rolling_upgrade_monitoring_policy = kwargs.get('rolling_upgrade_monitoring_policy', None)
self.application_health_policy = kwargs.get('application_health_policy', None)
self.upgrade_mode = kwargs.get('upgrade_mode', "Monitored")
self.recreate_application = kwargs.get('recreate_application', None)
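# Illustrative sketch: a Monitored application upgrade policy. The replica set check
# timeout string and the monitoring values are hypothetical;
# ArmRollingUpgradeMonitoringPolicy is the class defined further down in this module.
def _example_application_upgrade_policy():
    return ApplicationUpgradePolicy(
        upgrade_replica_set_check_timeout='3600',
        force_restart=False,
        upgrade_mode='Monitored',
        rolling_upgrade_monitoring_policy=ArmRollingUpgradeMonitoringPolicy(
            failure_action='Rollback',
        ),
    )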
class ApplicationUserAssignedIdentity(msrest.serialization.Model):
"""ApplicationUserAssignedIdentity.
All required parameters must be populated in order to send to Azure.
:param name: Required. The friendly name of user assigned identity.
:type name: str
:param principal_id: Required. The principal id of user assigned identity.
:type principal_id: str
"""
_validation = {
'name': {'required': True},
'principal_id': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApplicationUserAssignedIdentity, self).__init__(**kwargs)
self.name = kwargs['name']
self.principal_id = kwargs['principal_id']
class ArmApplicationHealthPolicy(msrest.serialization.Model):
"""Defines a health policy used to evaluate the health of an application or one of its children entities.
:param consider_warning_as_error: Indicates whether warnings are treated with the same severity
as errors.
:type consider_warning_as_error: bool
:param max_percent_unhealthy_deployed_applications: The maximum allowed percentage of unhealthy
deployed applications. Allowed values are Byte values from zero to 100.
The percentage represents the maximum tolerated percentage of deployed applications that can
be unhealthy before the application is considered in error.
This is calculated by dividing the number of unhealthy deployed applications by the number
of nodes where the application is currently deployed in the cluster.
The computation rounds up to tolerate one failure on small numbers of nodes. Default
percentage is zero.
:type max_percent_unhealthy_deployed_applications: int
:param default_service_type_health_policy: The health policy used by default to evaluate the
health of a service type.
:type default_service_type_health_policy:
~azure.mgmt.servicefabric.models.ArmServiceTypeHealthPolicy
:param service_type_health_policy_map: The map with service type health policy per service type
name. The map is empty by default.
:type service_type_health_policy_map: dict[str,
~azure.mgmt.servicefabric.models.ArmServiceTypeHealthPolicy]
"""
_attribute_map = {
'consider_warning_as_error': {'key': 'considerWarningAsError', 'type': 'bool'},
'max_percent_unhealthy_deployed_applications': {'key': 'maxPercentUnhealthyDeployedApplications', 'type': 'int'},
'default_service_type_health_policy': {'key': 'defaultServiceTypeHealthPolicy', 'type': 'ArmServiceTypeHealthPolicy'},
'service_type_health_policy_map': {'key': 'serviceTypeHealthPolicyMap', 'type': '{ArmServiceTypeHealthPolicy}'},
}
def __init__(
self,
**kwargs
):
super(ArmApplicationHealthPolicy, self).__init__(**kwargs)
self.consider_warning_as_error = kwargs.get('consider_warning_as_error', False)
self.max_percent_unhealthy_deployed_applications = kwargs.get('max_percent_unhealthy_deployed_applications', 0)
self.default_service_type_health_policy = kwargs.get('default_service_type_health_policy', None)
self.service_type_health_policy_map = kwargs.get('service_type_health_policy_map', None)
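# Illustrative sketch: an ArmApplicationHealthPolicy tolerating up to 10% unhealthy
# deployed applications, with a per-service-type override keyed by the hypothetical
# service type name 'GatewayServiceType'. ArmServiceTypeHealthPolicy is defined below.
def _example_arm_application_health_policy():
    return ArmApplicationHealthPolicy(
        consider_warning_as_error=False,
        max_percent_unhealthy_deployed_applications=10,
        default_service_type_health_policy=ArmServiceTypeHealthPolicy(
            max_percent_unhealthy_services=0,
        ),
        service_type_health_policy_map={
            'GatewayServiceType': ArmServiceTypeHealthPolicy(max_percent_unhealthy_services=20),
        },
    )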
class ArmRollingUpgradeMonitoringPolicy(msrest.serialization.Model):
"""The policy used for monitoring the application upgrade.
:param failure_action: The compensating action to perform when a Monitored upgrade encounters
monitoring policy or health policy violations. Possible values include: "Rollback", "Manual".
:type failure_action: str or ~azure.mgmt.servicefabric.models.ArmUpgradeFailureAction
:param health_check_wait_duration: The amount of time to wait after completing an upgrade
domain before applying health policies. It is first interpreted as a string representing an ISO
8601 duration. If that fails, then it is interpreted as a number representing the total number
of milliseconds.
:type health_check_wait_duration: str
:param health_check_stable_duration: The amount of time that the application or cluster must
remain healthy before the upgrade proceeds to the next upgrade domain. It is first interpreted
as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a
number representing the total number of milliseconds.
:type health_check_stable_duration: str
:param health_check_retry_timeout: The amount of time to retry health evaluation when the
application or cluster is unhealthy before FailureAction is executed. It is first interpreted
as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a
number representing the total number of milliseconds.
:type health_check_retry_timeout: str
:param upgrade_timeout: The amount of time the overall upgrade has to complete before
FailureAction is executed. It is first interpreted as a string representing an ISO 8601
duration. If that fails, then it is interpreted as a number representing the total number of
milliseconds.
:type upgrade_timeout: str
:param upgrade_domain_timeout: The amount of time each upgrade domain has to complete before
FailureAction is executed. It is first interpreted as a string representing an ISO 8601
duration. If that fails, then it is interpreted as a number representing the total number of
milliseconds.
:type upgrade_domain_timeout: str
"""
_attribute_map = {
'failure_action': {'key': 'failureAction', 'type': 'str'},
'health_check_wait_duration': {'key': 'healthCheckWaitDuration', 'type': 'str'},
'health_check_stable_duration': {'key': 'healthCheckStableDuration', 'type': 'str'},
'health_check_retry_timeout': {'key': 'healthCheckRetryTimeout', 'type': 'str'},
'upgrade_timeout': {'key': 'upgradeTimeout', 'type': 'str'},
'upgrade_domain_timeout': {'key': 'upgradeDomainTimeout', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ArmRollingUpgradeMonitoringPolicy, self).__init__(**kwargs)
self.failure_action = kwargs.get('failure_action', None)
self.health_check_wait_duration = kwargs.get('health_check_wait_duration', "0")
self.health_check_stable_duration = kwargs.get('health_check_stable_duration', "PT0H2M0S")
self.health_check_retry_timeout = kwargs.get('health_check_retry_timeout', "PT0H10M0S")
self.upgrade_timeout = kwargs.get('upgrade_timeout', "P10675199DT02H48M05.4775807S")
self.upgrade_domain_timeout = kwargs.get('upgrade_domain_timeout', "P10675199DT02H48M05.4775807S")
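# Illustrative sketch: each duration accepts an ISO 8601 duration string (or a number
# of milliseconds), as documented above. The values below are hypothetical but follow
# the same pattern as the defaults set in __init__.
def _example_rolling_upgrade_monitoring_policy():
    return ArmRollingUpgradeMonitoringPolicy(
        failure_action='Rollback',
        health_check_wait_duration='PT0H0M30S',   # wait 30 seconds after each upgrade domain
        health_check_stable_duration='PT0H2M0S',  # must stay healthy for 2 minutes
        health_check_retry_timeout='PT0H10M0S',
        upgrade_timeout='PT2H0M0S',
        upgrade_domain_timeout='PT1H0M0S',
    )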
class ArmServiceTypeHealthPolicy(msrest.serialization.Model):
"""Represents the health policy used to evaluate the health of services belonging to a service type.
:param max_percent_unhealthy_services: The maximum percentage of services allowed to be
unhealthy before your application is considered in error.
:type max_percent_unhealthy_services: int
:param max_percent_unhealthy_partitions_per_service: The maximum percentage of partitions per
service allowed to be unhealthy before your application is considered in error.
:type max_percent_unhealthy_partitions_per_service: int
:param max_percent_unhealthy_replicas_per_partition: The maximum percentage of replicas per
partition allowed to be unhealthy before your application is considered in error.
:type max_percent_unhealthy_replicas_per_partition: int
"""
_validation = {
'max_percent_unhealthy_services': {'maximum': 100, 'minimum': 0},
'max_percent_unhealthy_partitions_per_service': {'maximum': 100, 'minimum': 0},
'max_percent_unhealthy_replicas_per_partition': {'maximum': 100, 'minimum': 0},
}
_attribute_map = {
'max_percent_unhealthy_services': {'key': 'maxPercentUnhealthyServices', 'type': 'int'},
'max_percent_unhealthy_partitions_per_service': {'key': 'maxPercentUnhealthyPartitionsPerService', 'type': 'int'},
'max_percent_unhealthy_replicas_per_partition': {'key': 'maxPercentUnhealthyReplicasPerPartition', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ArmServiceTypeHealthPolicy, self).__init__(**kwargs)
self.max_percent_unhealthy_services = kwargs.get('max_percent_unhealthy_services', 0)
self.max_percent_unhealthy_partitions_per_service = kwargs.get('max_percent_unhealthy_partitions_per_service', 0)
self.max_percent_unhealthy_replicas_per_partition = kwargs.get('max_percent_unhealthy_replicas_per_partition', 0)
class AvailableOperationDisplay(msrest.serialization.Model):
"""Operation supported by the Service Fabric resource provider.
:param provider: The name of the provider.
:type provider: str
:param resource: The resource on which the operation is performed.
:type resource: str
:param operation: The operation that can be performed.
:type operation: str
:param description: Operation description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AvailableOperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class AzureActiveDirectory(msrest.serialization.Model):
"""The settings to enable AAD authentication on the cluster.
:param tenant_id: Azure active directory tenant id.
:type tenant_id: str
:param cluster_application: Azure active directory cluster application id.
:type cluster_application: str
:param client_application: Azure active directory client application id.
:type client_application: str
"""
_attribute_map = {
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'cluster_application': {'key': 'clusterApplication', 'type': 'str'},
'client_application': {'key': 'clientApplication', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureActiveDirectory, self).__init__(**kwargs)
self.tenant_id = kwargs.get('tenant_id', None)
self.cluster_application = kwargs.get('cluster_application', None)
self.client_application = kwargs.get('client_application', None)
class CertificateDescription(msrest.serialization.Model):
"""Describes the certificate details.
All required parameters must be populated in order to send to Azure.
:param thumbprint: Required. Thumbprint of the primary certificate.
:type thumbprint: str
:param thumbprint_secondary: Thumbprint of the secondary certificate.
:type thumbprint_secondary: str
:param x509_store_name: The local certificate store location. Possible values include:
"AddressBook", "AuthRoot", "CertificateAuthority", "Disallowed", "My", "Root", "TrustedPeople",
"TrustedPublisher".
:type x509_store_name: str or ~azure.mgmt.servicefabric.models.StoreName
"""
_validation = {
'thumbprint': {'required': True},
}
_attribute_map = {
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'thumbprint_secondary': {'key': 'thumbprintSecondary', 'type': 'str'},
'x509_store_name': {'key': 'x509StoreName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateDescription, self).__init__(**kwargs)
self.thumbprint = kwargs['thumbprint']
self.thumbprint_secondary = kwargs.get('thumbprint_secondary', None)
self.x509_store_name = kwargs.get('x509_store_name', None)
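# Illustrative sketch: thumbprint is required (read with kwargs['thumbprint'] above);
# the secondary thumbprint and store name are optional. The thumbprint value is
# hypothetical.
def _example_certificate_description():
    return CertificateDescription(
        thumbprint='A7B4C9D2E1F3051729384756AABBCCDDEEFF0011',
        x509_store_name='My',
    )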
class ClientCertificateCommonName(msrest.serialization.Model):
"""Describes the client certificate details using common name.
All required parameters must be populated in order to send to Azure.
:param is_admin: Required. Indicates if the client certificate has admin access to the cluster.
Non-admin clients can perform only read-only operations on the cluster.
:type is_admin: bool
:param certificate_common_name: Required. The common name of the client certificate.
:type certificate_common_name: str
:param certificate_issuer_thumbprint: Required. The issuer thumbprint of the client
certificate.
:type certificate_issuer_thumbprint: str
"""
_validation = {
'is_admin': {'required': True},
'certificate_common_name': {'required': True},
'certificate_issuer_thumbprint': {'required': True},
}
_attribute_map = {
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'certificate_common_name': {'key': 'certificateCommonName', 'type': 'str'},
'certificate_issuer_thumbprint': {'key': 'certificateIssuerThumbprint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClientCertificateCommonName, self).__init__(**kwargs)
self.is_admin = kwargs['is_admin']
self.certificate_common_name = kwargs['certificate_common_name']
self.certificate_issuer_thumbprint = kwargs['certificate_issuer_thumbprint']
class ClientCertificateThumbprint(msrest.serialization.Model):
"""Describes the client certificate details using thumbprint.
All required parameters must be populated in order to send to Azure.
:param is_admin: Required. Indicates if the client certificate has admin access to the cluster.
Non-admin clients can perform only read-only operations on the cluster.
:type is_admin: bool
:param certificate_thumbprint: Required. The thumbprint of the client certificate.
:type certificate_thumbprint: str
"""
_validation = {
'is_admin': {'required': True},
'certificate_thumbprint': {'required': True},
}
_attribute_map = {
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'certificate_thumbprint': {'key': 'certificateThumbprint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClientCertificateThumbprint, self).__init__(**kwargs)
self.is_admin = kwargs['is_admin']
self.certificate_thumbprint = kwargs['certificate_thumbprint']
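# Illustrative sketch: admitting one admin client by certificate thumbprint and one
# read-only client by common name. The thumbprints and common name are hypothetical.
def _example_client_certificates():
    admin_client = ClientCertificateThumbprint(
        is_admin=True,
        certificate_thumbprint='00112233445566778899AABBCCDDEEFF00112233',
    )
    readonly_client = ClientCertificateCommonName(
        is_admin=False,
        certificate_common_name='client.contoso.com',
        certificate_issuer_thumbprint='FFEEDDCCBBAA99887766554433221100FFEEDDCC',
    )
    return [admin_client, readonly_client]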
class Resource(msrest.serialization.Model):
"""The resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: Required. Azure resource location.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs['location']
self.tags = kwargs.get('tags', None)
self.etag = None
self.system_data = None
class Cluster(Resource):
"""The cluster resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: Required. Azure resource location.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
:param add_on_features: The list of add-on features to enable in the cluster.
:type add_on_features: list[str or ~azure.mgmt.servicefabric.models.AddOnFeatures]
:ivar available_cluster_versions: The Service Fabric runtime versions available for this
cluster.
:vartype available_cluster_versions:
list[~azure.mgmt.servicefabric.models.ClusterVersionDetails]
:param azure_active_directory: The AAD authentication settings of the cluster.
:type azure_active_directory: ~azure.mgmt.servicefabric.models.AzureActiveDirectory
:param certificate: The certificate to use for securing the cluster. The certificate provided
will be used for node-to-node security within the cluster, as the SSL certificate for the cluster
management endpoint, and for the default admin client.
:type certificate: ~azure.mgmt.servicefabric.models.CertificateDescription
:param certificate_common_names: Describes a list of server certificates referenced by common
name that are used to secure the cluster.
:type certificate_common_names: ~azure.mgmt.servicefabric.models.ServerCertificateCommonNames
:param client_certificate_common_names: The list of client certificates referenced by common
name that are allowed to manage the cluster.
:type client_certificate_common_names:
list[~azure.mgmt.servicefabric.models.ClientCertificateCommonName]
:param client_certificate_thumbprints: The list of client certificates referenced by thumbprint
that are allowed to manage the cluster.
:type client_certificate_thumbprints:
list[~azure.mgmt.servicefabric.models.ClientCertificateThumbprint]
:param cluster_code_version: The Service Fabric runtime version of the cluster. This property
can only be set by the user when **upgradeMode** is set to 'Manual'. To get the list of available
Service Fabric versions for new clusters use the `ClusterVersion API <./ClusterVersion.md>`_. To
get the list of available versions for existing clusters use **availableClusterVersions**.
:type cluster_code_version: str
:ivar cluster_endpoint: The Azure Resource Provider endpoint. A system service in the cluster
connects to this endpoint.
:vartype cluster_endpoint: str
:ivar cluster_id: A service generated unique identifier for the cluster resource.
:vartype cluster_id: str
:ivar cluster_state: The current state of the cluster.
* WaitingForNodes - Indicates that the cluster resource is created and the resource provider
is waiting for Service Fabric VM extension to boot up and report to it.
* Deploying - Indicates that the Service Fabric runtime is being installed on the VMs. Cluster
resource will be in this state until the cluster boots up and system services are up.
* BaselineUpgrade - Indicates that the cluster is upgrading to establish the cluster
version. This upgrade is automatically initiated when the cluster boots up for the first time.
* UpdatingUserConfiguration - Indicates that the cluster is being upgraded with the user
provided configuration.
* UpdatingUserCertificate - Indicates that the cluster is being upgraded with the user
provided certificate.
* UpdatingInfrastructure - Indicates that the cluster is being upgraded with the latest
Service Fabric runtime version. This happens only when the **upgradeMode** is set to
'Automatic'.
* EnforcingClusterVersion - Indicates that cluster is on a different version than expected and
the cluster is being upgraded to the expected version.
* UpgradeServiceUnreachable - Indicates that the system service in the cluster is no longer
polling the Resource Provider. Clusters in this state cannot be managed by the Resource
Provider.
* AutoScale - Indicates that the ReliabilityLevel of the cluster is being adjusted.
* Ready - Indicates that the cluster is in a stable state. Possible values include:
"WaitingForNodes", "Deploying", "BaselineUpgrade", "UpdatingUserConfiguration",
"UpdatingUserCertificate", "UpdatingInfrastructure", "EnforcingClusterVersion",
"UpgradeServiceUnreachable", "AutoScale", "Ready".
:vartype cluster_state: str or ~azure.mgmt.servicefabric.models.ClusterState
:param diagnostics_storage_account_config: The storage account information for storing Service
Fabric diagnostic logs.
:type diagnostics_storage_account_config:
~azure.mgmt.servicefabric.models.DiagnosticsStorageAccountConfig
:param event_store_service_enabled: Indicates if the event store service is enabled.
:type event_store_service_enabled: bool
:param fabric_settings: The list of custom fabric settings to configure the cluster.
:type fabric_settings: list[~azure.mgmt.servicefabric.models.SettingsSectionDescription]
:param management_endpoint: The http management endpoint of the cluster.
:type management_endpoint: str
:param node_types: The list of node types in the cluster.
:type node_types: list[~azure.mgmt.servicefabric.models.NodeTypeDescription]
:ivar provisioning_state: The provisioning state of the cluster resource. Possible values
include: "Updating", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or ~azure.mgmt.servicefabric.models.ProvisioningState
:param reliability_level: The reliability level sets the replica set size of system services.
Learn about `ReliabilityLevel
<https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-capacity>`_.
* None - Run the System services with a target replica set count of 1. This should only be
used for test clusters.
* Bronze - Run the System services with a target replica set count of 3. This should only be
used for test clusters.
* Silver - Run the System services with a target replica set count of 5.
* Gold - Run the System services with a target replica set count of 7.
* Platinum - Run the System services with a target replica set count of 9. Possible values
include: "None", "Bronze", "Silver", "Gold", "Platinum".
:type reliability_level: str or ~azure.mgmt.servicefabric.models.ReliabilityLevel
:param reverse_proxy_certificate: The server certificate used by reverse proxy.
:type reverse_proxy_certificate: ~azure.mgmt.servicefabric.models.CertificateDescription
:param reverse_proxy_certificate_common_names: Describes a list of server certificates
referenced by common name that are used to secure the cluster.
:type reverse_proxy_certificate_common_names:
~azure.mgmt.servicefabric.models.ServerCertificateCommonNames
:param upgrade_description: The policy to use when upgrading the cluster.
:type upgrade_description: ~azure.mgmt.servicefabric.models.ClusterUpgradePolicy
:param upgrade_mode: The upgrade mode of the cluster when a new Service Fabric runtime version is
available. Possible values include: "Automatic", "Manual". Default value: "Automatic".
:type upgrade_mode: str or ~azure.mgmt.servicefabric.models.UpgradeMode
:param application_type_versions_cleanup_policy: The policy used to clean up unused versions.
:type application_type_versions_cleanup_policy:
~azure.mgmt.servicefabric.models.ApplicationTypeVersionsCleanupPolicy
:param vm_image: The VM image the VMSS has been configured with. Generic names such as Windows or
Linux can be used.
:type vm_image: str
:param sf_zonal_upgrade_mode: This property controls the logical grouping of VMs in upgrade
domains (UDs). This property can't be modified if a node type with multiple Availability Zones
is already present in the cluster. Possible values include: "Parallel", "Hierarchical".
:type sf_zonal_upgrade_mode: str or ~azure.mgmt.servicefabric.models.SfZonalUpgradeMode
:param vmss_zonal_upgrade_mode: This property defines the upgrade mode for the virtual machine
scale set; it is mandatory if a node type with multiple Availability Zones is added. Possible
values include: "Parallel", "Hierarchical".
:type vmss_zonal_upgrade_mode: str or ~azure.mgmt.servicefabric.models.VmssZonalUpgradeMode
:param infrastructure_service_manager: Indicates if infrastructure service manager is enabled.
:type infrastructure_service_manager: bool
:param upgrade_wave: Indicates when new cluster runtime version upgrades will be applied after
they are released. By default it is Wave0. This only applies when **upgradeMode** is set to
'Automatic'. Possible values include: "Wave0", "Wave1", "Wave2".
:type upgrade_wave: str or ~azure.mgmt.servicefabric.models.ClusterUpgradeCadence
:param upgrade_pause_start_timestamp_utc: Indicates the start date and time to pause automatic
runtime version upgrades on the cluster for a specific period of time (UTC).
:type upgrade_pause_start_timestamp_utc: ~datetime.datetime
:param upgrade_pause_end_timestamp_utc: Indicates the end date and time to pause automatic
runtime version upgrades on the cluster for a specific period of time (UTC).
:type upgrade_pause_end_timestamp_utc: ~datetime.datetime
:param wave_upgrade_paused: Boolean to pause automatic runtime version upgrades to the cluster.
:type wave_upgrade_paused: bool
:param notifications: Indicates a list of notification channels for cluster events.
:type notifications: list[~azure.mgmt.servicefabric.models.Notification]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
'available_cluster_versions': {'readonly': True},
'cluster_endpoint': {'readonly': True},
'cluster_id': {'readonly': True},
'cluster_state': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'add_on_features': {'key': 'properties.addOnFeatures', 'type': '[str]'},
'available_cluster_versions': {'key': 'properties.availableClusterVersions', 'type': '[ClusterVersionDetails]'},
'azure_active_directory': {'key': 'properties.azureActiveDirectory', 'type': 'AzureActiveDirectory'},
'certificate': {'key': 'properties.certificate', 'type': 'CertificateDescription'},
'certificate_common_names': {'key': 'properties.certificateCommonNames', 'type': 'ServerCertificateCommonNames'},
'client_certificate_common_names': {'key': 'properties.clientCertificateCommonNames', 'type': '[ClientCertificateCommonName]'},
'client_certificate_thumbprints': {'key': 'properties.clientCertificateThumbprints', 'type': '[ClientCertificateThumbprint]'},
'cluster_code_version': {'key': 'properties.clusterCodeVersion', 'type': 'str'},
'cluster_endpoint': {'key': 'properties.clusterEndpoint', 'type': 'str'},
'cluster_id': {'key': 'properties.clusterId', 'type': 'str'},
'cluster_state': {'key': 'properties.clusterState', 'type': 'str'},
'diagnostics_storage_account_config': {'key': 'properties.diagnosticsStorageAccountConfig', 'type': 'DiagnosticsStorageAccountConfig'},
'event_store_service_enabled': {'key': 'properties.eventStoreServiceEnabled', 'type': 'bool'},
'fabric_settings': {'key': 'properties.fabricSettings', 'type': '[SettingsSectionDescription]'},
'management_endpoint': {'key': 'properties.managementEndpoint', 'type': 'str'},
'node_types': {'key': 'properties.nodeTypes', 'type': '[NodeTypeDescription]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'reliability_level': {'key': 'properties.reliabilityLevel', 'type': 'str'},
'reverse_proxy_certificate': {'key': 'properties.reverseProxyCertificate', 'type': 'CertificateDescription'},
'reverse_proxy_certificate_common_names': {'key': 'properties.reverseProxyCertificateCommonNames', 'type': 'ServerCertificateCommonNames'},
'upgrade_description': {'key': 'properties.upgradeDescription', 'type': 'ClusterUpgradePolicy'},
'upgrade_mode': {'key': 'properties.upgradeMode', 'type': 'str'},
'application_type_versions_cleanup_policy': {'key': 'properties.applicationTypeVersionsCleanupPolicy', 'type': 'ApplicationTypeVersionsCleanupPolicy'},
'vm_image': {'key': 'properties.vmImage', 'type': 'str'},
'sf_zonal_upgrade_mode': {'key': 'properties.sfZonalUpgradeMode', 'type': 'str'},
'vmss_zonal_upgrade_mode': {'key': 'properties.vmssZonalUpgradeMode', 'type': 'str'},
'infrastructure_service_manager': {'key': 'properties.infrastructureServiceManager', 'type': 'bool'},
'upgrade_wave': {'key': 'properties.upgradeWave', 'type': 'str'},
'upgrade_pause_start_timestamp_utc': {'key': 'properties.upgradePauseStartTimestampUtc', 'type': 'iso-8601'},
'upgrade_pause_end_timestamp_utc': {'key': 'properties.upgradePauseEndTimestampUtc', 'type': 'iso-8601'},
'wave_upgrade_paused': {'key': 'properties.waveUpgradePaused', 'type': 'bool'},
'notifications': {'key': 'properties.notifications', 'type': '[Notification]'},
}
def __init__(
self,
**kwargs
):
super(Cluster, self).__init__(**kwargs)
self.add_on_features = kwargs.get('add_on_features', None)
self.available_cluster_versions = None
self.azure_active_directory = kwargs.get('azure_active_directory', None)
self.certificate = kwargs.get('certificate', None)
self.certificate_common_names = kwargs.get('certificate_common_names', None)
self.client_certificate_common_names = kwargs.get('client_certificate_common_names', None)
self.client_certificate_thumbprints = kwargs.get('client_certificate_thumbprints', None)
self.cluster_code_version = kwargs.get('cluster_code_version', None)
self.cluster_endpoint = None
self.cluster_id = None
self.cluster_state = None
self.diagnostics_storage_account_config = kwargs.get('diagnostics_storage_account_config', None)
self.event_store_service_enabled = kwargs.get('event_store_service_enabled', None)
self.fabric_settings = kwargs.get('fabric_settings', None)
self.management_endpoint = kwargs.get('management_endpoint', None)
self.node_types = kwargs.get('node_types', None)
self.provisioning_state = None
self.reliability_level = kwargs.get('reliability_level', None)
self.reverse_proxy_certificate = kwargs.get('reverse_proxy_certificate', None)
self.reverse_proxy_certificate_common_names = kwargs.get('reverse_proxy_certificate_common_names', None)
self.upgrade_description = kwargs.get('upgrade_description', None)
self.upgrade_mode = kwargs.get('upgrade_mode', "Automatic")
self.application_type_versions_cleanup_policy = kwargs.get('application_type_versions_cleanup_policy', None)
self.vm_image = kwargs.get('vm_image', None)
self.sf_zonal_upgrade_mode = kwargs.get('sf_zonal_upgrade_mode', None)
self.vmss_zonal_upgrade_mode = kwargs.get('vmss_zonal_upgrade_mode', None)
self.infrastructure_service_manager = kwargs.get('infrastructure_service_manager', None)
self.upgrade_wave = kwargs.get('upgrade_wave', None)
self.upgrade_pause_start_timestamp_utc = kwargs.get('upgrade_pause_start_timestamp_utc', None)
self.upgrade_pause_end_timestamp_utc = kwargs.get('upgrade_pause_end_timestamp_utc', None)
self.wave_upgrade_paused = kwargs.get('wave_upgrade_paused', None)
self.notifications = kwargs.get('notifications', None)
class ClusterCodeVersionsListResult(msrest.serialization.Model):
"""The list results of the Service Fabric runtime versions.
:param value:
:type value: list[~azure.mgmt.servicefabric.models.ClusterCodeVersionsResult]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ClusterCodeVersionsResult]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClusterCodeVersionsListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class ClusterCodeVersionsResult(msrest.serialization.Model):
"""The result of the Service Fabric runtime versions.
:param id: The identification of the result.
:type id: str
:param name: The name of the result.
:type name: str
:param type: The result resource type.
:type type: str
:param code_version: The Service Fabric runtime version of the cluster.
:type code_version: str
:param support_expiry_utc: The date of expiry of support of the version.
:type support_expiry_utc: str
    :param environment: Indicates if this version is for the Windows or Linux operating system.
Possible values include: "Windows", "Linux".
:type environment: str or ~azure.mgmt.servicefabric.models.ClusterEnvironment
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'code_version': {'key': 'properties.codeVersion', 'type': 'str'},
'support_expiry_utc': {'key': 'properties.supportExpiryUtc', 'type': 'str'},
'environment': {'key': 'properties.environment', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClusterCodeVersionsResult, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.type = kwargs.get('type', None)
self.code_version = kwargs.get('code_version', None)
self.support_expiry_utc = kwargs.get('support_expiry_utc', None)
self.environment = kwargs.get('environment', None)
class ClusterHealthPolicy(msrest.serialization.Model):
"""Defines a health policy used to evaluate the health of the cluster or of a cluster node.
:param max_percent_unhealthy_nodes: The maximum allowed percentage of unhealthy nodes before
reporting an error. For example, to allow 10% of nodes to be unhealthy, this value would be 10.
The percentage represents the maximum tolerated percentage of nodes that can be unhealthy
before the cluster is considered in error.
If the percentage is respected but there is at least one unhealthy node, the health is
evaluated as Warning.
The percentage is calculated by dividing the number of unhealthy nodes over the total number
of nodes in the cluster.
The computation rounds up to tolerate one failure on small numbers of nodes. Default
percentage is zero.
In large clusters, some nodes will always be down or out for repairs, so this percentage
should be configured to tolerate that.
:type max_percent_unhealthy_nodes: int
:param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy
applications before reporting an error. For example, to allow 10% of applications to be
unhealthy, this value would be 10.
The percentage represents the maximum tolerated percentage of applications that can be
unhealthy before the cluster is considered in error.
If the percentage is respected but there is at least one unhealthy application, the health is
evaluated as Warning.
This is calculated by dividing the number of unhealthy applications over the total number of
application instances in the cluster, excluding applications of application types that are
included in the ApplicationTypeHealthPolicyMap.
The computation rounds up to tolerate one failure on small numbers of applications. Default
percentage is zero.
:type max_percent_unhealthy_applications: int
:param application_health_policies: Defines the application health policy map used to evaluate
the health of an application or one of its children entities.
:type application_health_policies: dict[str,
~azure.mgmt.servicefabric.models.ApplicationHealthPolicy]
"""
_validation = {
'max_percent_unhealthy_nodes': {'maximum': 100, 'minimum': 0},
'max_percent_unhealthy_applications': {'maximum': 100, 'minimum': 0},
}
_attribute_map = {
'max_percent_unhealthy_nodes': {'key': 'maxPercentUnhealthyNodes', 'type': 'int'},
'max_percent_unhealthy_applications': {'key': 'maxPercentUnhealthyApplications', 'type': 'int'},
'application_health_policies': {'key': 'applicationHealthPolicies', 'type': '{ApplicationHealthPolicy}'},
}
def __init__(
self,
**kwargs
):
super(ClusterHealthPolicy, self).__init__(**kwargs)
self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', 0)
self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', 0)
self.application_health_policies = kwargs.get('application_health_policies', None)
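# Illustrative usage sketch (not part of the generated client): constructs a
# ClusterHealthPolicy that tolerates up to 10% unhealthy nodes and applications.
# The percentage values are assumptions chosen only for demonstration.
def _example_cluster_health_policy():
    return ClusterHealthPolicy(
        max_percent_unhealthy_nodes=10,
        max_percent_unhealthy_applications=10,
    )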
class ClusterListResult(msrest.serialization.Model):
"""Cluster list results.
:param value:
:type value: list[~azure.mgmt.servicefabric.models.Cluster]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Cluster]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClusterListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class ClusterUpdateParameters(msrest.serialization.Model):
"""Cluster update request.
:param tags: A set of tags. Cluster update parameters.
:type tags: dict[str, str]
:param add_on_features: The list of add-on features to enable in the cluster.
:type add_on_features: list[str or ~azure.mgmt.servicefabric.models.AddOnFeatures]
:param certificate: The certificate to use for securing the cluster. The certificate provided
will be used for node to node security within the cluster, SSL certificate for cluster
management endpoint and default admin client.
:type certificate: ~azure.mgmt.servicefabric.models.CertificateDescription
:param certificate_common_names: Describes a list of server certificates referenced by common
name that are used to secure the cluster.
:type certificate_common_names: ~azure.mgmt.servicefabric.models.ServerCertificateCommonNames
:param client_certificate_common_names: The list of client certificates referenced by common
name that are allowed to manage the cluster. This will overwrite the existing list.
:type client_certificate_common_names:
list[~azure.mgmt.servicefabric.models.ClientCertificateCommonName]
:param client_certificate_thumbprints: The list of client certificates referenced by thumbprint
that are allowed to manage the cluster. This will overwrite the existing list.
:type client_certificate_thumbprints:
list[~azure.mgmt.servicefabric.models.ClientCertificateThumbprint]
:param cluster_code_version: The Service Fabric runtime version of the cluster. This property
     can only be set by the user when **upgradeMode** is set to 'Manual'. To get the list of available
Service Fabric versions for new clusters use `ClusterVersion API <./ClusterVersion.md>`_. To
get the list of available version for existing clusters use **availableClusterVersions**.
:type cluster_code_version: str
:param event_store_service_enabled: Indicates if the event store service is enabled.
:type event_store_service_enabled: bool
:param fabric_settings: The list of custom fabric settings to configure the cluster. This will
overwrite the existing list.
:type fabric_settings: list[~azure.mgmt.servicefabric.models.SettingsSectionDescription]
:param node_types: The list of node types in the cluster. This will overwrite the existing
list.
:type node_types: list[~azure.mgmt.servicefabric.models.NodeTypeDescription]
:param reliability_level: The reliability level sets the replica set size of system services.
Learn about `ReliabilityLevel
<https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-capacity>`_.
* None - Run the System services with a target replica set count of 1. This should only be
used for test clusters.
* Bronze - Run the System services with a target replica set count of 3. This should only be
used for test clusters.
* Silver - Run the System services with a target replica set count of 5.
* Gold - Run the System services with a target replica set count of 7.
* Platinum - Run the System services with a target replica set count of 9. Possible values
include: "None", "Bronze", "Silver", "Gold", "Platinum".
:type reliability_level: str or ~azure.mgmt.servicefabric.models.ReliabilityLevel
:param reverse_proxy_certificate: The server certificate used by reverse proxy.
:type reverse_proxy_certificate: ~azure.mgmt.servicefabric.models.CertificateDescription
:param upgrade_description: The policy to use when upgrading the cluster.
:type upgrade_description: ~azure.mgmt.servicefabric.models.ClusterUpgradePolicy
:param application_type_versions_cleanup_policy: The policy used to clean up unused versions.
:type application_type_versions_cleanup_policy:
~azure.mgmt.servicefabric.models.ApplicationTypeVersionsCleanupPolicy
    :param upgrade_mode: The upgrade mode of the cluster when a new Service Fabric runtime version is
available. Possible values include: "Automatic", "Manual". Default value: "Automatic".
:type upgrade_mode: str or ~azure.mgmt.servicefabric.models.UpgradeMode
:param sf_zonal_upgrade_mode: This property controls the logical grouping of VMs in upgrade
domains (UDs). This property can't be modified if a node type with multiple Availability Zones
is already present in the cluster. Possible values include: "Parallel", "Hierarchical".
:type sf_zonal_upgrade_mode: str or ~azure.mgmt.servicefabric.models.SfZonalUpgradeMode
:param vmss_zonal_upgrade_mode: This property defines the upgrade mode for the virtual machine
scale set, it is mandatory if a node type with multiple Availability Zones is added. Possible
values include: "Parallel", "Hierarchical".
:type vmss_zonal_upgrade_mode: str or ~azure.mgmt.servicefabric.models.VmssZonalUpgradeMode
:param infrastructure_service_manager: Indicates if infrastructure service manager is enabled.
:type infrastructure_service_manager: bool
:param upgrade_wave: Indicates when new cluster runtime version upgrades will be applied after
     they are released. By default it is Wave0. Only applies when **upgradeMode** is set to
'Automatic'. Possible values include: "Wave0", "Wave1", "Wave2".
:type upgrade_wave: str or ~azure.mgmt.servicefabric.models.ClusterUpgradeCadence
:param upgrade_pause_start_timestamp_utc: The start timestamp to pause runtime version upgrades
on the cluster (UTC).
:type upgrade_pause_start_timestamp_utc: ~datetime.datetime
:param upgrade_pause_end_timestamp_utc: The end timestamp of pause runtime version upgrades on
the cluster (UTC).
:type upgrade_pause_end_timestamp_utc: ~datetime.datetime
:param wave_upgrade_paused: Boolean to pause automatic runtime version upgrades to the cluster.
:type wave_upgrade_paused: bool
:param notifications: Indicates a list of notification channels for cluster events.
:type notifications: list[~azure.mgmt.servicefabric.models.Notification]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'add_on_features': {'key': 'properties.addOnFeatures', 'type': '[str]'},
'certificate': {'key': 'properties.certificate', 'type': 'CertificateDescription'},
'certificate_common_names': {'key': 'properties.certificateCommonNames', 'type': 'ServerCertificateCommonNames'},
'client_certificate_common_names': {'key': 'properties.clientCertificateCommonNames', 'type': '[ClientCertificateCommonName]'},
'client_certificate_thumbprints': {'key': 'properties.clientCertificateThumbprints', 'type': '[ClientCertificateThumbprint]'},
'cluster_code_version': {'key': 'properties.clusterCodeVersion', 'type': 'str'},
'event_store_service_enabled': {'key': 'properties.eventStoreServiceEnabled', 'type': 'bool'},
'fabric_settings': {'key': 'properties.fabricSettings', 'type': '[SettingsSectionDescription]'},
'node_types': {'key': 'properties.nodeTypes', 'type': '[NodeTypeDescription]'},
'reliability_level': {'key': 'properties.reliabilityLevel', 'type': 'str'},
'reverse_proxy_certificate': {'key': 'properties.reverseProxyCertificate', 'type': 'CertificateDescription'},
'upgrade_description': {'key': 'properties.upgradeDescription', 'type': 'ClusterUpgradePolicy'},
'application_type_versions_cleanup_policy': {'key': 'properties.applicationTypeVersionsCleanupPolicy', 'type': 'ApplicationTypeVersionsCleanupPolicy'},
'upgrade_mode': {'key': 'properties.upgradeMode', 'type': 'str'},
'sf_zonal_upgrade_mode': {'key': 'properties.sfZonalUpgradeMode', 'type': 'str'},
'vmss_zonal_upgrade_mode': {'key': 'properties.vmssZonalUpgradeMode', 'type': 'str'},
'infrastructure_service_manager': {'key': 'properties.infrastructureServiceManager', 'type': 'bool'},
'upgrade_wave': {'key': 'properties.upgradeWave', 'type': 'str'},
'upgrade_pause_start_timestamp_utc': {'key': 'properties.upgradePauseStartTimestampUtc', 'type': 'iso-8601'},
'upgrade_pause_end_timestamp_utc': {'key': 'properties.upgradePauseEndTimestampUtc', 'type': 'iso-8601'},
'wave_upgrade_paused': {'key': 'properties.waveUpgradePaused', 'type': 'bool'},
'notifications': {'key': 'properties.notifications', 'type': '[Notification]'},
}
def __init__(
self,
**kwargs
):
super(ClusterUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.add_on_features = kwargs.get('add_on_features', None)
self.certificate = kwargs.get('certificate', None)
self.certificate_common_names = kwargs.get('certificate_common_names', None)
self.client_certificate_common_names = kwargs.get('client_certificate_common_names', None)
self.client_certificate_thumbprints = kwargs.get('client_certificate_thumbprints', None)
self.cluster_code_version = kwargs.get('cluster_code_version', None)
self.event_store_service_enabled = kwargs.get('event_store_service_enabled', None)
self.fabric_settings = kwargs.get('fabric_settings', None)
self.node_types = kwargs.get('node_types', None)
self.reliability_level = kwargs.get('reliability_level', None)
self.reverse_proxy_certificate = kwargs.get('reverse_proxy_certificate', None)
self.upgrade_description = kwargs.get('upgrade_description', None)
self.application_type_versions_cleanup_policy = kwargs.get('application_type_versions_cleanup_policy', None)
self.upgrade_mode = kwargs.get('upgrade_mode', "Automatic")
self.sf_zonal_upgrade_mode = kwargs.get('sf_zonal_upgrade_mode', None)
self.vmss_zonal_upgrade_mode = kwargs.get('vmss_zonal_upgrade_mode', None)
self.infrastructure_service_manager = kwargs.get('infrastructure_service_manager', None)
self.upgrade_wave = kwargs.get('upgrade_wave', None)
self.upgrade_pause_start_timestamp_utc = kwargs.get('upgrade_pause_start_timestamp_utc', None)
self.upgrade_pause_end_timestamp_utc = kwargs.get('upgrade_pause_end_timestamp_utc', None)
self.wave_upgrade_paused = kwargs.get('wave_upgrade_paused', None)
self.notifications = kwargs.get('notifications', None)
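# Illustrative usage sketch (values are assumptions for demonstration): a minimal
# ClusterUpdateParameters that updates tags and enables the event store service;
# any subset of the documented parameters may be supplied.
def _example_cluster_update_parameters():
    return ClusterUpdateParameters(
        tags={"environment": "test"},
        event_store_service_enabled=True,
        upgrade_mode="Automatic",
    )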
class ClusterUpgradeDeltaHealthPolicy(msrest.serialization.Model):
"""Describes the delta health policies for the cluster upgrade.
All required parameters must be populated in order to send to Azure.
    :param max_percent_delta_unhealthy_nodes: Required. The maximum allowed percentage of node
     health degradation during cluster upgrades.
The delta is measured between the state of the nodes at the beginning of upgrade and the state
of the nodes at the time of the health evaluation.
The check is performed after every upgrade domain upgrade completion to make sure the global
state of the cluster is within tolerated limits.
:type max_percent_delta_unhealthy_nodes: int
:param max_percent_upgrade_domain_delta_unhealthy_nodes: Required. The maximum allowed
     percentage of upgrade domain node health degradation during cluster upgrades.
The delta is measured between the state of the upgrade domain nodes at the beginning of
upgrade and the state of the upgrade domain nodes at the time of the health evaluation.
The check is performed after every upgrade domain upgrade completion for all completed upgrade
domains to make sure the state of the upgrade domains is within tolerated limits.
:type max_percent_upgrade_domain_delta_unhealthy_nodes: int
:param max_percent_delta_unhealthy_applications: Required. The maximum allowed percentage of
     application health degradation during cluster upgrades.
The delta is measured between the state of the applications at the beginning of upgrade and
the state of the applications at the time of the health evaluation.
The check is performed after every upgrade domain upgrade completion to make sure the global
state of the cluster is within tolerated limits. System services are not included in this.
:type max_percent_delta_unhealthy_applications: int
:param application_delta_health_policies: Defines the application delta health policy map used
to evaluate the health of an application or one of its child entities when upgrading the
cluster.
:type application_delta_health_policies: dict[str,
~azure.mgmt.servicefabric.models.ApplicationDeltaHealthPolicy]
"""
_validation = {
'max_percent_delta_unhealthy_nodes': {'required': True, 'maximum': 100, 'minimum': 0},
'max_percent_upgrade_domain_delta_unhealthy_nodes': {'required': True, 'maximum': 100, 'minimum': 0},
'max_percent_delta_unhealthy_applications': {'required': True, 'maximum': 100, 'minimum': 0},
}
_attribute_map = {
'max_percent_delta_unhealthy_nodes': {'key': 'maxPercentDeltaUnhealthyNodes', 'type': 'int'},
'max_percent_upgrade_domain_delta_unhealthy_nodes': {'key': 'maxPercentUpgradeDomainDeltaUnhealthyNodes', 'type': 'int'},
'max_percent_delta_unhealthy_applications': {'key': 'maxPercentDeltaUnhealthyApplications', 'type': 'int'},
'application_delta_health_policies': {'key': 'applicationDeltaHealthPolicies', 'type': '{ApplicationDeltaHealthPolicy}'},
}
def __init__(
self,
**kwargs
):
super(ClusterUpgradeDeltaHealthPolicy, self).__init__(**kwargs)
self.max_percent_delta_unhealthy_nodes = kwargs['max_percent_delta_unhealthy_nodes']
self.max_percent_upgrade_domain_delta_unhealthy_nodes = kwargs['max_percent_upgrade_domain_delta_unhealthy_nodes']
self.max_percent_delta_unhealthy_applications = kwargs['max_percent_delta_unhealthy_applications']
self.application_delta_health_policies = kwargs.get('application_delta_health_policies', None)
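# Illustrative usage sketch: all three delta percentages are required (see
# _validation above); zero is the strictest setting and is used here only as a
# demonstration value.
def _example_cluster_upgrade_delta_health_policy():
    return ClusterUpgradeDeltaHealthPolicy(
        max_percent_delta_unhealthy_nodes=0,
        max_percent_upgrade_domain_delta_unhealthy_nodes=0,
        max_percent_delta_unhealthy_applications=0,
    )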
class ClusterUpgradePolicy(msrest.serialization.Model):
"""Describes the policy used when upgrading the cluster.
All required parameters must be populated in order to send to Azure.
:param force_restart: If true, then processes are forcefully restarted during upgrade even when
the code version has not changed (the upgrade only changes configuration or data).
:type force_restart: bool
:param upgrade_replica_set_check_timeout: Required. The maximum amount of time to block
processing of an upgrade domain and prevent loss of availability when there are unexpected
issues. When this timeout expires, processing of the upgrade domain will proceed regardless of
availability loss issues. The timeout is reset at the start of each upgrade domain. The timeout
can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
:type upgrade_replica_set_check_timeout: str
:param health_check_wait_duration: Required. The length of time to wait after completing an
upgrade domain before performing health checks. The duration can be in either hh:mm:ss or in
d.hh:mm:ss.ms format.
:type health_check_wait_duration: str
:param health_check_stable_duration: Required. The amount of time that the application or
cluster must remain healthy before the upgrade proceeds to the next upgrade domain. The
duration can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
:type health_check_stable_duration: str
:param health_check_retry_timeout: Required. The amount of time to retry health evaluation when
the application or cluster is unhealthy before the upgrade rolls back. The timeout can be in
either hh:mm:ss or in d.hh:mm:ss.ms format.
:type health_check_retry_timeout: str
:param upgrade_timeout: Required. The amount of time the overall upgrade has to complete before
the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
:type upgrade_timeout: str
:param upgrade_domain_timeout: Required. The amount of time each upgrade domain has to complete
before the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms
format.
:type upgrade_domain_timeout: str
:param health_policy: Required. The cluster health policy used when upgrading the cluster.
:type health_policy: ~azure.mgmt.servicefabric.models.ClusterHealthPolicy
:param delta_health_policy: The cluster delta health policy used when upgrading the cluster.
:type delta_health_policy: ~azure.mgmt.servicefabric.models.ClusterUpgradeDeltaHealthPolicy
"""
_validation = {
'upgrade_replica_set_check_timeout': {'required': True},
'health_check_wait_duration': {'required': True},
'health_check_stable_duration': {'required': True},
'health_check_retry_timeout': {'required': True},
'upgrade_timeout': {'required': True},
'upgrade_domain_timeout': {'required': True},
'health_policy': {'required': True},
}
_attribute_map = {
'force_restart': {'key': 'forceRestart', 'type': 'bool'},
'upgrade_replica_set_check_timeout': {'key': 'upgradeReplicaSetCheckTimeout', 'type': 'str'},
'health_check_wait_duration': {'key': 'healthCheckWaitDuration', 'type': 'str'},
'health_check_stable_duration': {'key': 'healthCheckStableDuration', 'type': 'str'},
'health_check_retry_timeout': {'key': 'healthCheckRetryTimeout', 'type': 'str'},
'upgrade_timeout': {'key': 'upgradeTimeout', 'type': 'str'},
'upgrade_domain_timeout': {'key': 'upgradeDomainTimeout', 'type': 'str'},
'health_policy': {'key': 'healthPolicy', 'type': 'ClusterHealthPolicy'},
'delta_health_policy': {'key': 'deltaHealthPolicy', 'type': 'ClusterUpgradeDeltaHealthPolicy'},
}
def __init__(
self,
**kwargs
):
super(ClusterUpgradePolicy, self).__init__(**kwargs)
self.force_restart = kwargs.get('force_restart', None)
self.upgrade_replica_set_check_timeout = kwargs['upgrade_replica_set_check_timeout']
self.health_check_wait_duration = kwargs['health_check_wait_duration']
self.health_check_stable_duration = kwargs['health_check_stable_duration']
self.health_check_retry_timeout = kwargs['health_check_retry_timeout']
self.upgrade_timeout = kwargs['upgrade_timeout']
self.upgrade_domain_timeout = kwargs['upgrade_domain_timeout']
self.health_policy = kwargs['health_policy']
self.delta_health_policy = kwargs.get('delta_health_policy', None)
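# Illustrative usage sketch: every timeout below is required and uses the hh:mm:ss
# or d.hh:mm:ss.ms string format documented above. The specific durations and the
# zero-tolerance health policy are assumptions chosen only for demonstration.
def _example_cluster_upgrade_policy():
    return ClusterUpgradePolicy(
        upgrade_replica_set_check_timeout="00:10:00",
        health_check_wait_duration="00:00:30",
        health_check_stable_duration="00:01:00",
        health_check_retry_timeout="00:45:00",
        upgrade_timeout="12:00:00",
        upgrade_domain_timeout="02:00:00",
        health_policy=ClusterHealthPolicy(
            max_percent_unhealthy_nodes=0,
            max_percent_unhealthy_applications=0,
        ),
    )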
class ClusterVersionDetails(msrest.serialization.Model):
"""The detail of the Service Fabric runtime version result.
:param code_version: The Service Fabric runtime version of the cluster.
:type code_version: str
:param support_expiry_utc: The date of expiry of support of the version.
:type support_expiry_utc: str
    :param environment: Indicates if this version is for the Windows or Linux operating system.
Possible values include: "Windows", "Linux".
:type environment: str or ~azure.mgmt.servicefabric.models.ClusterEnvironment
"""
_attribute_map = {
'code_version': {'key': 'codeVersion', 'type': 'str'},
'support_expiry_utc': {'key': 'supportExpiryUtc', 'type': 'str'},
'environment': {'key': 'environment', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClusterVersionDetails, self).__init__(**kwargs)
self.code_version = kwargs.get('code_version', None)
self.support_expiry_utc = kwargs.get('support_expiry_utc', None)
self.environment = kwargs.get('environment', None)
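# Illustrative usage sketch: a runtime version entry as it might appear in
# availableClusterVersions. The version string and expiry date are hypothetical
# placeholders, not real support data.
def _example_cluster_version_details():
    return ClusterVersionDetails(
        code_version="7.2.457.9590",
        support_expiry_utc="2022-06-30T00:00:00",
        environment="Windows",
    )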
class DiagnosticsStorageAccountConfig(msrest.serialization.Model):
"""The storage account information for storing Service Fabric diagnostic logs.
All required parameters must be populated in order to send to Azure.
:param storage_account_name: Required. The Azure storage account name.
:type storage_account_name: str
:param protected_account_key_name: Required. The protected diagnostics storage key name.
:type protected_account_key_name: str
:param protected_account_key_name2: The secondary protected diagnostics storage key name. If
     one of the storage account keys is rotated, the cluster will fall back to using the other.
:type protected_account_key_name2: str
:param blob_endpoint: Required. The blob endpoint of the azure storage account.
:type blob_endpoint: str
:param queue_endpoint: Required. The queue endpoint of the azure storage account.
:type queue_endpoint: str
:param table_endpoint: Required. The table endpoint of the azure storage account.
:type table_endpoint: str
"""
_validation = {
'storage_account_name': {'required': True},
'protected_account_key_name': {'required': True},
'blob_endpoint': {'required': True},
'queue_endpoint': {'required': True},
'table_endpoint': {'required': True},
}
_attribute_map = {
'storage_account_name': {'key': 'storageAccountName', 'type': 'str'},
'protected_account_key_name': {'key': 'protectedAccountKeyName', 'type': 'str'},
'protected_account_key_name2': {'key': 'protectedAccountKeyName2', 'type': 'str'},
'blob_endpoint': {'key': 'blobEndpoint', 'type': 'str'},
'queue_endpoint': {'key': 'queueEndpoint', 'type': 'str'},
'table_endpoint': {'key': 'tableEndpoint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DiagnosticsStorageAccountConfig, self).__init__(**kwargs)
self.storage_account_name = kwargs['storage_account_name']
self.protected_account_key_name = kwargs['protected_account_key_name']
self.protected_account_key_name2 = kwargs.get('protected_account_key_name2', None)
self.blob_endpoint = kwargs['blob_endpoint']
self.queue_endpoint = kwargs['queue_endpoint']
self.table_endpoint = kwargs['table_endpoint']
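# Illustrative usage sketch: the storage account name, protected key name and the
# three endpoints are required. The account name and endpoint URLs below are
# hypothetical placeholders, not real resources.
def _example_diagnostics_storage_account_config():
    return DiagnosticsStorageAccountConfig(
        storage_account_name="sfdiagstorage",
        protected_account_key_name="StorageAccountKey1",
        blob_endpoint="https://sfdiagstorage.blob.core.windows.net/",
        queue_endpoint="https://sfdiagstorage.queue.core.windows.net/",
        table_endpoint="https://sfdiagstorage.table.core.windows.net/",
    )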
class EndpointRangeDescription(msrest.serialization.Model):
"""Port range details.
All required parameters must be populated in order to send to Azure.
:param start_port: Required. Starting port of a range of ports.
:type start_port: int
:param end_port: Required. End port of a range of ports.
:type end_port: int
"""
_validation = {
'start_port': {'required': True},
'end_port': {'required': True},
}
_attribute_map = {
'start_port': {'key': 'startPort', 'type': 'int'},
'end_port': {'key': 'endPort', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(EndpointRangeDescription, self).__init__(**kwargs)
self.start_port = kwargs['start_port']
self.end_port = kwargs['end_port']
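# Illustrative usage sketch: a port range; both bounds are required. The values
# below (a typical ephemeral port range) are assumptions for demonstration.
def _example_endpoint_range():
    return EndpointRangeDescription(start_port=49152, end_port=65534)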
class ErrorModel(msrest.serialization.Model):
"""The structure of the error.
:param error: The error details.
:type error: ~azure.mgmt.servicefabric.models.ErrorModelError
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorModelError'},
}
def __init__(
self,
**kwargs
):
super(ErrorModel, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class ErrorModelError(msrest.serialization.Model):
"""The error details.
:param code: The error code.
:type code: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorModelError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class ManagedIdentity(msrest.serialization.Model):
"""Describes the managed identities for an Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of the managed identity. This property will only be
provided for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the managed identity. This property will only be provided for
a system assigned identity.
:vartype tenant_id: str
:param type: The type of managed identity for the resource. Possible values include:
"SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned", "None".
:type type: str or ~azure.mgmt.servicefabric.models.ManagedIdentityType
:param user_assigned_identities: The list of user identities associated with the resource. The
user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:type user_assigned_identities: dict[str,
~azure.mgmt.servicefabric.models.UserAssignedIdentity]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentity}'},
}
def __init__(
self,
**kwargs
):
super(ManagedIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs.get('type', None)
self.user_assigned_identities = kwargs.get('user_assigned_identities', None)
class PartitionSchemeDescription(msrest.serialization.Model):
"""Describes how the service is partitioned.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: NamedPartitionSchemeDescription, SingletonPartitionSchemeDescription, UniformInt64RangePartitionSchemeDescription.
All required parameters must be populated in order to send to Azure.
    :param partition_scheme: Required. Specifies how the service is partitioned. Constant filled by
server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named".
:type partition_scheme: str or ~azure.mgmt.servicefabric.models.PartitionScheme
"""
_validation = {
'partition_scheme': {'required': True},
}
_attribute_map = {
'partition_scheme': {'key': 'partitionScheme', 'type': 'str'},
}
_subtype_map = {
'partition_scheme': {'Named': 'NamedPartitionSchemeDescription', 'Singleton': 'SingletonPartitionSchemeDescription', 'UniformInt64Range': 'UniformInt64RangePartitionSchemeDescription'}
}
def __init__(
self,
**kwargs
):
super(PartitionSchemeDescription, self).__init__(**kwargs)
self.partition_scheme = None # type: Optional[str]
class NamedPartitionSchemeDescription(PartitionSchemeDescription):
"""Describes the named partition scheme of the service.
All required parameters must be populated in order to send to Azure.
    :param partition_scheme: Required. Specifies how the service is partitioned. Constant filled by
server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named".
:type partition_scheme: str or ~azure.mgmt.servicefabric.models.PartitionScheme
:param count: Required. The number of partitions.
:type count: int
:param names: Required. Array of size specified by the ‘count’ parameter, for the names of the
partitions.
:type names: list[str]
"""
_validation = {
'partition_scheme': {'required': True},
'count': {'required': True},
'names': {'required': True},
}
_attribute_map = {
'partition_scheme': {'key': 'partitionScheme', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
'names': {'key': 'names', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(NamedPartitionSchemeDescription, self).__init__(**kwargs)
self.partition_scheme = 'Named' # type: str
self.count = kwargs['count']
self.names = kwargs['names']
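# Illustrative usage sketch: a named partition scheme with two partitions. The
# partition names are assumptions for demonstration; 'names' must contain exactly
# 'count' entries, as documented above.
def _example_named_partition_scheme():
    return NamedPartitionSchemeDescription(
        count=2,
        names=["Partition1", "Partition2"],
    )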
class NodeTypeDescription(msrest.serialization.Model):
"""Describes a node type in the cluster, each node type represents sub set of nodes in the cluster.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the node type.
:type name: str
:param placement_properties: The placement tags applied to nodes in the node type, which can be
used to indicate where certain services (workload) should run.
:type placement_properties: dict[str, str]
    :param capacities: The capacity tags applied to the nodes in the node type; the cluster
resource manager uses these tags to understand how much resource a node has.
:type capacities: dict[str, str]
:param client_connection_endpoint_port: Required. The TCP cluster management endpoint port.
:type client_connection_endpoint_port: int
:param http_gateway_endpoint_port: Required. The HTTP cluster management endpoint port.
:type http_gateway_endpoint_port: int
:param durability_level: The durability level of the node type. Learn about `DurabilityLevel
<https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-capacity>`_.
* Bronze - No privileges. This is the default.
* Silver - The infrastructure jobs can be paused for a duration of 10 minutes per UD.
* Gold - The infrastructure jobs can be paused for a duration of 2 hours per UD. Gold
durability can be enabled only on full node VM skus like D15_V2, G5 etc. Possible values
include: "Bronze", "Silver", "Gold".
:type durability_level: str or ~azure.mgmt.servicefabric.models.DurabilityLevel
    :param application_ports: The range of ports from which the cluster assigns ports to Service Fabric
applications.
:type application_ports: ~azure.mgmt.servicefabric.models.EndpointRangeDescription
:param ephemeral_ports: The range of ephemeral ports that nodes in this node type should be
configured with.
:type ephemeral_ports: ~azure.mgmt.servicefabric.models.EndpointRangeDescription
:param is_primary: Required. The node type on which system services will run. Only one node
type should be marked as primary. Primary node type cannot be deleted or changed for existing
clusters.
:type is_primary: bool
:param vm_instance_count: Required. VMInstanceCount should be 1 to n, where n indicates the
number of VM instances corresponding to this nodeType. VMInstanceCount = 0 can be done only in
these scenarios: NodeType is a secondary nodeType. Durability = Bronze or Durability >= Bronze
     and InfrastructureServiceManager = true. If VMInstanceCount = 0, the VMs for this
nodeType will not be used for the initial cluster size computation.
:type vm_instance_count: int
:param reverse_proxy_endpoint_port: The endpoint used by reverse proxy.
:type reverse_proxy_endpoint_port: int
:param is_stateless: Indicates if the node type can only host Stateless workloads.
:type is_stateless: bool
:param multiple_availability_zones: Indicates if the node type is enabled to support multiple
zones.
:type multiple_availability_zones: bool
"""
_validation = {
'name': {'required': True},
'client_connection_endpoint_port': {'required': True},
'http_gateway_endpoint_port': {'required': True},
'is_primary': {'required': True},
'vm_instance_count': {'required': True, 'maximum': 2147483647, 'minimum': 0},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'placement_properties': {'key': 'placementProperties', 'type': '{str}'},
'capacities': {'key': 'capacities', 'type': '{str}'},
'client_connection_endpoint_port': {'key': 'clientConnectionEndpointPort', 'type': 'int'},
'http_gateway_endpoint_port': {'key': 'httpGatewayEndpointPort', 'type': 'int'},
'durability_level': {'key': 'durabilityLevel', 'type': 'str'},
'application_ports': {'key': 'applicationPorts', 'type': 'EndpointRangeDescription'},
'ephemeral_ports': {'key': 'ephemeralPorts', 'type': 'EndpointRangeDescription'},
'is_primary': {'key': 'isPrimary', 'type': 'bool'},
'vm_instance_count': {'key': 'vmInstanceCount', 'type': 'int'},
'reverse_proxy_endpoint_port': {'key': 'reverseProxyEndpointPort', 'type': 'int'},
'is_stateless': {'key': 'isStateless', 'type': 'bool'},
'multiple_availability_zones': {'key': 'multipleAvailabilityZones', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(NodeTypeDescription, self).__init__(**kwargs)
self.name = kwargs['name']
self.placement_properties = kwargs.get('placement_properties', None)
self.capacities = kwargs.get('capacities', None)
self.client_connection_endpoint_port = kwargs['client_connection_endpoint_port']
self.http_gateway_endpoint_port = kwargs['http_gateway_endpoint_port']
self.durability_level = kwargs.get('durability_level', None)
self.application_ports = kwargs.get('application_ports', None)
self.ephemeral_ports = kwargs.get('ephemeral_ports', None)
self.is_primary = kwargs['is_primary']
self.vm_instance_count = kwargs['vm_instance_count']
self.reverse_proxy_endpoint_port = kwargs.get('reverse_proxy_endpoint_port', None)
self.is_stateless = kwargs.get('is_stateless', None)
self.multiple_availability_zones = kwargs.get('multiple_availability_zones', None)
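# Illustrative usage sketch: a primary node type. Ports 19000/19080 are the values
# commonly shown in Service Fabric samples for the client connection and HTTP
# gateway endpoints; all values here are assumptions for demonstration.
def _example_node_type():
    return NodeTypeDescription(
        name="nt1vm",
        client_connection_endpoint_port=19000,
        http_gateway_endpoint_port=19080,
        is_primary=True,
        vm_instance_count=5,
        durability_level="Silver",
        application_ports=EndpointRangeDescription(start_port=20000, end_port=30000),
        ephemeral_ports=EndpointRangeDescription(start_port=49152, end_port=65534),
    )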
class Notification(msrest.serialization.Model):
"""Describes the notification channel for cluster events.
All required parameters must be populated in order to send to Azure.
:param is_enabled: Required. Indicates if the notification is enabled.
:type is_enabled: bool
:param notification_category: Required. The category of notification. Possible values include:
"WaveProgress".
:type notification_category: str or ~azure.mgmt.servicefabric.models.NotificationCategory
:param notification_level: Required. The level of notification. Possible values include:
"Critical", "All".
:type notification_level: str or ~azure.mgmt.servicefabric.models.NotificationLevel
:param notification_targets: Required. List of targets that subscribe to the notification.
:type notification_targets: list[~azure.mgmt.servicefabric.models.NotificationTarget]
"""
_validation = {
'is_enabled': {'required': True},
'notification_category': {'required': True},
'notification_level': {'required': True},
'notification_targets': {'required': True},
}
_attribute_map = {
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'notification_category': {'key': 'notificationCategory', 'type': 'str'},
'notification_level': {'key': 'notificationLevel', 'type': 'str'},
'notification_targets': {'key': 'notificationTargets', 'type': '[NotificationTarget]'},
}
def __init__(
self,
**kwargs
):
super(Notification, self).__init__(**kwargs)
self.is_enabled = kwargs['is_enabled']
self.notification_category = kwargs['notification_category']
self.notification_level = kwargs['notification_level']
self.notification_targets = kwargs['notification_targets']
class NotificationTarget(msrest.serialization.Model):
"""Describes the notification target properties.
All required parameters must be populated in order to send to Azure.
:param notification_channel: Required. The notification channel indicates the type of receivers
subscribed to the notification, either user or subscription. Possible values include:
"EmailUser", "EmailSubscription".
:type notification_channel: str or ~azure.mgmt.servicefabric.models.NotificationChannel
:param receivers: Required. List of targets that subscribe to the notification.
:type receivers: list[str]
"""
_validation = {
'notification_channel': {'required': True},
'receivers': {'required': True},
}
_attribute_map = {
'notification_channel': {'key': 'notificationChannel', 'type': 'str'},
'receivers': {'key': 'receivers', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(NotificationTarget, self).__init__(**kwargs)
self.notification_channel = kwargs['notification_channel']
self.receivers = kwargs['receivers']
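# Illustrative usage sketch: a notification that emails a single receiver about
# critical wave-progress events. The category, level and channel strings come from
# the documented possible values; the receiver address is a hypothetical placeholder.
def _example_notification():
    return Notification(
        is_enabled=True,
        notification_category="WaveProgress",
        notification_level="Critical",
        notification_targets=[
            NotificationTarget(
                notification_channel="EmailUser",
                receivers=["clusteradmin@contoso.com"],
            )
        ],
    )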
class OperationListResult(msrest.serialization.Model):
"""Describes the result of the request to list Service Fabric resource provider operations.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: List of operations supported by the Service Fabric resource provider.
:type value: list[~azure.mgmt.servicefabric.models.OperationResult]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[OperationResult]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class OperationResult(msrest.serialization.Model):
"""Available operation list result.
:param name: The name of the operation.
:type name: str
:param is_data_action: Indicates whether the operation is a data action.
:type is_data_action: bool
:param display: The object that represents the operation.
:type display: ~azure.mgmt.servicefabric.models.AvailableOperationDisplay
:param origin: Origin result.
:type origin: str
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'AvailableOperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationResult, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.is_data_action = kwargs.get('is_data_action', None)
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.next_link = kwargs.get('next_link', None)
class ServerCertificateCommonName(msrest.serialization.Model):
"""Describes the server certificate details using common name.
All required parameters must be populated in order to send to Azure.
:param certificate_common_name: Required. The common name of the server certificate.
:type certificate_common_name: str
:param certificate_issuer_thumbprint: Required. The issuer thumbprint of the server
certificate.
:type certificate_issuer_thumbprint: str
"""
_validation = {
'certificate_common_name': {'required': True},
'certificate_issuer_thumbprint': {'required': True},
}
_attribute_map = {
'certificate_common_name': {'key': 'certificateCommonName', 'type': 'str'},
'certificate_issuer_thumbprint': {'key': 'certificateIssuerThumbprint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServerCertificateCommonName, self).__init__(**kwargs)
self.certificate_common_name = kwargs['certificate_common_name']
self.certificate_issuer_thumbprint = kwargs['certificate_issuer_thumbprint']
class ServerCertificateCommonNames(msrest.serialization.Model):
"""Describes a list of server certificates referenced by common name that are used to secure the cluster.
:param common_names: The list of server certificates referenced by common name that are used to
secure the cluster.
:type common_names: list[~azure.mgmt.servicefabric.models.ServerCertificateCommonName]
:param x509_store_name: The local certificate store location. Possible values include:
"AddressBook", "AuthRoot", "CertificateAuthority", "Disallowed", "My", "Root", "TrustedPeople",
"TrustedPublisher".
:type x509_store_name: str or ~azure.mgmt.servicefabric.models.StoreName
"""
_attribute_map = {
'common_names': {'key': 'commonNames', 'type': '[ServerCertificateCommonName]'},
'x509_store_name': {'key': 'x509StoreName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServerCertificateCommonNames, self).__init__(**kwargs)
self.common_names = kwargs.get('common_names', None)
self.x509_store_name = kwargs.get('x509_store_name', None)
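# Illustrative usage sketch: a server certificate referenced by common name and
# looked up in the 'My' store. The common name and issuer thumbprint are
# hypothetical placeholders.
def _example_server_certificate_common_names():
    return ServerCertificateCommonNames(
        common_names=[
            ServerCertificateCommonName(
                certificate_common_name="mycluster.contoso.com",
                certificate_issuer_thumbprint="5F3660C715EBBDA31DB1FFDCF508302348DE8E7A",
            )
        ],
        x509_store_name="My",
    )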
class ServiceCorrelationDescription(msrest.serialization.Model):
"""Creates a particular correlation between services.
All required parameters must be populated in order to send to Azure.
:param scheme: Required. The ServiceCorrelationScheme which describes the relationship between
this service and the service specified via ServiceName. Possible values include: "Invalid",
"Affinity", "AlignedAffinity", "NonAlignedAffinity".
:type scheme: str or ~azure.mgmt.servicefabric.models.ServiceCorrelationScheme
:param service_name: Required. The name of the service that the correlation relationship is
established with.
:type service_name: str
"""
_validation = {
'scheme': {'required': True},
'service_name': {'required': True},
}
_attribute_map = {
'scheme': {'key': 'scheme', 'type': 'str'},
'service_name': {'key': 'serviceName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServiceCorrelationDescription, self).__init__(**kwargs)
self.scheme = kwargs['scheme']
self.service_name = kwargs['service_name']
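# Illustrative usage sketch: an aligned-affinity correlation with another service.
# The scheme string comes from the documented possible values; the service name is
# a hypothetical placeholder.
def _example_service_correlation():
    return ServiceCorrelationDescription(
        scheme="AlignedAffinity",
        service_name="fabric:/myApp/myOtherService",
    )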
class ServiceLoadMetricDescription(msrest.serialization.Model):
"""Specifies a metric to load balance a service during runtime.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the metric. If the service chooses to report load during
runtime, the load metric name should match the name that is specified in Name exactly. Note
that metric names are case sensitive.
:type name: str
:param weight: The service load metric relative weight, compared to other metrics configured
for this service, as a number. Possible values include: "Zero", "Low", "Medium", "High".
:type weight: str or ~azure.mgmt.servicefabric.models.ServiceLoadMetricWeight
:param primary_default_load: Used only for Stateful services. The default amount of load, as a
number, that this service creates for this metric when it is a Primary replica.
:type primary_default_load: int
:param secondary_default_load: Used only for Stateful services. The default amount of load, as
a number, that this service creates for this metric when it is a Secondary replica.
:type secondary_default_load: int
:param default_load: Used only for Stateless services. The default amount of load, as a number,
that this service creates for this metric.
:type default_load: int
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'weight': {'key': 'weight', 'type': 'str'},
'primary_default_load': {'key': 'primaryDefaultLoad', 'type': 'int'},
'secondary_default_load': {'key': 'secondaryDefaultLoad', 'type': 'int'},
'default_load': {'key': 'defaultLoad', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ServiceLoadMetricDescription, self).__init__(**kwargs)
self.name = kwargs['name']
self.weight = kwargs.get('weight', None)
self.primary_default_load = kwargs.get('primary_default_load', None)
self.secondary_default_load = kwargs.get('secondary_default_load', None)
self.default_load = kwargs.get('default_load', None)
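# Illustrative usage sketch: a load metric for a stateful service, weighted 'High',
# with different default loads for primary and secondary replicas. The metric name
# and numbers are assumptions for demonstration.
def _example_service_load_metric():
    return ServiceLoadMetricDescription(
        name="MemoryInMb",
        weight="High",
        primary_default_load=1024,
        secondary_default_load=512,
    )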
class ServicePlacementPolicyDescription(msrest.serialization.Model):
"""Describes the policy to be used for placement of a Service Fabric service.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: .
All required parameters must be populated in order to send to Azure.
    :param type: Required. The type of placement policy for a Service Fabric service. Following are
     the possible values. Constant filled by server. Possible values include: "Invalid",
"InvalidDomain", "RequiredDomain", "PreferredPrimaryDomain", "RequiredDomainDistribution",
"NonPartiallyPlaceService".
:type type: str or ~azure.mgmt.servicefabric.models.ServicePlacementPolicyType
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
}
_subtype_map = {
'type': {}
}
def __init__(
self,
**kwargs
):
super(ServicePlacementPolicyDescription, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class ServiceResource(ProxyResource):
"""The service resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
    :param location: It will be deprecated in the new API; the resource location depends on the parent
resource.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
:param placement_constraints: The placement constraints as a string. Placement constraints are
boolean expressions on node properties and allow for restricting a service to particular nodes
based on the service requirements. For example, to place a service on nodes where NodeType is
     blue, specify the following: "NodeColor == blue".
:type placement_constraints: str
:param correlation_scheme: A list that describes the correlation of the service with other
services.
:type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
    :param service_load_metrics: The service load metrics are given as an array of
ServiceLoadMetricDescription objects.
:type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
    :param service_placement_policies: A list that describes the placement policies for the service.
:type service_placement_policies:
list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
:param default_move_cost: Specifies the move cost for the service. Possible values include:
"Zero", "Low", "Medium", "High".
:type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
    :param service_kind: The kind of service (Stateless or Stateful). Constant filled by server.
Possible values include: "Invalid", "Stateless", "Stateful".
:type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
:param service_type_name: The name of the service type.
:type service_type_name: str
:param partition_description: Describes how the service is partitioned.
:type partition_description: ~azure.mgmt.servicefabric.models.PartitionSchemeDescription
    :param service_package_activation_mode: The activation mode of the service package. Possible
values include: "SharedProcess", "ExclusiveProcess".
:type service_package_activation_mode: str or
~azure.mgmt.servicefabric.models.ArmServicePackageActivationMode
    :param service_dns_name: The DNS name used for the service. If this is specified, the service
     can be accessed via its DNS name instead of its service name.
:type service_dns_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'placement_constraints': {'key': 'properties.placementConstraints', 'type': 'str'},
'correlation_scheme': {'key': 'properties.correlationScheme', 'type': '[ServiceCorrelationDescription]'},
'service_load_metrics': {'key': 'properties.serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
'service_placement_policies': {'key': 'properties.servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
'default_move_cost': {'key': 'properties.defaultMoveCost', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'service_kind': {'key': 'properties.serviceKind', 'type': 'str'},
'service_type_name': {'key': 'properties.serviceTypeName', 'type': 'str'},
'partition_description': {'key': 'properties.partitionDescription', 'type': 'PartitionSchemeDescription'},
'service_package_activation_mode': {'key': 'properties.servicePackageActivationMode', 'type': 'str'},
'service_dns_name': {'key': 'properties.serviceDnsName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServiceResource, self).__init__(**kwargs)
self.placement_constraints = kwargs.get('placement_constraints', None)
self.correlation_scheme = kwargs.get('correlation_scheme', None)
self.service_load_metrics = kwargs.get('service_load_metrics', None)
self.service_placement_policies = kwargs.get('service_placement_policies', None)
self.default_move_cost = kwargs.get('default_move_cost', None)
self.provisioning_state = None
self.service_kind = None # type: Optional[str]
self.service_type_name = kwargs.get('service_type_name', None)
self.partition_description = kwargs.get('partition_description', None)
self.service_package_activation_mode = kwargs.get('service_package_activation_mode', None)
self.service_dns_name = kwargs.get('service_dns_name', None)
class ServiceResourceList(msrest.serialization.Model):
"""The list of service resources.
Variables are only populated by the server, and will be ignored when sending a request.
:param value:
:type value: list[~azure.mgmt.servicefabric.models.ServiceResource]
:ivar next_link: URL to get the next set of service list results if there are any.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ServiceResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServiceResourceList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class ServiceResourcePropertiesBase(msrest.serialization.Model):
"""The common service resource properties.
:param placement_constraints: The placement constraints as a string. Placement constraints are
boolean expressions on node properties and allow for restricting a service to particular nodes
based on the service requirements. For example, to place a service on nodes where NodeType is
     blue, specify the following: "NodeColor == blue".
:type placement_constraints: str
:param correlation_scheme: A list that describes the correlation of the service with other
services.
:type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
    :param service_load_metrics: The service load metrics are given as an array of
ServiceLoadMetricDescription objects.
:type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
    :param service_placement_policies: A list that describes the placement policies for the service.
:type service_placement_policies:
list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
:param default_move_cost: Specifies the move cost for the service. Possible values include:
"Zero", "Low", "Medium", "High".
:type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
"""
_attribute_map = {
'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServiceResourcePropertiesBase, self).__init__(**kwargs)
self.placement_constraints = kwargs.get('placement_constraints', None)
self.correlation_scheme = kwargs.get('correlation_scheme', None)
self.service_load_metrics = kwargs.get('service_load_metrics', None)
self.service_placement_policies = kwargs.get('service_placement_policies', None)
self.default_move_cost = kwargs.get('default_move_cost', None)
class ServiceResourceProperties(ServiceResourcePropertiesBase):
"""The service resource properties.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: StatefulServiceProperties, StatelessServiceProperties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param placement_constraints: The placement constraints as a string. Placement constraints are
boolean expressions on node properties and allow for restricting a service to particular nodes
based on the service requirements. For example, to place a service on nodes where NodeType is
blue, specify the following: "NodeColor == blue".
:type placement_constraints: str
:param correlation_scheme: A list that describes the correlation of the service with other
services.
:type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
:param service_load_metrics: The service load metrics is given as an array of
ServiceLoadMetricDescription objects.
:type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
:param service_placement_policies: A list that describes the correlation of the service with
other services.
:type service_placement_policies:
list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
:param default_move_cost: Specifies the move cost for the service. Possible values include:
"Zero", "Low", "Medium", "High".
:type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled by
server. Possible values include: "Invalid", "Stateless", "Stateful".
:type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
:param service_type_name: The name of the service type.
:type service_type_name: str
:param partition_description: Describes how the service is partitioned.
:type partition_description: ~azure.mgmt.servicefabric.models.PartitionSchemeDescription
:param service_package_activation_mode: The activation Mode of the service package. Possible
values include: "SharedProcess", "ExclusiveProcess".
:type service_package_activation_mode: str or
~azure.mgmt.servicefabric.models.ArmServicePackageActivationMode
:param service_dns_name: Dns name used for the service. If this is specified, then the service
can be accessed via its DNS name instead of service name.
:type service_dns_name: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'service_kind': {'required': True},
}
_attribute_map = {
'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'service_kind': {'key': 'serviceKind', 'type': 'str'},
'service_type_name': {'key': 'serviceTypeName', 'type': 'str'},
'partition_description': {'key': 'partitionDescription', 'type': 'PartitionSchemeDescription'},
'service_package_activation_mode': {'key': 'servicePackageActivationMode', 'type': 'str'},
'service_dns_name': {'key': 'serviceDnsName', 'type': 'str'},
}
_subtype_map = {
'service_kind': {'Stateful': 'StatefulServiceProperties', 'Stateless': 'StatelessServiceProperties'}
}
def __init__(
self,
**kwargs
):
super(ServiceResourceProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.service_kind = 'ServiceResourceProperties' # type: str
self.service_type_name = kwargs.get('service_type_name', None)
self.partition_description = kwargs.get('partition_description', None)
self.service_package_activation_mode = kwargs.get('service_package_activation_mode', None)
self.service_dns_name = kwargs.get('service_dns_name', None)
class ServiceResourceUpdate(ProxyResource):
"""The service resource for patch operations.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: It will be deprecated in New API, resource location depends on the parent
resource.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
:param placement_constraints: The placement constraints as a string. Placement constraints are
boolean expressions on node properties and allow for restricting a service to particular nodes
based on the service requirements. For example, to place a service on nodes where NodeType is
blue, specify the following: "NodeColor == blue".
:type placement_constraints: str
:param correlation_scheme: A list that describes the correlation of the service with other
services.
:type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
:param service_load_metrics: The service load metrics is given as an array of
ServiceLoadMetricDescription objects.
:type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
:param service_placement_policies: A list that describes the correlation of the service with
other services.
:type service_placement_policies:
list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
:param default_move_cost: Specifies the move cost for the service. Possible values include:
"Zero", "Low", "Medium", "High".
:type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
:param service_kind: The kind of service (Stateless or Stateful). Constant filled by server.
Possible values include: "Invalid", "Stateless", "Stateful".
:type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'placement_constraints': {'key': 'properties.placementConstraints', 'type': 'str'},
'correlation_scheme': {'key': 'properties.correlationScheme', 'type': '[ServiceCorrelationDescription]'},
'service_load_metrics': {'key': 'properties.serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
'service_placement_policies': {'key': 'properties.servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
'default_move_cost': {'key': 'properties.defaultMoveCost', 'type': 'str'},
'service_kind': {'key': 'properties.serviceKind', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServiceResourceUpdate, self).__init__(**kwargs)
self.placement_constraints = kwargs.get('placement_constraints', None)
self.correlation_scheme = kwargs.get('correlation_scheme', None)
self.service_load_metrics = kwargs.get('service_load_metrics', None)
self.service_placement_policies = kwargs.get('service_placement_policies', None)
self.default_move_cost = kwargs.get('default_move_cost', None)
self.service_kind = None # type: Optional[str]
class ServiceResourceUpdateProperties(ServiceResourcePropertiesBase):
"""The service resource properties for patch operations.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: StatefulServiceUpdateProperties, StatelessServiceUpdateProperties.
All required parameters must be populated in order to send to Azure.
:param placement_constraints: The placement constraints as a string. Placement constraints are
boolean expressions on node properties and allow for restricting a service to particular nodes
based on the service requirements. For example, to place a service on nodes where NodeType is
blue, specify the following: "NodeColor == blue".
:type placement_constraints: str
:param correlation_scheme: A list that describes the correlation of the service with other
services.
:type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
:param service_load_metrics: The service load metrics is given as an array of
ServiceLoadMetricDescription objects.
:type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
:param service_placement_policies: A list that describes the correlation of the service with
other services.
:type service_placement_policies:
list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
:param default_move_cost: Specifies the move cost for the service. Possible values include:
"Zero", "Low", "Medium", "High".
:type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
:param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled by
server. Possible values include: "Invalid", "Stateless", "Stateful".
:type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
"""
_validation = {
'service_kind': {'required': True},
}
_attribute_map = {
'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
'service_kind': {'key': 'serviceKind', 'type': 'str'},
}
_subtype_map = {
'service_kind': {'Stateful': 'StatefulServiceUpdateProperties', 'Stateless': 'StatelessServiceUpdateProperties'}
}
def __init__(
self,
**kwargs
):
super(ServiceResourceUpdateProperties, self).__init__(**kwargs)
self.service_kind = 'ServiceResourceUpdateProperties' # type: str
class ServiceTypeDeltaHealthPolicy(msrest.serialization.Model):
"""Represents the delta health policy used to evaluate the health of services belonging to a service type when upgrading the cluster.
:param max_percent_delta_unhealthy_services: The maximum allowed percentage of services health
degradation allowed during cluster upgrades.
The delta is measured between the state of the services at the beginning of upgrade and the
state of the services at the time of the health evaluation.
The check is performed after every upgrade domain upgrade completion to make sure the global
state of the cluster is within tolerated limits.
:type max_percent_delta_unhealthy_services: int
"""
_validation = {
'max_percent_delta_unhealthy_services': {'maximum': 100, 'minimum': 0},
}
_attribute_map = {
'max_percent_delta_unhealthy_services': {'key': 'maxPercentDeltaUnhealthyServices', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ServiceTypeDeltaHealthPolicy, self).__init__(**kwargs)
self.max_percent_delta_unhealthy_services = kwargs.get('max_percent_delta_unhealthy_services', 0)
class ServiceTypeHealthPolicy(msrest.serialization.Model):
"""Represents the health policy used to evaluate the health of services belonging to a service type.
:param max_percent_unhealthy_services: The maximum percentage of services allowed to be
unhealthy before your application is considered in error.
:type max_percent_unhealthy_services: int
"""
_validation = {
'max_percent_unhealthy_services': {'maximum': 100, 'minimum': 0},
}
_attribute_map = {
'max_percent_unhealthy_services': {'key': 'maxPercentUnhealthyServices', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ServiceTypeHealthPolicy, self).__init__(**kwargs)
self.max_percent_unhealthy_services = kwargs.get('max_percent_unhealthy_services', 0)
class SettingsParameterDescription(msrest.serialization.Model):
"""Describes a parameter in fabric settings of the cluster.
All required parameters must be populated in order to send to Azure.
:param name: Required. The parameter name of fabric setting.
:type name: str
:param value: Required. The parameter value of fabric setting.
:type value: str
"""
_validation = {
'name': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SettingsParameterDescription, self).__init__(**kwargs)
self.name = kwargs['name']
self.value = kwargs['value']
class SettingsSectionDescription(msrest.serialization.Model):
"""Describes a section in the fabric settings of the cluster.
All required parameters must be populated in order to send to Azure.
:param name: Required. The section name of the fabric settings.
:type name: str
:param parameters: Required. The collection of parameters in the section.
:type parameters: list[~azure.mgmt.servicefabric.models.SettingsParameterDescription]
"""
_validation = {
'name': {'required': True},
'parameters': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[SettingsParameterDescription]'},
}
def __init__(
self,
**kwargs
):
super(SettingsSectionDescription, self).__init__(**kwargs)
self.name = kwargs['name']
self.parameters = kwargs['parameters']
class SingletonPartitionSchemeDescription(PartitionSchemeDescription):
"""SingletonPartitionSchemeDescription.
All required parameters must be populated in order to send to Azure.
:param partition_scheme: Required. Specifies how the service is partitioned. Constant filled by
server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named".
:type partition_scheme: str or ~azure.mgmt.servicefabric.models.PartitionScheme
"""
_validation = {
'partition_scheme': {'required': True},
}
_attribute_map = {
'partition_scheme': {'key': 'partitionScheme', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SingletonPartitionSchemeDescription, self).__init__(**kwargs)
self.partition_scheme = 'Singleton' # type: str
class StatefulServiceProperties(ServiceResourceProperties):
"""The properties of a stateful service resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param placement_constraints: The placement constraints as a string. Placement constraints are
boolean expressions on node properties and allow for restricting a service to particular nodes
based on the service requirements. For example, to place a service on nodes where NodeType is
blue, specify the following: "NodeColor == blue".
:type placement_constraints: str
:param correlation_scheme: A list that describes the correlation of the service with other
services.
:type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
:param service_load_metrics: The service load metrics is given as an array of
ServiceLoadMetricDescription objects.
:type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
:param service_placement_policies: A list that describes the correlation of the service with
other services.
:type service_placement_policies:
list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
:param default_move_cost: Specifies the move cost for the service. Possible values include:
"Zero", "Low", "Medium", "High".
:type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled by
server. Possible values include: "Invalid", "Stateless", "Stateful".
:type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
:param service_type_name: The name of the service type.
:type service_type_name: str
:param partition_description: Describes how the service is partitioned.
:type partition_description: ~azure.mgmt.servicefabric.models.PartitionSchemeDescription
:param service_package_activation_mode: The activation Mode of the service package. Possible
values include: "SharedProcess", "ExclusiveProcess".
:type service_package_activation_mode: str or
~azure.mgmt.servicefabric.models.ArmServicePackageActivationMode
:param service_dns_name: Dns name used for the service. If this is specified, then the service
can be accessed via its DNS name instead of service name.
:type service_dns_name: str
:param has_persisted_state: A flag indicating whether this is a persistent service which stores
states on the local disk. If it is then the value of this property is true, if not it is false.
:type has_persisted_state: bool
:param target_replica_set_size: The target replica set size as a number.
:type target_replica_set_size: int
:param min_replica_set_size: The minimum replica set size as a number.
:type min_replica_set_size: int
:param replica_restart_wait_duration: The duration between when a replica goes down and when a
new replica is created, represented in ISO 8601 format (hh:mm:ss.s).
:type replica_restart_wait_duration: ~datetime.datetime
:param quorum_loss_wait_duration: The maximum duration for which a partition is allowed to be
in a state of quorum loss, represented in ISO 8601 format (hh:mm:ss.s).
:type quorum_loss_wait_duration: ~datetime.datetime
:param stand_by_replica_keep_duration: The definition on how long StandBy replicas should be
maintained before being removed, represented in ISO 8601 format (hh:mm:ss.s).
:type stand_by_replica_keep_duration: ~datetime.datetime
"""
_validation = {
'provisioning_state': {'readonly': True},
'service_kind': {'required': True},
'target_replica_set_size': {'minimum': 1},
'min_replica_set_size': {'minimum': 1},
}
_attribute_map = {
'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'service_kind': {'key': 'serviceKind', 'type': 'str'},
'service_type_name': {'key': 'serviceTypeName', 'type': 'str'},
'partition_description': {'key': 'partitionDescription', 'type': 'PartitionSchemeDescription'},
'service_package_activation_mode': {'key': 'servicePackageActivationMode', 'type': 'str'},
'service_dns_name': {'key': 'serviceDnsName', 'type': 'str'},
'has_persisted_state': {'key': 'hasPersistedState', 'type': 'bool'},
'target_replica_set_size': {'key': 'targetReplicaSetSize', 'type': 'int'},
'min_replica_set_size': {'key': 'minReplicaSetSize', 'type': 'int'},
'replica_restart_wait_duration': {'key': 'replicaRestartWaitDuration', 'type': 'iso-8601'},
'quorum_loss_wait_duration': {'key': 'quorumLossWaitDuration', 'type': 'iso-8601'},
'stand_by_replica_keep_duration': {'key': 'standByReplicaKeepDuration', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(StatefulServiceProperties, self).__init__(**kwargs)
self.service_kind = 'Stateful' # type: str
self.has_persisted_state = kwargs.get('has_persisted_state', None)
self.target_replica_set_size = kwargs.get('target_replica_set_size', None)
self.min_replica_set_size = kwargs.get('min_replica_set_size', None)
self.replica_restart_wait_duration = kwargs.get('replica_restart_wait_duration', None)
self.quorum_loss_wait_duration = kwargs.get('quorum_loss_wait_duration', None)
self.stand_by_replica_keep_duration = kwargs.get('stand_by_replica_keep_duration', None)
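# A minimal usage sketch; only the class and field names below come from this
# module, the concrete values are illustrative assumptions:
#   props = StatefulServiceProperties(
#       service_type_name="MyServiceType",
#       partition_description=SingletonPartitionSchemeDescription(),
#       has_persisted_state=True,
#       target_replica_set_size=3,
#       min_replica_set_size=2,
#   )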
class StatefulServiceUpdateProperties(ServiceResourceUpdateProperties):
"""The properties of a stateful service resource for patch operations.
All required parameters must be populated in order to send to Azure.
:param placement_constraints: The placement constraints as a string. Placement constraints are
boolean expressions on node properties and allow for restricting a service to particular nodes
based on the service requirements. For example, to place a service on nodes where NodeType is
blue, specify the following: "NodeColor == blue".
:type placement_constraints: str
:param correlation_scheme: A list that describes the correlation of the service with other
services.
:type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
:param service_load_metrics: The service load metrics is given as an array of
ServiceLoadMetricDescription objects.
:type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
:param service_placement_policies: A list that describes the correlation of the service with
other services.
:type service_placement_policies:
list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
:param default_move_cost: Specifies the move cost for the service. Possible values include:
"Zero", "Low", "Medium", "High".
:type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
:param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled by
server. Possible values include: "Invalid", "Stateless", "Stateful".
:type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
:param target_replica_set_size: The target replica set size as a number.
:type target_replica_set_size: int
:param min_replica_set_size: The minimum replica set size as a number.
:type min_replica_set_size: int
:param replica_restart_wait_duration: The duration between when a replica goes down and when a
new replica is created, represented in ISO 8601 format (hh:mm:ss.s).
:type replica_restart_wait_duration: ~datetime.datetime
:param quorum_loss_wait_duration: The maximum duration for which a partition is allowed to be
in a state of quorum loss, represented in ISO 8601 format (hh:mm:ss.s).
:type quorum_loss_wait_duration: ~datetime.datetime
:param stand_by_replica_keep_duration: The definition on how long StandBy replicas should be
maintained before being removed, represented in ISO 8601 format (hh:mm:ss.s).
:type stand_by_replica_keep_duration: ~datetime.datetime
"""
_validation = {
'service_kind': {'required': True},
'target_replica_set_size': {'minimum': 1},
'min_replica_set_size': {'minimum': 1},
}
_attribute_map = {
'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
'service_kind': {'key': 'serviceKind', 'type': 'str'},
'target_replica_set_size': {'key': 'targetReplicaSetSize', 'type': 'int'},
'min_replica_set_size': {'key': 'minReplicaSetSize', 'type': 'int'},
'replica_restart_wait_duration': {'key': 'replicaRestartWaitDuration', 'type': 'iso-8601'},
'quorum_loss_wait_duration': {'key': 'quorumLossWaitDuration', 'type': 'iso-8601'},
'stand_by_replica_keep_duration': {'key': 'standByReplicaKeepDuration', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(StatefulServiceUpdateProperties, self).__init__(**kwargs)
self.service_kind = 'Stateful' # type: str
self.target_replica_set_size = kwargs.get('target_replica_set_size', None)
self.min_replica_set_size = kwargs.get('min_replica_set_size', None)
self.replica_restart_wait_duration = kwargs.get('replica_restart_wait_duration', None)
self.quorum_loss_wait_duration = kwargs.get('quorum_loss_wait_duration', None)
self.stand_by_replica_keep_duration = kwargs.get('stand_by_replica_keep_duration', None)
class StatelessServiceProperties(ServiceResourceProperties):
"""The properties of a stateless service resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param placement_constraints: The placement constraints as a string. Placement constraints are
boolean expressions on node properties and allow for restricting a service to particular nodes
based on the service requirements. For example, to place a service on nodes where NodeType is
blue, specify the following: "NodeColor == blue".
:type placement_constraints: str
:param correlation_scheme: A list that describes the correlation of the service with other
services.
:type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
:param service_load_metrics: The service load metrics is given as an array of
ServiceLoadMetricDescription objects.
:type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
:param service_placement_policies: A list that describes the correlation of the service with
other services.
:type service_placement_policies:
list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
:param default_move_cost: Specifies the move cost for the service. Possible values include:
"Zero", "Low", "Medium", "High".
:type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled by
server. Possible values include: "Invalid", "Stateless", "Stateful".
:type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
:param service_type_name: The name of the service type.
:type service_type_name: str
:param partition_description: Describes how the service is partitioned.
:type partition_description: ~azure.mgmt.servicefabric.models.PartitionSchemeDescription
:param service_package_activation_mode: The activation Mode of the service package. Possible
values include: "SharedProcess", "ExclusiveProcess".
:type service_package_activation_mode: str or
~azure.mgmt.servicefabric.models.ArmServicePackageActivationMode
:param service_dns_name: Dns name used for the service. If this is specified, then the service
can be accessed via its DNS name instead of service name.
:type service_dns_name: str
:param instance_count: The instance count.
:type instance_count: int
:param instance_close_delay_duration: Delay duration for the RequestDrain feature to ensure that
the endpoint advertised by the stateless instance is removed before the delay starts prior to
closing the instance. This delay enables existing requests to drain gracefully before the
instance actually goes down
(https://docs.microsoft.com/en-us/azure/service-fabric/service-fabric-application-upgrade-advanced#avoid-connection-drops-during-stateless-service-planned-downtime-preview).
It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it
is interpreted as a number representing the total number of milliseconds.
:type instance_close_delay_duration: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'service_kind': {'required': True},
'instance_count': {'minimum': -1},
}
_attribute_map = {
'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'service_kind': {'key': 'serviceKind', 'type': 'str'},
'service_type_name': {'key': 'serviceTypeName', 'type': 'str'},
'partition_description': {'key': 'partitionDescription', 'type': 'PartitionSchemeDescription'},
'service_package_activation_mode': {'key': 'servicePackageActivationMode', 'type': 'str'},
'service_dns_name': {'key': 'serviceDnsName', 'type': 'str'},
'instance_count': {'key': 'instanceCount', 'type': 'int'},
'instance_close_delay_duration': {'key': 'instanceCloseDelayDuration', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StatelessServiceProperties, self).__init__(**kwargs)
self.service_kind = 'Stateless' # type: str
self.instance_count = kwargs.get('instance_count', None)
self.instance_close_delay_duration = kwargs.get('instance_close_delay_duration', None)
class StatelessServiceUpdateProperties(ServiceResourceUpdateProperties):
"""The properties of a stateless service resource for patch operations.
All required parameters must be populated in order to send to Azure.
:param placement_constraints: The placement constraints as a string. Placement constraints are
boolean expressions on node properties and allow for restricting a service to particular nodes
based on the service requirements. For example, to place a service on nodes where NodeType is
blue, specify the following: "NodeColor == blue".
:type placement_constraints: str
:param correlation_scheme: A list that describes the correlation of the service with other
services.
:type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
:param service_load_metrics: The service load metrics is given as an array of
ServiceLoadMetricDescription objects.
:type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
:param service_placement_policies: A list that describes the correlation of the service with
other services.
:type service_placement_policies:
list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
:param default_move_cost: Specifies the move cost for the service. Possible values include:
"Zero", "Low", "Medium", "High".
:type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
:param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled by
server. Possible values include: "Invalid", "Stateless", "Stateful".
:type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
:param instance_count: The instance count.
:type instance_count: int
:param instance_close_delay_duration: Delay duration for the RequestDrain feature to ensure that
the endpoint advertised by the stateless instance is removed before the delay starts prior to
closing the instance. This delay enables existing requests to drain gracefully before the
instance actually goes down
(https://docs.microsoft.com/en-us/azure/service-fabric/service-fabric-application-upgrade-advanced#avoid-connection-drops-during-stateless-service-planned-downtime-preview).
It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it
is interpreted as a number representing the total number of milliseconds.
:type instance_close_delay_duration: str
"""
_validation = {
'service_kind': {'required': True},
'instance_count': {'minimum': -1},
}
_attribute_map = {
'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
'service_kind': {'key': 'serviceKind', 'type': 'str'},
'instance_count': {'key': 'instanceCount', 'type': 'int'},
'instance_close_delay_duration': {'key': 'instanceCloseDelayDuration', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StatelessServiceUpdateProperties, self).__init__(**kwargs)
self.service_kind = 'Stateless' # type: str
self.instance_count = kwargs.get('instance_count', None)
self.instance_close_delay_duration = kwargs.get('instance_close_delay_duration', None)
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource.
:type created_by_type: str
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource.
:type last_modified_by_type: str
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription):
"""Describes a partitioning scheme where an integer range is allocated evenly across a number of partitions.
All required parameters must be populated in order to send to Azure.
:param partition_scheme: Required. Specifies how the service is partitioned. Constant filled by
server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named".
:type partition_scheme: str or ~azure.mgmt.servicefabric.models.PartitionScheme
:param count: Required. The number of partitions.
:type count: int
:param low_key: Required. String indicating the lower bound of the partition key range that
should be split between the partition ‘count’.
:type low_key: str
:param high_key: Required. String indicating the upper bound of the partition key range that
should be split between the partition ‘count’.
:type high_key: str
"""
_validation = {
'partition_scheme': {'required': True},
'count': {'required': True},
'low_key': {'required': True},
'high_key': {'required': True},
}
_attribute_map = {
'partition_scheme': {'key': 'partitionScheme', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
'low_key': {'key': 'lowKey', 'type': 'str'},
'high_key': {'key': 'highKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UniformInt64RangePartitionSchemeDescription, self).__init__(**kwargs)
self.partition_scheme = 'UniformInt64Range' # type: str
self.count = kwargs['count']
self.low_key = kwargs['low_key']
self.high_key = kwargs['high_key']
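# Minimal sketch (values are illustrative assumptions): count, low_key and
# high_key are required, so omitting any of them raises a KeyError here:
#   scheme = UniformInt64RangePartitionSchemeDescription(
#       count=4, low_key="0", high_key="1023")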
class UpgradableVersionPathResult(msrest.serialization.Model):
"""The list of intermediate cluster code versions for an upgrade or downgrade. Or minimum and maximum upgradable version if no target was given.
:param supported_path:
:type supported_path: list[str]
"""
_attribute_map = {
'supported_path': {'key': 'supportedPath', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(UpgradableVersionPathResult, self).__init__(**kwargs)
self.supported_path = kwargs.get('supported_path', None)
class UpgradableVersionsDescription(msrest.serialization.Model):
"""UpgradableVersionsDescription.
All required parameters must be populated in order to send to Azure.
:param target_version: Required. The target code version.
:type target_version: str
"""
_validation = {
'target_version': {'required': True},
}
_attribute_map = {
'target_version': {'key': 'targetVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UpgradableVersionsDescription, self).__init__(**kwargs)
self.target_version = kwargs['target_version']
class UserAssignedIdentity(msrest.serialization.Model):
"""UserAssignedIdentity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of user assigned identity.
:vartype principal_id: str
:ivar client_id: The client id of user assigned identity.
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserAssignedIdentity, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
|
examples/dataflow/chatbot/operations.py
|
agriyakhetarpal/dffml
| 171 |
65606
|
<reponame>agriyakhetarpal/dffml
import io
import re
import json
import tempfile
import contextlib
from aiohttp import ClientSession, ClientTimeout
from dffml.cli.cli import CLI
from dffml import op, config, Definition, BaseSecret
ACCESSTOKEN = Definition(name="access_token", primitive="str")
ROOMNAME = Definition(name="room_name", primitive="str")
ROOMID = Definition(name="room_id", primitive="str")
MESSAGE = Definition(name="message", primitive="str")
TOSEND = Definition(name="to_send", primitive="str")
@config
class GitterChannelConfig:
secret: BaseSecret
@op(
inputs={"room_uri": ROOMNAME},
outputs={"room_id": ROOMID},
config_cls=GitterChannelConfig,
imp_enter={
"secret": lambda self: self.config.secret,
"session": lambda self: ClientSession(trust_env=True),
},
ctx_enter={"sctx": lambda self: self.parent.secret()},
)
async def get_room_id(self, room_uri):
# Get unique roomid from room uri
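# e.g. a room URI such as "dffml/community" (illustrative value) is exchanged
# for Gitter's internal room id via a POST to the /rooms endpoint below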
access_token = await self.sctx.get("access_token")
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"Bearer {access_token}",
}
api_url = await self.sctx.get("api_url")
url = f"{api_url}/rooms"
async with self.parent.session.post(
url, json={"uri": room_uri}, headers=headers
) as resp:
response = await resp.json()
return {"room_id": response["id"]}
@op(
inputs={"room_id": ROOMID},
outputs={"message": MESSAGE},
config_cls=GitterChannelConfig,
imp_enter={
"secret": lambda self: self.config.secret,
"session": lambda self: ClientSession(
trust_env=True, timeout=ClientTimeout(total=None)
),
},
ctx_enter={"sctx": lambda self: self.parent.secret()},
)
async def stream_chat(self, room_id):
# Listen to messages in room
access_token = await self.sctx.get("access_token")
headers = {
"Accept": "application/json",
"Authorization": f"Bearer {access_token}",
}
stream_url = await self.sctx.get("stream_url")
url = f"{stream_url}/rooms/{room_id}/chatMessages"
botname = await self.sctx.get("botname")
async with self.parent.session.get(url, headers=headers) as resp:
async for data in resp.content:
# Gitter sends " \n" at some intervals
if data == " \n".encode():
continue
data = json.loads(data.strip())
message = data["text"]
# Only listen to messages directed to bot
if f"@{botname}" not in message:
continue
yield {"message": message}
@op(
inputs={"message": TOSEND, "room_id": ROOMID},
config_cls=GitterChannelConfig,
imp_enter={
"secret": lambda self: self.config.secret,
"session": lambda self: ClientSession(trust_env=True),
},
ctx_enter={"sctx": lambda self: self.parent.secret()},
)
async def send_message(self, message, room_id):
access_token = await self.sctx.get("access_token")
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"Bearer {access_token}",
}
try:
message = json.loads(message)
message = json.dumps(message, indent=4, sort_keys=True)
except (json.JSONDecodeError, TypeError):
# the message is not valid JSON; send it as-is
pass
# For newlines we need \\n, else the Gitter API
# responds with 'Bad Request'
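# e.g. a two-line message "a\nb" must be posted as "a\\nb"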
message = message.replace("\n", "\\n")
api_url = await self.sctx.get("api_url")
url = f"{api_url}/rooms/{room_id}/chatMessages"
async with self.parent.session.post(
url, headers=headers, json={"text": message}
) as resp:
response = await resp.json()
return
@op(
inputs={"message": MESSAGE,},
outputs={"message": TOSEND},
config_cls=GitterChannelConfig,
imp_enter={"secret": lambda self: self.config.secret},
ctx_enter={"sctx": lambda self: self.parent.secret()},
)
async def interpret_message(self, message):
greet = ["hey", "hello", "hi"]
for x in greet:
if x in message.lower():
return {"message": "Hey Hooman ฅ^•ﻌ•^ฅ"}
def extract_data(raw_data):
"""
Parses data from text
eg
>>> raw_data = "
details:
features: Years:int:1 Expertise:int:1 Trust:float:1
predict: Salary:float:1
data:
Years,Expertise,Trust,Salary
0,1,0.1,10
1,3,0.2,20
2,5,0.3,30
3,7,0.4,40
"
>>> extract_data(raw_data)
{
model-data:
"
Years,Expertise,Trust,Salary
0,1,0.1,10
1,3,0.2,20
2,5,0.3,30
3,7,0.4,40
"
,
features:
Years:int:1 Expertise:int:1 Trust:float:1
,
predict: Salary:float:1
}
"""
raw_data = raw_data.split("data:") # (Feature details, training data)
data = {"model-data": raw_data[1]}
raw_data = raw_data[0].split(
"\n"
) # splits feature details to separate lines
# Iterate and add to to dictionary `data`
for x in raw_data:
k, *v = x.split(":")
if isinstance(v, list): # for features
v = ":".join(v)
k = k.strip()
v = v.strip()
if k: # avoid blank
data[k] = v
return data
# Removing username from message
# The regex matches @ followed by anything that
# is not a whitespace in the first group and
# the rest of the string in the second group.
# We replace the string by the second group.
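# e.g. "@mybot predict: ..." becomes "predict: ..." after the substitution
# and strip ("mybot" is an illustrative bot name)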
message = re.sub(r"(@[^\s]+)(.*)", r"\2", message).strip()
if message.lower().startswith("train model"):
return {"message": "Gimme more info!!"}
elif message.lower().startswith("predict:"):
# Only replace first occurrence of predict
# because the feature to predict will be labeled predict
raw_data = message.replace("predict:", "", 1).strip()
cmds = ["predict", "all"]
elif message.lower().startswith("details:"):
raw_data = message.replace("details:", "",).strip()
cmds = ["train"]
else:
return {"message": " Oops ,I didnt get that ᕙ(⇀‸↼‶)ᕗ "}
# We'll use scikit logistic regression
data = extract_data(raw_data)
model_type = "scikitlr"
features = data["features"].split(" ")
predict = data["predict"]
model_data = data["model-data"]
with tempfile.NamedTemporaryFile(suffix=".csv") as fileobj:
fileobj.write(model_data.lstrip().encode())
fileobj.seek(0)
stdout = io.StringIO()
with contextlib.redirect_stdout(stdout):
preds = await CLI.cli(
*cmds,
"-model",
model_type,
"-model-location",
"tempModel",
"-model-features",
*features,
"-model-predict",
predict,
"-sources",
"f=csv",
"-source-filename",
fileobj.name,
)
if "train" in cmds:
return {"message": "Done!!"}
else:
m = {}
for pred in preds:
pred = pred.predictions()
m.update({p: pred[p]["value"] for p in pred})
message = [f"{k}: {v}" for k, v in m.items()]
message = "\n".join(message)
return {"message": message}
|
tests/integration/runner.py
|
techjacker/systemdlogger
| 102 |
65609
|
import pytest
from unittest.mock import patch
import tests.fixtures.journal as FakeJournalExporter
from systemdlogger.elasticsearch import ElasticsearchLogger
@pytest.mark.parametrize(('config_path'), [
'tests/fixtures/config_es.json'
])
class TestRunner:
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
modules = {
'systemdlogger.journal': FakeJournalExporter
}
self.module_patcher = patch.dict('sys.modules', modules)
self.module_patcher.start()
from systemdlogger.runner import Runner
self.Runner = Runner
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
self.module_patcher.stop()
def test_init(self, config_path):
runner = self.Runner(config_path)
assert len(runner.loggers) == 1
assert isinstance(runner.loggers[0], ElasticsearchLogger)
def test_run(self, config_path):
runner = self.Runner(config_path)
runner.run()
|
sandbox/block_waveglow.py
|
Nijta/project-NN-Pytorch-scripts
| 150 |
65612
|
<reponame>Nijta/project-NN-Pytorch-scripts
#!/usr/bin/env python
"""
Building blocks for waveglow
"""
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import torch.nn.init as torch_init
import sandbox.block_nn as nii_nn
import sandbox.block_wavenet as nii_wavenet
import sandbox.block_glow as nii_glow
import core_scripts.data_io.conf as nii_io_conf
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2021, Xin Wang"
class Invertible1x1ConvWaveGlow(torch.nn.Module):
def __init__(self, feat_dim, flag_detjac=False):
super(Invertible1x1ConvWaveGlow, self).__init__()
torch.manual_seed(100)
with torch.no_grad():
W = torch.qr(torch.FloatTensor(feat_dim, feat_dim).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:,0] = -1*W[:,0]
# not necessary
W = W.transpose(0, 1)
self.weight = torch_nn.Parameter(W)
self.weight_inv = torch_nn.Parameter(W.clone())
self.weight_inv_flag = False
self.flag_detjac = flag_detjac
return
def forward(self, y, factor):
batch_size, length, feat_dim = y.size()
# Forward computation
log_det_W = length / factor * torch.logdet(self.weight)
z = torch.matmul(y, self.weight)
if self.flag_detjac:
return z, log_det_W
else:
return z
def reverse(self, x):
if not self.weight_inv_flag:
self.weight_inv.data = torch.inverse(self.weight.data)
self.weight_inv_flag = True
return torch.matmul(x, self.weight_inv)
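# Usage sketch in the spirit of the Example sections below (shapes are
# illustrative assumptions):
#   m_inv = Invertible1x1ConvWaveGlow(8, flag_detjac=True)
#   y = torch.randn(2, 100, 8)
#   z, log_det_w = m_inv(y, factor=1)
#   y_rec = m_inv.reverse(z)   # torch.std(y - y_rec) should be ~0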
class upsampleByTransConv(torch_nn.Module):
"""upsampleByTransConv
Upsampling layer using transposed convolution
"""
def __init__(self, feat_dim, upsample_rate, window_ratio=5):
"""upsampleByTransConv(feat_dim, upsample_rate, window_ratio=5)
Args
----
feat_dim: int, input feature should be (batch, length, feat_dim)
upsample_rate, int, output feature will be
(batch, length*upsample_rate, feat_dim)
window_ratio: int, default 5, window length of transconv will be
upsample_rate * window_ratio
"""
super(upsampleByTransConv, self).__init__()
window_l = upsample_rate * window_ratio
self.m_layer = torch_nn.ConvTranspose1d(
feat_dim, feat_dim, window_l, stride=upsample_rate)
self.m_uprate = upsample_rate
return
def forward(self, x):
""" y = upsampleByTransConv(x)
input
-----
x: tensor, (batch, length, feat_dim)
output
------
y: tensor, (batch, length*upsample_rate, feat_dim)
"""
l = x.shape[1] * self.m_uprate
y = self.m_layer(x.permute(0, 2, 1))[:, :, 0:l]
return y.permute(0, 2, 1).contiguous()
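# Usage sketch (illustrative shapes): with upsample_rate=80 the time axis is
# stretched by a factor of 80:
#   m_up = upsampleByTransConv(feat_dim=80, upsample_rate=80)
#   y = m_up(torch.randn(2, 10, 80))   # -> (2, 800, 80)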
class SqueezeForWaveGlow(torch_nn.Module):
"""SqueezeForWaveGlow
Squeeze layer for WaveGlow
"""
def __init__(self, mode = 1):
"""SqueezeForGlow(mode=1)
Args
----
mode: int, mode of this squeeze layer
mode == 1: original squeeze method by squeezing 8 points
"""
super(SqueezeForWaveGlow, self).__init__()
self.m_mode = mode
# mode 1, squeeze by 8
self.m_mode_1_para = 8
return
def get_expected_squeeze_length(self, orig_length):
# return expected length after squeezing
if self.m_mode == 1:
return orig_length//self.m_mode_1_para
def get_squeeze_factor(self):
# return the configuration for squeezing
if self.m_mode == 1:
return self.m_mode_1_para
def forward(self, x):
"""SqueezeForWaveGlow(x)
input
-----
x: tensor, (batch, length, feat_dim)
output
------
y: tensor, (batch, length // squeeze, feat_dim * squeeze)
"""
if self.m_mode == 1:
# squeeze, the 8 points should be the last dimension
squeeze_len = x.shape[1] // self.m_mode_1_para
# trim length first
trim_len = squeeze_len * self.m_mode_1_para
x_tmp = x[:, 0:trim_len, :]
# (batch, time//squeeze_size, squeeze_size, dim)
x_tmp = x_tmp.view(x_tmp.shape[0], squeeze_len,
self.m_mode_1_para, -1)
# (batch, time//squeeze_size, dim, squeeze_size)
x_tmp = x_tmp.permute(0, 1, 3, 2).contiguous()
# (batch, time//squeeze_size, dim * squeeze_size)
return x_tmp.view(x_tmp.shape[0], squeeze_len, -1)
else:
# unsupported mode: warn and return the input unchanged
print("SqueezeForWaveGlow not implemented")
return x
def reverse(self, x_squeezed):
if self.m_mode == 1:
# (batch, time//squeeze_size, dim * squeeze_size)
batch, squeeze_len, squeeze_dim = x_squeezed.shape
# (batch, time//squeeze_size, dim, squeeze_size)
x_tmp = x_squeezed.view(
batch, squeeze_len, squeeze_dim // self.m_mode_1_para,
self.m_mode_1_para)
# (batch, time//squeeze_size, squeeze_size, dim)
x_tmp = x_tmp.permute(0, 1, 3, 2).contiguous()
# (batch, time, dim)
x = x_tmp.view(batch, squeeze_len * self.m_mode_1_para, -1)
else:
# unsupported mode: warn and pass the input through unchanged
print("SqueezeForWaveGlow not implemented")
x = x_squeezed
return x
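# Usage sketch (illustrative shapes): mode 1 folds 8 consecutive time steps
# into the feature dimension:
#   m_sq = SqueezeForWaveGlow()
#   z = m_sq(torch.randn(2, 800, 1))   # -> (2, 100, 8)
#   x = m_sq.reverse(z)                # -> (2, 800, 1)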
class AffineCouplingWaveGlow_legacy(torch_nn.Module):
"""AffineCouplingWaveGlow_legacy
AffineCoupling block in WaveGlow
Example:
m_tmp = AffineCouplingWaveGlow_legacy(10, 10, 8, 512, 3, True, True)
data1 = torch.randn([2, 100, 10])
cond = torch.randn([2, 100, 10])
output, log_det = m_tmp(data1, cond)
data1_re = m_tmp.reverse(output, cond)
torch.std(data1 - data1_re)
"""
def __init__(self, in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_detjac=False):
"""AffineCouplingWaveGlow_legacy(in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_detjac=False)
Args:
-----
in_dim: int, dim of input audio data (batch, length, in_dim)
cond_dim, int, dim of condition feature (batch, length, cond_dim)
wn_num_conv1d: int, number of dilated conv WaveNet blocks
wn_dim_channel: int, dim of the WaveNet residual & skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
flag_affine: bool, whether use affine or additive transformation?
default True
flag_detjac: bool, whether return the determinant of Jacobian,
default False
y -> split() -> y1, y2 -> concat([y1, (y2+bias) * scale])
When flag_affine == True, y1 -> H() -> scale, bias
When flag_affine == False, y1 -> H() -> bias, scale=1
Here, H() is WaveNet blocks (dilated conv + gated activation)
"""
super(AffineCouplingWaveGlow_legacy, self).__init__()
self.flag_affine = flag_affine
self.flag_detjac = flag_detjac
if in_dim % 2 > 0:
print("AffineCoulingGlow(feat_dim), feat_dim is an odd number?!")
sys.exit(1)
if self.flag_affine:
# scale and bias
self.m_nn_outdim = in_dim // 2 * 2
else:
# only bias
self.m_nn_outdim = in_dim // 2
# pre-transform, change input audio dimension
# only half of the features will be used to produce scale and bias
tmp_l = torch_nn.Linear(in_dim // 2, wn_dim_channel)
# weight normalization
self.m_wn_pre = torch_nn.utils.weight_norm(tmp_l, name='weight')
# WaveNet blocks (dilated conv, gated activation functions)
tmp_wn = []
for i in range(wn_num_conv1d):
dilation = 2 ** i
tmp_wn.append(nii_wavenet.WaveNetBlock_v2(
wn_dim_channel, wn_dim_channel, wn_dim_channel, cond_dim,
dilation, cnn_kernel_size=wn_kernel_size, causal=False))
self.m_wn = torch_nn.ModuleList(tmp_wn)
# post-transform, change dim from WN channel to audio feature
tmp_l = torch_nn.Linear(wn_dim_channel, self.m_nn_outdim)
# For better initialization, bias=0, scale=1 for first mini-batch
tmp_l.weight.data.zero_()
tmp_l.bias.data.zero_()
self.m_wn_post = tmp_l
return
def _detjac(self, log_scale, factor=1):
# (batch, dim1, dim2, ..., feat_dim) -> (batch)
# sum over dim1, ... feat_dim
return nii_glow.sum_over_keep_batch(log_scale / factor)
def _nn_trans(self, y1, cond):
"""_nn_trans(self, y1, cond)
input
-----
y1: tensor, input feature, (batch, length, input_dim//2)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
scale: tensor, (batch, length, input_dim // 2)
bias: tensor, (batch, length, input_dim // 2)
log_scale: tensor, (batch, length, input_dim // 2)
Affine transformation can be done by scale * feature + bias
log_scale is used for det Jacobian computation
"""
# pre-transformation (batch, length, in_dim//2)
# -> (batch, length, WN_channel)
y1_trans = self.m_wn_pre(y1)
# WaveNet blocks
wn_output = 0
res_ch = y1_trans
for wn_layer in self.m_wn:
res_ch, ski_ch = wn_layer(res_ch, cond)
wn_output = wn_output + ski_ch / len(self.m_wn)
#wn_output = wn_output + res_ch / len(self.m_wn)
# post-transformation
y1_tmp = self.m_wn_post(wn_output)
if self.flag_affine:
log_scale, bias = y1_tmp.chunk(2, -1)
scale = torch.exp(log_scale)
else:
bias = y1_tmp
scale = torch.ones_like(y1)
log_scale = torch.zeros_like(y1)
return scale, bias, log_scale
def forward(self, y, cond, factor=1):
"""AffineCouplingWaveGlow_legacy.forward(y, cond)
input
-----
y: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
x: tensor, input feature, (batch, length, input_dim)
detjac: tensor, det of jacobian, (batch,)
y1, y2 = split(y)
scale, bias = WN(y1)
x2 = y2 * scale + bias or (y2 + bias) * scale
return [y1, x2]
"""
# split
y1, y2 = y.chunk(2, -1)
scale, bias, log_scale = self._nn_trans(y1, cond)
# transform
x1 = y1
x2 = (y2 + bias) * scale
# concatenate
x = torch.cat([x1, x2], dim=-1)
if self.flag_detjac:
return x, self._detjac(log_scale, factor)
else:
return x
def reverse(self, x, cond):
"""AffineCouplingWaveGlow_legacy.reverse(y, cond)
input
-----
x: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
y: tensor, input feature, (batch, length, input_dim)
x1, x2 = split(x)
scale, bias = WN(x1)
y2 = x2 / scale - bias
return [x1, y2]
"""
# split
x1, x2 = x.chunk(2, -1)
# reverse transform
y1 = x1
scale, bias, log_scale = self._nn_trans(y1, cond)
y2 = x2 / scale - bias
return torch.cat([y1, y2], dim=-1)
class WaveNetModuleForNonAR(torch_nn.Module):
"""WaveNetModuleWaveGlow
Casecade of multiple WaveNet blocks:
x -> ExpandDim -> conv1 -> gated -> res -> conv1 -> gated -> res ...
^ |
| v
cond skip
output = sum(skip_channels)
"""
def __init__(self, input_dim, cond_dim, out_dim, n_blocks,
gate_dim, res_ch, skip_ch, kernel_size=3):
super(WaveNetModuleForNonAR, self).__init__()
self.m_block_num = n_blocks
self.m_res_ch_dim = res_ch
self.m_skip_ch_dim = skip_ch
self.m_gate_dim = gate_dim
self.m_kernel_size = kernel_size
self.m_n_blocks = n_blocks
if self.m_gate_dim % 2 != 0:
self.m_gate_dim = self.m_gate_dim // 2 * 2
# input dimension expanding
tmp = torch_nn.Conv1d(input_dim, res_ch, 1)
self.l_expand = torch_nn.utils.weight_norm(tmp, name='weight')
# end dimension compressing
tmp = torch_nn.Conv1d(skip_ch, out_dim, 1)
tmp.weight.data.zero_()
tmp.bias.data.zero_()
self.l_compress = tmp
# dilated convolution and residual-skip-channel transformation
self.l_conv1 = []
self.l_resskip = []
for idx in range(n_blocks):
dilation = 2 ** idx
padding = int((kernel_size * dilation - dilation)/2)
conv1 = torch_nn.Conv1d(
res_ch, gate_dim, self.m_kernel_size,
dilation = dilation, padding=padding)
conv1 = torch_nn.utils.weight_norm(conv1, name='weight')
self.l_conv1.append(conv1)
if idx < n_blocks - 1:
outdim = self.m_res_ch_dim + self.m_skip_ch_dim
else:
outdim = self.m_skip_ch_dim
resskip = torch_nn.Conv1d(self.m_gate_dim//2, outdim, 1)
resskip = torch_nn.utils.weight_norm(resskip, name='weight')
self.l_resskip.append(resskip)
self.l_conv1 = torch_nn.ModuleList(self.l_conv1)
self.l_resskip = torch_nn.ModuleList(self.l_resskip)
# a single conditional feature transformation layer
cond_layer = torch_nn.Conv1d(cond_dim, gate_dim * n_blocks, 1)
cond_layer = torch_nn.utils.weight_norm(cond_layer, name='weight')
self.l_cond = cond_layer
return
def forward(self, x, cond):
"""
"""
# input feature expansion
# change the format to (batch, dimension, length)
x_expanded = self.l_expand(x.permute(0, 2, 1))
# condition feature transformation
cond_proc = self.l_cond(cond.permute(0, 2, 1))
# skip-channel accumulation
skip_ch_out = 0
conv_input = x_expanded
for idx, (l_conv1, l_resskip) in \
enumerate(zip(self.l_conv1, self.l_resskip)):
tmp_dim = idx * self.m_gate_dim
# condition feature of this layer
cond_tmp = cond_proc[:, tmp_dim : tmp_dim + self.m_gate_dim, :]
# conv transformed
conv_tmp = l_conv1(conv_input)
# gated activation
gated_tmp = cond_tmp + conv_tmp
t_part = torch.tanh(gated_tmp[:, :self.m_gate_dim//2, :])
s_part = torch.sigmoid(gated_tmp[:, self.m_gate_dim//2:, :])
gated_tmp = t_part * s_part
# transformation into skip / residual channels
resskip_tmp = l_resskip(gated_tmp)
# reschannel
if idx == self.m_n_blocks - 1:
skip_ch_out = skip_ch_out + resskip_tmp
else:
conv_input = conv_input + resskip_tmp[:, 0:self.m_res_ch_dim, :]
skip_ch_out = skip_ch_out + resskip_tmp[:, self.m_res_ch_dim:,:]
output = self.l_compress(skip_ch_out)
# permute back to (batch, length, dimension)
return output.permute(0, 2, 1)
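# Usage sketch (illustrative dims): maps (batch, length, input_dim) plus a
# condition of the same length to (batch, length, out_dim):
#   m_wn = WaveNetModuleForNonAR(input_dim=4, cond_dim=10, out_dim=8,
#                                n_blocks=6, gate_dim=128, res_ch=64, skip_ch=64)
#   out = m_wn(torch.randn(2, 100, 4), torch.randn(2, 100, 10))  # -> (2, 100, 8)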
class AffineCouplingWaveGlow(torch_nn.Module):
"""AffineCouplingWaveGlow
AffineCoupling block in WaveGlow
Example:
m_tmp = AffineCouplingWaveGlow(10, 10, 8, 512, 3, True, True)
data1 = torch.randn([2, 100, 10])
cond = torch.randn([2, 100, 10])
output, log_det = m_tmp(data1, cond)
data1_re = m_tmp.reverse(output, cond)
torch.std(data1 - data1_re)
"""
def __init__(self, in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_detjac=False):
"""AffineCouplingWaveGlow(in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_detjac=False)
Args:
-----
in_dim: int, dim of input audio data (batch, length, in_dim)
cond_dim, int, dim of condition feature (batch, length, cond_dim)
wn_num_conv1d: int, number of dilated conv WaveNet blocks
        wn_dim_channel: int, dim of the WaveNet residual & skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
        flag_affine: bool, whether to use affine or additive transformation,
default True
flag_detjac: bool, whether return the determinant of Jacobian,
default False
        y -> split() -> y1, y2 -> concatenate([y1, (y2+bias) * scale])
When flag_affine == True, y1 -> H() -> scale, bias
When flag_affine == False, y1 -> H() -> bias, scale=1
Here, H() is WaveNet blocks (dilated conv + gated activation)
"""
super(AffineCouplingWaveGlow, self).__init__()
self.flag_affine = flag_affine
self.flag_detjac = flag_detjac
if in_dim % 2 > 0:
print("AffineCoulingGlow(feat_dim), feat_dim is an odd number?!")
sys.exit(1)
if self.flag_affine:
# scale and bias
self.m_nn_outdim = in_dim // 2 * 2
else:
# only bias
self.m_nn_outdim = in_dim // 2
# WaveNet blocks (dilated conv, gated activation functions)
self.m_wn = WaveNetModuleForNonAR(
in_dim // 2, cond_dim, self.m_nn_outdim, wn_num_conv1d,
wn_dim_channel * 2, wn_dim_channel, wn_dim_channel,
wn_kernel_size
)
return
def _detjac(self, log_scale, factor=1):
# (batch, dim1, dim2, ..., feat_dim) -> (batch)
# sum over dim1, ... feat_dim
return nii_glow.sum_over_keep_batch(log_scale / factor)
def _nn_trans(self, y1, cond):
"""_nn_trans(self, y1, cond)
input
-----
        y1: tensor, input feature, (batch, length, input_dim//2)
        cond: tensor, condition feature, (batch, length, cond_dim)
        output
        ------
        scale: tensor, (batch, length, input_dim // 2)
        bias: tensor, (batch, length, input_dim // 2)
        log_scale: tensor, (batch, length, input_dim // 2)
        Affine transformation can be done by scale * feature + bias
log_scale is used for det Jacobian computation
"""
y1_tmp = self.m_wn(y1, cond)
if self.flag_affine:
log_scale, bias = y1_tmp.chunk(2, -1)
scale = torch.exp(log_scale)
else:
bias = y1_tmp
scale = torch.ones_like(y1)
log_scale = torch.zeros_like(y1)
return scale, bias, log_scale
def forward(self, y, cond, factor=1):
"""AffineCouplingWaveGlow.forward(y, cond)
input
-----
        y: tensor, input feature, (batch, length, input_dim)
        cond: tensor, condition feature, (batch, length, cond_dim)
        output
        ------
        x: tensor, output feature, (batch, length, input_dim)
detjac: tensor, det of jacobian, (batch,)
y1, y2 = split(y)
scale, bias = WN(y1)
x2 = y2 * scale + bias or (y2 + bias) * scale
return [y1, x2]
"""
# split
y1, y2 = y.chunk(2, -1)
scale, bias, log_scale = self._nn_trans(y1, cond)
# transform
x1 = y1
x2 = (y2 + bias) * scale
# concatenate
x = torch.cat([x1, x2], dim=-1)
if self.flag_detjac:
return x, self._detjac(log_scale, factor)
else:
return x
def reverse(self, x, cond):
"""AffineCouplingWaveGlow.reverse(y, cond)
input
-----
x: tensor, input feature, (batch, lengh, input_dim)
cond: tensor, condition feature , (batch, lengh, cond_dim)
output
------
y: tensor, input feature, (batch, lengh, input_dim)
x1, x2 = split(x)
scale, bias = WN(x1)
y2 = x2 / scale - bias
return [x1, y2]
"""
# split
x1, x2 = x.chunk(2, -1)
# reverse transform
y1 = x1
scale, bias, log_scale = self._nn_trans(y1, cond)
y2 = x2 / scale - bias
return torch.cat([y1, y2], dim=-1)
class FlowStepWaveGlow(torch_nn.Module):
"""FlowStepWaveGlow
One flow step for waveglow
    y -> invertible_1x1() -> AffineCoupling -> x
Example
m_tmp = FlowStepWaveGlow(10, 10, 8, 512, 3, flag_affine=True)
output, log_det = m_tmp(data1, cond)
data1_re = m_tmp.reverse(output, cond)
torch.std(data1 - data1_re)
"""
def __init__(self, in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size, flag_affine,
flag_affine_block_legacy=False):
"""FlowStepWaveGlow(in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size, flag_affine,
flag_affine_block_legacy=False)
Args
----
in_dim: int, input feature dim, (batch, length, in_dim)
cond_dim:, int, conditional feature dim, (batch, length, cond_dim)
wn_num_conv1d: int, number of 1Dconv WaveNet block in this flow step
wn_dim_channel: int, dim of the WaveNet residual and skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
        flag_affine: bool, whether to use affine or additive transformation,
default True
flag_affine_block_legacy, bool, whether use AffineCouplingWaveGlow or
AffineCouplingWaveGlow_legacy.
For wn_dim_channel and wn_kernel_size, see AffineCouplingWaveGlow
For flag_affine == False, scale will be 1.0
"""
super(FlowStepWaveGlow, self).__init__()
# Invertible transformation layer
#self.m_invtrans = nii_glow.InvertibleTrans(in_dim, flag_detjac=True)
self.m_invtrans = Invertible1x1ConvWaveGlow(in_dim, flag_detjac=True)
# Coupling layer
if flag_affine_block_legacy:
self.m_coupling = AffineCouplingWaveGlow_legacy(
in_dim, cond_dim, wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine, flag_detjac=True)
else:
self.m_coupling = AffineCouplingWaveGlow(
in_dim, cond_dim, wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine, flag_detjac=True)
return
def forward(self, y, cond, factor=1):
"""FlowStepWaveGlow.forward(y, cond, factor=1)
input
-----
        y: tensor, input feature, (batch, length, input_dim)
        cond: tensor, condition feature, (batch, length, cond_dim)
        factor: int, this is used to divide the likelihood, default 1
            if we directly sum all detjac terms, they will become very large;
            however, we cannot average them directly on y because y
            may have a different shape from the actual data y
        output
        ------
        x: tensor, output feature, (batch, length, input_dim)
detjac: tensor, det of jacobian, (batch,)
"""
# 1x1 transform
x_tmp, log_det_1 = self.m_invtrans(y, factor)
# coupling
x_tmp, log_det_2 = self.m_coupling(x_tmp, cond, factor)
return x_tmp, log_det_1 + log_det_2
def reverse(self, x, cond):
"""FlowStepWaveGlow.reverse(y, cond)
input
-----
x: tensor, input feature, (batch, lengh, input_dim)
cond: tensor, condition feature , (batch, lengh, cond_dim)
output
------
y: tensor, input feature, (batch, lengh, input_dim)
"""
y_tmp = self.m_coupling.reverse(x, cond)
y_tmp = self.m_invtrans.reverse(y_tmp)
return y_tmp
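# Sketch (illustration only, mirroring the docstring example of FlowStepWaveGlow):
# composing invertible transforms keeps invertibility, and the log|det Jacobian| of
# the composition is the sum of the individual log-dets, which is why forward()
# returns log_det_1 + log_det_2 while reverse() undoes the sub-steps in the
# opposite order.
def _flow_step_roundtrip_sketch():
    m_tmp = FlowStepWaveGlow(10, 10, 8, 512, 3, flag_affine=True)
    data1 = torch.randn([2, 100, 10])
    cond = torch.randn([2, 100, 10])
    output, log_det = m_tmp(data1, cond)
    data1_re = m_tmp.reverse(output, cond)
    # reconstruction error should be close to zero
    return torch.std(data1 - data1_re), log_det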
class WaveGlowBlock(torch_nn.Module):
"""WaveGlowBlock
A WaveGlowBlock includes multiple steps of flow.
    The Nvidia WaveGlow does not define a WaveGlowBlock but directly
    defines 12 flow steps. However, after every 4 flow steps, two
    dimensions of z are extracted (multi-scale approach), which makes
    it awkward to decide when to extract z.
    Here, we define a WaveGlowBlock as the cascade of multiple flow
    steps, and this WaveGlowBlock can extract the two dimensions from
    the output of the final flow step.
Example:
data1 = torch.randn([2, 10, 10])
cond = torch.randn([2, 10, 16])
m_block = WaveGlowBlock(10, 16, 5, 8, 512, 3)
x, z, log_det = m_block(data1, cond)
data_re = m_block.reverse(x, z, cond)
print(torch.std(data_re - data1))
"""
def __init__(self, in_dim, cond_dim, n_flow_steps,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True,
flag_split = False,
flag_final_block=False,
split_dim = 2,
flag_affine_block_legacy=False):
"""WaveGlowBlock(in_dim, cond_dim, n_flow_steps,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_split = False, split_dim = 2,
flag_affine_block_legacy=False)
Args
----
in_dim: int, input feature dim, (batch, length, in_dim)
cond_dim:, int, conditional feature dim, (batch, length, cond_dim)
n_flow_steps: int, number of flow steps in one block
wn_num_conv1d: int, number of dilated conv WaveNet blocks
wn_dim_channel: int, dim of the WaveNet residual and skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
        flag_affine: bool, whether to use affine or additive transformation,
            default True
        flag_split: bool, whether to split output z for the multi-scale structure,
            default False
flag_final_block: bool, whether this block is the final block
default False
split_dim: int, if flag_split==True, z[:, :, :split_dim] will be
extracted, z[:, :, split_dim:] can be used for the next
WaveGlowBlock
        flag_affine_block_legacy: bool, whether to use the legacy implementation
            of the wavenet-based affine transformation layer,
            default False.
For wn_dim_channel and wn_kernel_size, see AffineCouplingWaveGlow
For flag_affine, see AffineCouplingWaveGlow
"""
super(WaveGlowBlock, self).__init__()
tmp_flows = []
for i in range(n_flow_steps):
tmp_flows.append(
FlowStepWaveGlow(
in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine, flag_affine_block_legacy))
self.m_flows = torch_nn.ModuleList(tmp_flows)
self.flag_split = flag_split
self.flag_final_block = flag_final_block
self.split_dim = split_dim
if self.flag_split and self.flag_final_block:
print("WaveGlowBlock: flag_split and flag_final_block are True")
print("This is unexpected. Please check model definition")
sys.exit(1)
if self.flag_split and self.split_dim <= 0:
print("WaveGlowBlock: split_dim should be > 0")
sys.exit(1)
return
def forward(self, y, cond, factor=1):
"""x, z, log_detjac = WaveGlowBlock(y)
y -> H() -> [z, x], log_det_jacobian
H() consists of multiple flow steps (1x1conv + AffineCoupling)
input
-----
y: tensor, (batch, length, dim)
cond, tensor, (batch, length, cond_dim)
        factor: None or int, this is used to divide the likelihood, default 1
output
------
log_detjac: tensor or scalar
if self.flag_split:
x: tensor, (batch, length, in_dim - split_dim),
z: tensor, (batch, length, split_dim),
else:
if self.flag_final_block:
x: None, no input to the next block
z: tensor, (batch, length, dim), for N(z; 0, I)
else:
x: tensor, (batch, length, dim),
z: None, no latent for N(z; 0, I) from this block
        concatenate([x, z]) should have the same size as y
"""
# flows
log_detjac = 0
x_tmp = y
for l_flow in self.m_flows:
x_tmp, log_detjac_tmp = l_flow(x_tmp, cond, factor)
log_detjac = log_detjac + log_detjac_tmp
if self.flag_split:
z = x_tmp[:, :, :self.split_dim]
x = x_tmp[:, :, self.split_dim:]
else:
if self.flag_final_block:
z = x_tmp
x = None
else:
z = None
x = x_tmp
return x, z, log_detjac
def reverse(self, x, z, cond):
"""y = WaveGlowBlock.reverse(x, z, cond)
[z, x] -> H^{-1}() -> y
input
-----
if self.flag_split:
x: tensor, (batch, length, in_dim - split_dim),
z: tensor, (batch, length, split_dim),
else:
if self.flag_final_block:
x: None
z: tensor, (batch, length, in_dim)
else:
x: tensor, (batch, length, in_dim)
z: None
output
------
y: tensor, (batch, length, in_dim)
"""
if self.flag_split:
if x is None or z is None:
print("WaveGlowBlock.reverse: x and z should not be None")
sys.exit(1)
y_tmp = torch.cat([z, x], dim=-1)
else:
if self.flag_final_block:
if z is None:
print("WaveGlowBlock.reverse: z should not be None")
sys.exit(1)
y_tmp = z
else:
if x is None:
print("WaveGlowBlock.reverse: x should not be None")
sys.exit(1)
y_tmp = x
for l_flow in self.m_flows[::-1]:
# affine
y_tmp = l_flow.reverse(y_tmp, cond)
return y_tmp
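# Sketch (illustration only) of the multi-scale split used above: when flag_split is
# True, the first split_dim feature dimensions leave the flow as z and only the rest
# is passed on to the next block; reverse() simply concatenates them back together.
def _split_concat_sketch(x_tmp, split_dim=2):
    z = x_tmp[:, :, :split_dim]
    x = x_tmp[:, :, split_dim:]
    return torch.equal(torch.cat([z, x], dim=-1), x_tmp)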
class WaveGlow(torch_nn.Module):
"""WaveGlow
Example
cond_dim = 4
upsample = 80
num_blocks = 4
num_flows_inblock = 5
wn_num_conv1d = 8
wn_dim_channel = 512
wn_kernel_size = 3
# waveforms of length 1600
wave1 = torch.randn([2, 1600, 1])
# condition feature
cond = torch.randn([2, 1600//upsample, cond_dim])
# model
m_model = nii_waveglow.WaveGlow(
cond_dim, upsample,
num_blocks, num_flows_inblock, wn_num_conv1d,
wn_dim_channel, wn_kernel_size)
# forward computation, neg_log = -(logp + log_detjac)
# neg_log.backward() can be used for backward
z, neg_log, logp, log_detjac = m_model(wave1, cond)
# recover the signal
wave2 = m_model.reverse(z, cond)
# check difference between original wave and recovered wave
print(torch.std(wave1 - wave2))
"""
def __init__(self, cond_dim, upsample_rate,
num_blocks, num_flows_inblock,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine = True,
early_hid_dim=2,
flag_affine_block_legacy=False):
"""WaveGlow(cond_dim, upsample_rate,
num_blocks, num_flows_inblock,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine = True,
early_hid_dim=2,
flag_affine_block_legacy=False)
Args
----
cond_dim:, int, conditional feature dim, (batch, length, cond_dim)
upsample_rate: int, up-sampling rate for condition features
num_blocks: int, number of WaveGlowBlocks
num_flows_inblock: int, number of flow steps in one WaveGlowBlock
wn_num_conv1d: int, number of 1Dconv WaveNet block in this flow step
wn_dim_channel: int, dim of the WaveNet residual and skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
        flag_affine: bool, whether to use affine or additive transformation,
default True
early_hid_dim: int, dimension for z_1, z_2 ... , default 2
        flag_affine_block_legacy: bool, whether to use the legacy implementation
            of the wavenet-based affine transformation layer,
            default False. The difference is in the WaveNet part;
            see AffineCouplingWaveGlow and
            AffineCouplingWaveGlow_legacy for details
This model defines:
cond -> upsample/squeeze -> | ------> | --------> |
v v v
y -> squeeze -> WaveGlowBlock -> WGBlock ... WGBlock -> z
|-> z_1 |-> z_2
z_1, z_2, ... are the extracted z from a multi-scale flow structure
        concatenation of [z_1, z_2, z] is expected to be white Gaussian noise
If early_hid_dim == 0, z_1 and z_2 will not be extracted
"""
super(WaveGlow, self).__init__()
# input is assumed to be waveform
self.m_input_dim = 1
self.m_early_hid_dim = early_hid_dim
# squeeze layer
self.m_squeeze = SqueezeForWaveGlow()
# up-sampling layer
#self.m_upsample = nii_nn.UpSampleLayer(cond_dim, upsample_rate, True)
self.m_upsample = upsampleByTransConv(cond_dim, upsample_rate)
# wavenet-based flow blocks
# squeezed input dimension
squeezed_in_dim = self.m_input_dim * self.m_squeeze.get_squeeze_factor()
# squeezed condition feature dimension
squeezed_cond_dim = cond_dim * self.m_squeeze.get_squeeze_factor()
# save the dimension for get_z_noises
self.m_feat_dim = []
# define blocks
tmp_squeezed_in_dim = squeezed_in_dim
tmp_flow_blocks = []
for i in range(num_blocks):
# if this is not the last block and early_hid_dim >0
flag_split = (i < (num_blocks-1)) and early_hid_dim > 0
flag_final_block = i == (num_blocks-1)
# save the dimension for get_z_noises
if flag_final_block:
self.m_feat_dim.append(tmp_squeezed_in_dim)
else:
self.m_feat_dim.append(early_hid_dim if flag_split else 0)
tmp_flow_blocks.append(
WaveGlowBlock(
tmp_squeezed_in_dim, squeezed_cond_dim, num_flows_inblock,
wn_num_conv1d, wn_dim_channel, wn_kernel_size, flag_affine,
flag_split = flag_split, flag_final_block=flag_final_block,
split_dim = early_hid_dim,
flag_affine_block_legacy = flag_affine_block_legacy))
# multi-scale approach will extract a few dimensions for next flow
# thus, input dimension to the next block will be this
tmp_squeezed_in_dim = tmp_squeezed_in_dim - early_hid_dim
self.m_flowblocks = torch_nn.ModuleList(tmp_flow_blocks)
# done
return
def _normal_lh(self, noise):
# likelihood of normal distribution on the given noise
return -0.5 * np.log(2 * np.pi) - 0.5 * noise ** 2
def forward(self, y, cond):
"""z, neg_logp_y, logp_z, logdet = WaveGlow.forward(y, cond)
cond -> upsample/squeeze -> | ------> | --------> |
v v v
y -> squeeze -> WaveGlowBlock -> WGBlock ... WGBlock -> z
|-> z_1 |-> z_2
input
-----
y: tensor, (batch, waveform_length, 1)
        cond: tensor, (batch, cond_length, cond_dim)
output
------
z: list of tensors, [z_1, z_2, ... ,z ] in figure above
        neg_logp_y: scalar, -log p(y)
        logp_z: scalar, log N(z), summed over one data sequence but averaged
            over the batch
        logdet: scalar, log |det dH(.)/dy|, summed over one data sequence
            but averaged over the batch
If self.early_hid_dim == 0, z_1, z_2 ... will be None
"""
        # Rather than summing the likelihood and dividing it by the number of
        # data points in the final step, we divide each flow step's likelihood
        # contribution by this factor and sum the scaled values.
        # The two methods are equivalent, but the latter may prevent numerical
        # overflow of the likelihood value for long sentences
factor = np.prod([dim for dim in y.shape])
# waveform squeeze (batch, squeezed_length, squeezed_dim)
y_squeezed = self.m_squeeze(y)
squeezed_dim = y_squeezed.shape[-1]
# condition feature upsampling and squeeze
# (batch, squeezed_length, squeezed_dim_cond)
cond_up_squeezed = self.m_squeeze(self.m_upsample(cond))
# flows
z_bags = []
log_detjac = 0
log_pz = 0
x_tmp = y_squeezed
for m_block in self.m_flowblocks:
x_tmp, z_tmp, log_detjac_tmp = m_block(
x_tmp, cond_up_squeezed, factor)
# accumulate log det jacobian
log_detjac += log_detjac_tmp
# compute N(z; 0, I)
# save z_tmp (even if it is None)
z_bags.append(z_tmp)
# accumulate log_N(z; 0, I) only if it is valid
if z_tmp is not None:
log_pz += nii_glow.sum_over_keep_batch2(
self._normal_lh(z_tmp), factor)
# average over batch and data points
neg_logp_y = -(log_pz + log_detjac).sum()
return z_bags, neg_logp_y, \
log_pz.sum(), log_detjac.sum()
def reverse(self, z_bags, cond):
"""y = WaveGlow.reverse(z_bags, cond)
cond -> upsample/squeeze -> | ------> | --------> |
v v v
y <- unsqueeze <- WaveGlowBlock -> WGBlock ... WGBlock <- z
|<- z_1 |<- z_2
input
-----
z: list of tensors, [z_1, z_2, ... ,z ] in figure above
        cond: tensor, (batch, cond_length, cond_dim)
output
------
y: tensor, (batch, waveform_length, 1)
If self.early_hid_dim == 0, z_1, z_2 ... should be None
"""
# condition feature upsampling and squeeze
# (batch, squeezed_length, squeezed_dim_cond)
cond_up_sqe = self.m_squeeze(self.m_upsample(cond))
# initial
y_tmp = None
for z, m_block in zip(z_bags[::-1], self.m_flowblocks[::-1]):
y_tmp = m_block.reverse(y_tmp, z, cond_up_sqe)
y = self.m_squeeze.reverse(y_tmp)
return y
def get_z_noises(self, length, noise_std=0.7, batchsize=1):
"""z_bags = WaveGlow.get_z_noises(length, noise_std=0.7, batchsize=1)
Return a list of random noises for random sampling
input
-----
length: int, length of target waveform (without squeeze)
noise_std: float, std of Gaussian noise, default 0.7
batchsize: int, batch size of this random data, default 1
output
------
z_bags: list of tensors
Shape of tensor in z_bags is decided by WaveGlow configuration.
WaveGlow.reverse(z_bags, cond) can be used to generate waveform
"""
squeeze_length = self.m_squeeze.get_expected_squeeze_length(length)
device = next(self.parameters()).device
z_bags = []
# generate the z for each WaveGlowBlock
for feat_dim in self.m_feat_dim:
if feat_dim is not None and feat_dim > 0:
z_tmp = torch.randn(
[batchsize, squeeze_length, feat_dim],
dtype=nii_io_conf.d_dtype,
device=device)
z_bags.append(z_tmp * noise_std)
else:
z_bags.append(None)
return z_bags
if __name__ == "__main__":
print("Definition of WaveGlow")
|
maml/apps/symbolic/_selectors.py
|
anooptp/maml
| 161 |
65623
|
<reponame>anooptp/maml
"""
Selectors
"""
import inspect
from collections import defaultdict
from itertools import combinations
from typing import List, Optional, Union, Dict, Callable
import numpy as np
from scipy.linalg import lstsq
from scipy.optimize import minimize, NonlinearConstraint
from sklearn.linear_model import LinearRegression
from sklearn.metrics import get_scorer
from joblib import Parallel, delayed
# pylint: disable=R0201
class BaseSelector:
"""
    Feature selector. This is meant to work on a relatively small
    number of features.
"""
def __init__(self, coef_thres: float = 1e-6, method: str = "SLSQP"):
"""
Base selector
Args:
            coef_thres (float): threshold below which coefficients are discarded
            method (str): optimization method used by scipy.optimize.minimize
"""
self.coef_thres = coef_thres
self.is_fitted = False
self.coef_: Optional[np.ndarray] = None
self.method = method
self.indices: Optional[np.ndarray] = None
def select(self, x: np.ndarray, y: np.ndarray, options: Optional[Dict] = None) -> Optional[np.ndarray]:
"""
Select feature indices from x
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
options (dict): options in the optimizations provided
to scipy.optimize.minimize
Returns: list of int indices
"""
n_data, n_dim = x.shape
options = options or {"maxiter": 1e4, "ftol": 1e-12}
res = minimize(
lambda beta: self.construct_loss(x=x, y=y, beta=beta),
[0] * n_dim,
jac=self.construct_jac(x=x, y=y),
method=self.method,
constraints=self.construct_constraints(x=x, y=y),
options=options,
)
if res.status != 0:
raise RuntimeError(f"Not converged, status {res.status}")
self.is_fitted = True
self.coef_ = res.x
# output coefficient indices that are above certain thresholds
self.indices = np.where(np.abs(self.coef_) > self.coef_thres)[0] # type: ignore
self.coef_[np.where(np.abs(self.coef_) <= self.coef_thres)[0]] = 0.0 # type: ignore
return self.indices
def construct_loss(self, x: np.ndarray, y: np.ndarray, beta: np.ndarray) -> float:
"""
Get loss function from data and tentative coefficients beta
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): N coefficients
Returns: loss value
"""
raise NotImplementedError
def construct_constraints(
self, x: np.ndarray, y: np.ndarray, beta: Optional[np.ndarray] = None
) -> Optional[Union[Dict, List, NonlinearConstraint]]:
"""
Get constraints dictionary from data, e.g.,
{"func": lambda beta: fun(x, y, beta), "type": "ineq"}
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): parameter to optimize
Returns: dict of constraints
"""
return None
def construct_jac(self, x: np.ndarray, y: np.ndarray) -> Optional[Callable]:
"""
Jacobian of cost function
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
Returns: Jacobian function
"""
return None
def evaluate(self, x: np.ndarray, y: np.ndarray, metric: str = "neg_mean_absolute_error") -> float:
"""
Evaluate the linear models using x, and y test data
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
metric (str): scorer function, used with
sklearn.metrics.get_scorer
Returns:
"""
metric_func = get_scorer(metric)
lr = LinearRegression(fit_intercept=False)
lr.coef_ = self.coef_[self.indices] # type: ignore
lr.intercept_ = 0
return metric_func(lr, x[:, self.indices], y)
def get_coef(self) -> Optional[np.ndarray]:
"""
Get coefficients
Returns: the coefficients array
"""
return self.coef_
def get_feature_indices(self) -> Optional[np.ndarray]:
"""
Get selected feature indices
Returns:
"""
return self.indices
def predict(self, x: np.ndarray) -> np.ndarray:
"""
Predict the results using sparsified coefficients
Args:
x (np.ndarray): design matrix
Returns:
"""
return x[:, self.indices].dot(self.coef_[self.indices]) # type: ignore
def compute_residual(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""
Compute
Args:
x (np.ndarray): design matrix
y (np.ndarray): target vector
Returns: residual vector
"""
return y - self.predict(x)
@classmethod
def _get_param_names(cls):
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
if init is object.__init__:
return []
init_signature = inspect.signature(init)
parameters = [p for p in init_signature.parameters.values() if p.name != "self" and p.kind != p.VAR_KEYWORD]
for p in parameters:
            if p.kind == p.VAR_POSITIONAL:
raise RuntimeError(
"scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention." % (cls, init_signature)
)
return sorted([p.name for p in parameters])
def get_params(self):
"""
Get params for this selector
Returns: mapping of string to any
parameter names mapped to their values
"""
out = {}
for key in self._get_param_names():
value = getattr(self, key, None)
out[key] = value
return out
def set_params(self, **params):
"""
Set the parameters of this selector
Args:
**params: dict
                Selector parameters
Returns:
self: selector instance
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params()
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition("__")
if key not in valid_params:
raise ValueError(
"Invalid parameter %s for selector %s. "
"Check the list of available parameters "
"with `estimator.get_params().keys()`." % (key, self)
)
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
class DantzigSelector(BaseSelector):
"""
Equation 11 in
https://orfe.princeton.edu/~jqfan/papers/06/SIS.pdf
and reference in https://projecteuclid.org/download/pdfview_1/euclid.aos/1201012958
"""
def __init__(self, lambd, sigma=1.0, **kwargs):
"""
Dantzig selector
Args:
            lambd (float): tunable parameter
sigma: standard deviation of the error
"""
self.lambd = lambd
self.sigma = sigma
super().__init__(**kwargs)
def construct_loss(self, x, y, beta) -> float:
"""
Get loss function from data and tentative coefficients beta
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): N coefficients
Returns: loss value
"""
return np.sum(np.abs(beta)).item()
def construct_jac(self, x: np.ndarray, y: np.ndarray) -> Callable:
"""
Jacobian of cost functions
Args:
x:
y:
Returns:
"""
def _jac(beta):
sign = np.sign(beta)
sign[np.abs(sign) < 0.1] = 1.0
sign *= 30.0 # multiply the gradients to get better convergence
return sign
return _jac
def construct_constraints(
self, x: np.ndarray, y: np.ndarray, beta: Optional[np.ndarray] = None
) -> NonlinearConstraint:
"""
Get constraints dictionary from data, e.g.,
{"func": lambda beta: fun(x, y, beta), "type": "ineq"}
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): placeholder
Returns: dict of constraints
"""
def _constraint(beta):
return np.linalg.norm(x.T @ (y - x @ beta), np.infty)
def _jac(beta):
vec = x.T @ (y - x @ beta)
max_ind = np.argmax(np.abs(vec))
der = np.zeros_like(vec.ravel())
der[max_ind] = np.sign(vec[max_ind])
return -x.T.dot(x).dot(der)
return NonlinearConstraint(_constraint, -np.infty, self.lambd * self.sigma, jac=_jac)
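# A hypothetical usage sketch (synthetic data; the lambd value is arbitrary and the
# SLSQP optimization may need tuning of lambd/sigma/options to converge):
def _dantzig_selector_sketch():
    rng = np.random.RandomState(0)
    x = rng.randn(50, 8)
    beta_true = np.zeros(8)
    beta_true[:3] = [1.5, -2.0, 0.7]
    y = x @ beta_true + 0.01 * rng.randn(50)
    selector = DantzigSelector(lambd=0.5)
    indices = selector.select(x, y)          # indices of non-zero coefficients
    return indices, selector.get_coef()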
class PenalizedLeastSquares(BaseSelector):
"""
    Penalized least squares. In addition to minimizing the sum-of-squares loss,
    it adds a penalty term on the coefficients.
"""
def construct_loss(self, x: np.ndarray, y: np.ndarray, beta: np.ndarray) -> float:
"""
Construct the loss function. An extra penalty term is added
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): N coefficients
Returns: sum of errors
"""
n = x.shape[0]
se = 1.0 / (2 * n) * np.sum((y - x.dot(beta)) ** 2) + self.penalty(beta, x=x, y=y)
return se
def _sse_jac(self, x, y, beta):
n = x.shape[0]
return 1.0 / n * (y - x.dot(beta)).T.dot(-x)
def _penalty_jac(self, x, y, beta):
return 0.0
def construct_jac(self, x: np.ndarray, y: np.ndarray):
"""
Construct the jacobian of loss function
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
        Returns: Jacobian function
"""
def _jac(beta):
return self._sse_jac(x, y, beta) + self._penalty_jac(x, y, beta)
return _jac
def construct_constraints(
self, x: np.ndarray, y: np.ndarray, beta: Optional[np.ndarray] = None
) -> List[Optional[Dict]]:
"""
No constraints
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): placeholder only
Returns: a list of dictionary constraints
"""
return []
def penalty(self, beta: np.ndarray, x: Optional[np.ndarray] = None, y: Optional[np.ndarray] = None) -> float:
"""
Calculate the penalty from input x, output y and coefficient beta
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): N coefficients
Returns: penalty value
"""
return 0.0
class SCAD(PenalizedLeastSquares):
"""
Smoothly clipped absolute deviation (SCAD),
equation 12 and 13 in https://orfe.princeton.edu/~jqfan/papers/06/SIS.pdf
"""
def __init__(self, lambd: Union[float, np.ndarray], a: float = 3.7, **kwargs):
"""
Smoothly clipped absolute deviation.
Args:
lambd (float or list of floats): The weights for the penalty
a (float): hyperparameter in SCAD penalty
"""
self.lambd = lambd
self.a = a
super().__init__(**kwargs)
def penalty(self, beta: np.ndarray, x: Optional[np.ndarray] = None, y: Optional[np.ndarray] = None) -> float:
"""
Calculate the SCAD penalty from input x, output y
and coefficient beta
Args:
beta (np.ndarray): N coefficients
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
Returns: penalty value
"""
beta_abs = np.abs(beta)
penalty = (
self.lambd * beta_abs * (beta_abs <= self.lambd)
+ -(beta_abs ** 2 - 2 * self.a * self.lambd * beta_abs + self.lambd ** 2)
/ (2 * (self.a - 1))
* (beta_abs > self.lambd)
* (beta_abs <= self.a * self.lambd)
+ (self.a + 1) * self.lambd ** 2 / 2.0 * (beta_abs > self.a * self.lambd)
)
return np.sum(penalty).item()
def _penalty_jac(self, x, y, beta):
beta = np.abs(beta)
z = self.a * self.lambd - beta
z[z < 0] = 0
return self.lambd * (beta <= self.lambd + z / ((self.a - 1) * self.lambd) * (beta > self.lambd))
class Lasso(PenalizedLeastSquares):
"""
Simple Lasso regression
"""
def __init__(self, lambd, **kwargs):
"""
Lasso regression with lambda * norm_1(beta) as penalty
Args:
lambd (float): weights for the penalty
**kwargs:
"""
self.lambd = lambd
super().__init__(**kwargs)
def penalty(self, beta: np.ndarray, x: Optional[np.ndarray] = None, y: Optional[np.ndarray] = None) -> float:
"""
Calculate the penalty from input x, output y and coefficient beta
Args:
beta (np.ndarray): N coefficients
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
Returns: penalty value
"""
beta_abs = np.abs(beta)
return np.sum(self.lambd * beta_abs).item()
def _penalty_jac(self, x, y, beta):
sign = np.sign(beta)
sign[np.abs(sign) < 0.2] = 1
return self.lambd * sign
class AdaptiveLasso(PenalizedLeastSquares):
"""
Adaptive lasso regression using OLS coefficients
as the root-n estimator coefficients
"""
def __init__(self, lambd, gamma, **kwargs):
"""
Adaptive lasso regression
Args:
lambd (float or list of floats):
gamma (float): exponential for hat(beta)
**kwargs:
"""
self.lambd = lambd
self.gamma = gamma
self.w = 1
super().__init__(**kwargs)
def select(self, x, y, options=None) -> Optional[np.ndarray]:
"""
Select feature indices from x
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
options (dict): options in the optimizations provided
to scipy.optimize.minimize
Returns: list of int indices
"""
self.w = self.get_w(x, y)
return super().select(x, y, options)
def get_w(self, x, y) -> np.ndarray:
"""
Get adaptive weights from data
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
Returns: coefficients array
"""
beta_hat = lstsq(x, y)[0]
w = 1.0 / np.abs(beta_hat) ** self.gamma
return w
def penalty(self, beta: np.ndarray, x: Optional[np.ndarray] = None, y: Optional[np.ndarray] = None) -> float:
"""
Calculate the penalty from input x, output y and coefficient beta
Args:
beta (np.ndarray): N coefficients
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
Returns: penalty value
"""
return np.sum(self.lambd * self.w * np.abs(beta)).item()
def _penalty_jac(self, x, y, beta):
sign = np.sign(beta)
sign[np.abs(sign) < 0.2] = 1
return self.lambd * self.w * sign
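# Sketch (illustration only) of the adaptive weights returned by get_w above:
# coefficients that the ordinary-least-squares fit already finds large receive a
# small penalty weight, while near-zero OLS coefficients are penalized heavily.
def _adaptive_weight_sketch():
    beta_hat = np.array([2.0, 0.1, -0.5])
    gamma = 1.0
    return 1.0 / np.abs(beta_hat) ** gamma   # array([0.5, 10., 2.])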
class L0BrutalForce(BaseSelector):
"""
    Brute-force combinatorial screening of features.
    This method takes all possible combinations of features
    and optimizes the following loss function:
        1/2 * mean((y - x @ beta)**2) + lambd * |beta|_0
"""
def __init__(self, lambd: float, **kwargs):
"""
Initialization of L0 optimization
Args:
lambd (float): penalty term
**kwargs:
"""
self.lambd = lambd
super().__init__(**kwargs)
def select(
self, x: np.ndarray, y: np.ndarray, options: Optional[Dict] = None, n_job: int = 1
) -> Optional[np.ndarray]:
"""
L0 combinatorial optimization
Args:
x (np.ndarray): design matrix
y (np.ndarray): target vector
options:
            n_job (int): number of parallel jobs (CPUs) used by joblib
Returns:
"""
n, p = x.shape
index_array = list(range(p))
def _lstsq(c):
x_comb = x[:, c]
beta = lstsq(x_comb, y)[0]
res = 1.0 / 2 * np.mean((x_comb.dot(beta) - y) ** 2)
penalty = self.lambd * len(c)
res += penalty
return res
indices = []
for p_temp in range(1, p + 1):
for comb in combinations(index_array, p_temp):
indices.append(comb)
loss = Parallel(n_jobs=n_job)(delayed(_lstsq)(comb) for comb in indices)
argmin = np.argmin(loss)
self.indices = np.array(indices[argmin])
x_temp = x[:, self.indices]
self.coef_ = np.zeros_like(x[0, :])
self.coef_[self.indices] = lstsq(x_temp, y)[0] # type: ignore
return self.indices
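# Note (added for illustration, not part of the original module): the enumeration in
# select() above fits every non-empty subset of the p features, i.e. 2**p - 1
# least-squares problems, so this selector is only practical for small p.
def _l0_subset_count_sketch(p):
    # e.g. _l0_subset_count_sketch(10) == 2**10 - 1 == 1023
    return sum(1 for k in range(1, p + 1) for _ in combinations(range(p), k))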
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py
|
brianherrera/lumberyard
| 1,738 |
65629
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import base
from ..constants import voidElements, namespaces
class TreeWalker(base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, _ = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END or
next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if namespace != namespaces["html"] or name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
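# A hypothetical usage sketch (assumes the genshi package is installed; obtaining
# the walker via html5lib.getTreeWalker("genshi") and serializing it with
# HTMLSerializer is the usual html5lib pattern, shown here as comments only):
#
#   import html5lib
#   from genshi.input import XML
#   walker = html5lib.getTreeWalker("genshi")
#   tokens = walker(XML("<p>hello <br/> world</p>"))
#   print(html5lib.serializer.HTMLSerializer().render(tokens))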
|
configs/fcn/fcn_r50-d8_480x480_40k_pascal_context.py
|
weiyx16/mmsegmentation
| 367 |
65650
|
<filename>configs/fcn/fcn_r50-d8_480x480_40k_pascal_context.py
_base_ = [
'../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_context.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
model = dict(
decode_head=dict(num_classes=60),
auxiliary_head=dict(num_classes=60),
test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
|
tensorflow_probability/python/distributions/joint_distribution_auto_batched_test.py
|
jakee417/probability-1
| 3,670 |
65651
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the JointDistributionAutoBatched."""
import collections
import os
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfb = tfp.bijectors
tfd = tfp.distributions
JAX_MODE = False
Root = tfd.JointDistributionCoroutineAutoBatched.Root
@test_util.test_all_tf_execution_regimes
class JointDistributionAutoBatchedTest(test_util.TestCase):
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_batch_and_event_shape_with_plate(self, jd_class):
models = {}
def coroutine_model():
g = yield tfd.LogNormal(0., 1.)
df = yield tfd.Exponential(1.)
loc = yield tfd.Sample(tfd.Normal(0, g), 20)
yield tfd.StudentT(tf.expand_dims(df, -1), loc, 1)
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.LogNormal(0., 1.),
tfd.Exponential(1.),
lambda _, g: tfd.Sample(tfd.Normal(0, g), 20),
lambda loc, df: tfd.StudentT(tf.expand_dims(df, -1), loc, 1)
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('g', tfd.LogNormal(0., 1.)),
('df', tfd.Exponential(1.)),
('loc', lambda g: tfd.Sample(tfd.Normal(0, g), 20)),
('x', lambda loc, df: tfd.StudentT(tf.expand_dims(df, -1), loc, 1))))
joint = jd_class(models[jd_class], validate_args=True)
# Properties `event_shape` and `batch_shape` should be defined
# even before any sampling calls have occurred.
self.assertAllEqual(joint._model_flatten(joint.event_shape),
[[], [], [20], [20]])
self.assertAllEqual(joint.batch_shape, [])
is_scalar = joint._model_flatten(joint.is_scalar_event())
self.assertAllEqual(is_scalar[0], True)
self.assertAllEqual(is_scalar[1], True)
self.assertAllEqual(is_scalar[2], False)
self.assertAllEqual(is_scalar[3], False)
event_shape = joint._model_flatten(joint.event_shape_tensor())
self.assertAllEqual(event_shape[0], [])
self.assertAllEqual(event_shape[1], [])
self.assertAllEqual(event_shape[2], [20])
self.assertAllEqual(event_shape[3], [20])
self.assertEqual(joint.is_scalar_batch(), True)
batch_shape = joint.batch_shape_tensor()
self.assertAllEqual(batch_shape, [])
@parameterized.named_parameters(
*(dict( # pylint: disable=g-complex-comprehension
testcase_name=jd_type + '_' + sampler_type,
jd_class=getattr(tfd, 'JointDistribution' + jd_type + 'AutoBatched'),
sampler_type=sampler_type)
for jd_type in ('Coroutine', 'Sequential', 'Named')
for sampler_type in ('stateful', 'stateless')))
def test_model_with_nontrivial_batch_shape(self, jd_class, sampler_type):
models = {}
def coroutine_model():
g = yield tfd.LogNormal(0., [1., 2.])
df = yield tfd.Exponential([1., 2.])
loc = yield tfd.Sample(tfd.Normal(0, g), 20)
yield tfd.StudentT(tf.expand_dims(df, -1), loc, 1)
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.LogNormal(0., [1., 2.]),
tfd.Exponential([1., 2.]),
lambda _, g: tfd.Sample(tfd.Normal(0, g), 20),
lambda loc, df: tfd.StudentT(tf.expand_dims(df, -1), loc, 1)
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('g', tfd.LogNormal(0., [1., 2.])),
('df', tfd.Exponential([1., 2.])),
('loc', lambda g: tfd.Sample(tfd.Normal(0, g), 20)),
('x', lambda loc, df: tfd.StudentT(tf.expand_dims(df, -1), loc, 1))))
joint = jd_class(models[jd_class], batch_ndims=1, validate_args=True)
self.assertAllEqual(joint._model_flatten(joint.event_shape),
[[], [], [20], [20]])
self.assertAllEqual(joint.batch_shape, [2])
is_scalar = joint._model_flatten(joint.is_scalar_event())
self.assertAllEqual(is_scalar[0], True)
self.assertAllEqual(is_scalar[1], True)
self.assertAllEqual(is_scalar[2], False)
self.assertAllEqual(is_scalar[3], False)
self.assertAllEqual(joint.is_scalar_batch(), False)
batch_shape = self.evaluate(joint.batch_shape_tensor())
self.assertAllEqual(batch_shape, [2])
x = joint.sample([5], seed=test_util.test_seed(sampler_type=sampler_type))
lp = self.evaluate(joint.log_prob(x))
self.assertAllEqual(lp.shape, [5, 2])
def test_model_with_dynamic_batch_ndims(self):
if tf.executing_eagerly():
self.skipTest('Dynamic shape.')
def coroutine_model():
g = yield tfd.LogNormal(0., [1., 2.])
df = yield tfd.Exponential([1., 2.])
loc = yield tfd.Sample(tfd.Normal(0, g), 20)
yield tfd.StudentT(tf.expand_dims(df, -1), loc, 1)
joint = tfd.JointDistributionCoroutineAutoBatched(
coroutine_model,
batch_ndims=tf1.placeholder_with_default(1, shape=[]),
validate_args=True)
batch_shape_tensor = self.evaluate(joint.batch_shape_tensor())
self.assertAllEqual(batch_shape_tensor, [2])
event_shape_tensor = self.evaluate(joint.event_shape_tensor())
self.assertAllEqual(event_shape_tensor[0], [])
self.assertAllEqual(event_shape_tensor[1], [])
self.assertAllEqual(event_shape_tensor[2], [20])
self.assertAllEqual(event_shape_tensor[3], [20])
self.assertAllEqual(joint.batch_shape, tf.TensorShape(None))
self.assertAllEqual(joint._model_flatten(joint.event_shape),
[tf.TensorShape(None)] * 4)
x = joint.sample([5], seed=test_util.test_seed(sampler_type='stateless'))
lp = self.evaluate(joint.log_prob(x))
self.assertAllEqual(lp.shape, [5, 2])
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'base_jd_class': tfd.JointDistributionCoroutine,
'jda_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'base_jd_class': tfd.JointDistributionSequential,
'jda_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'base_jd_class': tfd.JointDistributionNamed,
'jda_class': tfd.JointDistributionNamedAutoBatched})
def test_broadcast_ragged_batch_shape(self, base_jd_class, jda_class):
base_jd_models = {}
# Writing a JDC with ragged batch shape will broadcast the first
# distribution over the second.
# (though note, this model breaks `log_prob` with nontrivial sample shape).
def coroutine():
x = yield Root(tfd.Normal(0., scale=1.))
yield tfd.Normal(x[..., tf.newaxis], [1., 2., 3., 4., 5.])
base_jd_models[tfd.JointDistributionCoroutine] = coroutine
base_jd_models[tfd.JointDistributionSequential] = [
tfd.Normal(0., scale=1.),
lambda x: tfd.Normal(x[..., tf.newaxis], [1., 2., 3., 4., 5.])
]
base_jd_models[tfd.JointDistributionNamed] = {
'x': tfd.Normal(0., scale=1.),
'y': lambda x: tfd.Normal(x[..., tf.newaxis], [1., 2., 3., 4., 5.])
}
# But we can get equivalent behavior in a JDCA by expanding dims so that
# the batch dimensions line up.
jd_auto_models = {}
def coroutine_auto():
x = yield tfd.Normal(0., scale=[1.])
yield tfd.Normal(x, [1., 2., 3., 4., 5.])
jd_auto_models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_auto
jd_auto_models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.Normal(0., scale=[1.]),
lambda x: tfd.Normal(x, [1., 2., 3., 4., 5.])
]
jd_auto_models[tfd.JointDistributionNamedAutoBatched] = (
collections.OrderedDict((
('x', tfd.Normal(0., scale=[1.])),
('y', lambda x: tfd.Normal(x, [1., 2., 3., 4., 5.])))))
# Writing a JD with ragged batch shape will broadcast the first
# distribution over the second.
# (though note, this model breaks `log_prob` with nontrivial sample shape).
jd_broadcasting = base_jd_class(base_jd_models[base_jd_class])
# This model's broadcasting behavior is a footgun (it can break inference
# routines and cause silently incorrect optimization); it should be
# disallowed by `validate_args`.
with self.assertRaisesRegexp(
Exception,
('Component batch shapes are inconsistent|'
'Broadcasting probably indicates an error in model specification')):
jda_invalid = jda_class(jd_auto_models[jda_class],
batch_ndims=1, validate_args=True)
_ = self.evaluate(jda_invalid.log_prob(
jda_invalid.sample(seed=test_util.test_seed())))
# But, if the user wants to run with no guardrails, one can eke out
# performance wins when evaluating a shared value over multiple models.
jda_broadcasting = jda_class(jd_auto_models[jda_class], batch_ndims=1)
self.assertAllEqual(
jda_broadcasting._model_flatten(jda_broadcasting.event_shape),
[[], []])
self.assertAllEqual(jda_broadcasting.batch_shape, [5])
joint_sample = jda_broadcasting.sample(seed=test_util.test_seed())
x_sample, y_sample = self.evaluate(
list(joint_sample.values()) if hasattr(joint_sample, 'values')
else joint_sample)
# The model samples only a single value for x, shared across the batch.
self.assertAllEqual(x_sample.shape, [1])
self.assertAllEqual(y_sample.shape, [5])
lp_jd_broadcast = self.evaluate(jd_broadcasting.log_prob(
jd_broadcasting._model_unflatten([x_sample[..., 0], y_sample])))
lp_jda_broadcast = self.evaluate(jda_broadcasting.log_prob(
jda_broadcasting._model_unflatten([x_sample, y_sample])))
self.assertAllEqual(lp_jda_broadcast.shape, [5])
self.assertAllEqual(lp_jd_broadcast, lp_jda_broadcast)
# Try drawing multiple samples and computing log-prob.
joint_sample = self.evaluate(jda_broadcasting.sample(
[2, 3], seed=test_util.test_seed()))
lp_jda_broadcast = self.evaluate(jda_broadcasting.log_prob(joint_sample))
self.assertAllEqual(lp_jda_broadcast.shape, [2, 3, 5])
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_log_prob_and_prob_with_plate(self, jd_class):
models = {}
def coroutine_model():
a = yield tfd.Bernoulli(probs=0.5, dtype=tf.float32)
b = yield tfd.Sample(tfd.Bernoulli(probs=0.25 + 0.5*a,
dtype=tf.float32), 2)
yield tfd.Normal(loc=a, scale=1. + b)
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.Bernoulli(probs=0.5, dtype=tf.float32),
lambda a: tfd.Sample(tfd.Bernoulli( # pylint: disable=g-long-lambda
probs=0.25 + 0.5*a, dtype=tf.float32), 2),
lambda b, a: tfd.Normal(loc=a, scale=1. + b)
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('a', tfd.Bernoulli(probs=0.5, dtype=tf.float32)),
('b', lambda a: tfd.Sample(tfd.Bernoulli( # pylint: disable=g-long-lambda
probs=0.25 + 0.5*a, dtype=tf.float32), 2)),
('c', lambda b, a: tfd.Normal(loc=a, scale=1. + b))))
joint = jd_class(models[jd_class], validate_args=True)
z = self.evaluate(joint.sample(seed=test_util.test_seed()))
a, b, c = z.values() if hasattr(z, 'values') else z
log_prob = self.evaluate(joint.log_prob(z))
prob = self.evaluate(joint.prob(z))
expected_log_prob = self.evaluate(
np.log(0.5) +
tf.reduce_sum(tf.math.log(b * (0.25 + 0.5 * a) +
(1 - b) * (0.75 - 0.5 * a))) +
tf.reduce_sum(-0.5 * ((c - a) / (1. + b))**2 -
0.5 * np.log(2. * np.pi) -
tf.math.log((1. + b))))
self.assertAllClose(log_prob, expected_log_prob)
self.assertAllClose(prob, np.exp(expected_log_prob))
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_log_prob_multiple_samples(self, jd_class):
models = {}
def coroutine_model():
a = yield tfd.Bernoulli(probs=0.5, dtype=tf.float32)
b = yield tfd.Bernoulli(probs=0.25 + 0.5*a,
dtype=tf.float32)
yield tfd.Normal(loc=a, scale=1. + b)
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.Bernoulli(probs=0.5, dtype=tf.float32),
lambda a: tfd.Bernoulli(probs=0.25 + 0.5*a, dtype=tf.float32),
lambda b, a: tfd.Normal(loc=a, scale=1. + b)
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('a', tfd.Bernoulli(probs=0.5, dtype=tf.float32)),
('b', lambda a: tfd.Bernoulli(probs=0.25 + 0.5*a, dtype=tf.float32)),
('c', lambda b, a: tfd.Normal(loc=a, scale=1. + b))))
joint = jd_class(models[jd_class], validate_args=True)
z = joint.sample(4, seed=test_util.test_seed())
log_prob = joint.log_prob(z)
a, b, c = z.values() if hasattr(z, 'values') else z # pylint: disable=unbalanced-tuple-unpacking
expected_log_prob = (
np.log(0.5) +
tf.math.log(b * (0.25 + 0.5 * a) +
(1 - b) * (0.75 -0.5 * a)) +
-0.5 * ((c - a) / (1. + b)) ** 2 -
0.5 * np.log(2. * np.pi) -
tf.math.log((1. + b)))
self.assertAllClose(*self.evaluate([log_prob, expected_log_prob]))
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_sample_and_log_prob(self, jd_class):
# Define a bijector to detect if/when `inverse` is called.
inverted_values = []
class InverseTracingExp(tfb.Exp):
def _inverse(self, y):
inverted_values.append(y)
return tf.math.log(y)
models = {}
def coroutine_model():
g = yield InverseTracingExp()(tfd.Normal(0., 1.), name='g')
df = yield tfd.Exponential(1., name='df')
loc = yield tfd.Sample(tfd.Normal(0, g), 20, name='loc')
yield tfd.StudentT(df, loc, 1, name='x')
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
InverseTracingExp()(tfd.Normal(0., 1.), name='g'),
tfd.Exponential(1., name='df'),
lambda _, g: tfd.Sample(tfd.Normal(0, g), 20, name='loc'),
lambda loc, df: tfd.StudentT(df, loc, 1, name='x')
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('g', InverseTracingExp()(tfd.Normal(0., 1.))),
('df', tfd.Exponential(1.)),
('loc', lambda g: tfd.Sample(tfd.Normal(0, g), 20)),
('x', lambda loc, df: tfd.StudentT(df, loc, 1))))
joint = jd_class(models[jd_class], validate_args=True)
seed = test_util.test_seed(sampler_type='stateless')
for sample_shape in ([], [5]):
inverted_values.clear()
x1, lp1 = self.evaluate(
joint.experimental_sample_and_log_prob(
sample_shape,
seed=seed,
df=2.7)) # Check that kwargs are supported.
x2 = self.evaluate(
joint.sample(sample_shape, seed=seed, df=2.7))
self.assertAllCloseNested(x1, x2)
self.assertLen(inverted_values, 0)
lp2 = joint.log_prob(x1)
self.assertLen(inverted_values, 1)
self.assertAllClose(lp1, lp2)
@test_util.jax_disable_test_missing_functionality('b/157594634')
def test_sample_distributions(self):
def coroutine_model():
g = yield tfd.Normal(0., 1., name='g')
df = yield tfd.Exponential(1., name='df')
loc = yield tfd.Normal(tf.zeros([20]), g, name='loc')
yield tfd.StudentT(df, loc, 1, name='x')
joint = tfd.JointDistributionCoroutineAutoBatched(coroutine_model)
ds, xs = joint.sample_distributions([4, 3], seed=test_util.test_seed())
for d, x in zip(ds, xs):
self.assertGreaterEqual(len(d.batch_shape), 2)
lp = d.log_prob(x)
self.assertAllEqual(lp.shape[:2], [4, 3])
@test_util.jax_disable_test_missing_functionality('b/201586404')
def test_sample_distributions_not_composite_tensor_raises_error(self):
def coroutine_model():
yield tfd.TransformedDistribution(tfd.Normal(0., 1.),
tfb.Exp(),
name='td')
joint = tfd.JointDistributionCoroutineAutoBatched(coroutine_model)
# Sampling with trivial sample shape avoids the vmap codepath.
ds, _ = joint.sample_distributions([], seed=test_util.test_seed())
self.assertIsInstance(ds[0], tfd.TransformedDistribution)
with self.assertRaisesRegex(
TypeError, r'Some component distribution\(s\) cannot be returned'):
joint.sample_distributions([4, 3], seed=test_util.test_seed())
def test_sample_with_batch_value(self):
@tfd.JointDistributionCoroutineAutoBatched
def dist():
a = yield tfd.Sample(tfd.Normal(0, 1.), 2)
b = yield tfd.Sample(tfd.Normal(0, 1.), 3)
# The following line fails if not autovectorized.
yield tfd.Normal(a[tf.newaxis, ...] * b[..., tf.newaxis], 1.)
x = self.evaluate(dist.sample(123, seed=test_util.test_seed()))
x2 = self.evaluate(dist.sample(value=x, seed=test_util.test_seed()))
self.assertAllCloseNested(x, x2)
# Also test a dict-type value (JDNamed).
dist = tfd.JointDistributionNamedAutoBatched({
'a': tfd.Sample(tfd.Normal(0, 1.), 2),
'b': tfd.Sample(tfd.Normal(0, 1.), 3),
'c': lambda a, b: tfd.Normal( # pylint: disable=g-long-lambda
a[tf.newaxis, ...] * b[..., tf.newaxis], 1.)})
x = self.evaluate(dist.sample(123, seed=test_util.test_seed()))
x2 = self.evaluate(dist.sample(value=x, seed=test_util.test_seed()))
self.assertAllCloseNested(x, x2)
def test_sample_with_value_as_kwarg(self):
@tfd.JointDistributionCoroutineAutoBatched
def dist():
a = yield tfd.Sample(tfd.Normal(0, 1.), 2, name='a')
b = yield tfd.Sample(tfd.Normal(0, 1.), 3, name='b')
# The following line fails if not autovectorized.
yield tfd.Normal(a[tf.newaxis, ...] * b[..., tf.newaxis], 1., name='c')
x = self.evaluate(dist.sample(4, seed=test_util.test_seed()))
x2 = self.evaluate(dist.sample(seed=test_util.test_seed(), a=x.a))
self.assertAllClose(x.a, x2.a)
self.assertAllEqual(x2.b.shape, [4, 3])
self.assertAllEqual(x2.c.shape, [4, 3, 2])
@parameterized.named_parameters(
dict(testcase_name='stateful', sampler_type='stateful'),
dict(testcase_name='stateless', sampler_type='stateless'))
def test_sample_with_partially_specified_value(self, sampler_type):
num_features = 5
def dist():
scale_variance = yield tfd.InverseGamma(0.5, 0.5)
scale_noncentered = yield tfd.Sample(tfd.HalfNormal(1.), num_features)
scale = scale_noncentered * scale_variance[..., None]**0.5
weights_noncentered = yield tfd.Sample(tfd.Normal(0., 1.), num_features)
yield tfd.Deterministic(weights_noncentered * scale)
joint = tfd.JointDistributionCoroutineAutoBatched(dist, validate_args=True)
value_partial_batch_dim = 4
value_ = (3.,
None,
None,
np.ones([value_partial_batch_dim, num_features]))
value = [None if v is None else tf.cast(v, tf.float32) for v in value_]
# The sample should keep the specified values.
xs = self.evaluate(
joint.sample(
value=value, seed=test_util.test_seed(sampler_type=sampler_type)))
self.assertAllEqual(xs[0], tf.fill([value_partial_batch_dim], value[0]))
self.assertAllEqual(xs[1].shape, [value_partial_batch_dim, num_features])
self.assertAllEqual(xs[2].shape, [value_partial_batch_dim, num_features])
self.assertAllEqual(xs[3], value[3])
# With sample shape.
sample_shape = [6, 2]
samples = joint.sample(sample_shape, value=value,
seed=test_util.test_seed(sampler_type=sampler_type))
xs = self.evaluate(samples)
expect_shp = sample_shape + [value_partial_batch_dim, num_features]
self.assertAllEqual(
xs[0], tf.fill(sample_shape + [value_partial_batch_dim], value[0]))
self.assertAllEqual(xs[1].shape, expect_shp)
self.assertAllEqual(xs[2].shape, expect_shp)
self.assertAllEqual(xs[3], value[3] * tf.ones(expect_shp))
sample_shape_dynamic = tf1.placeholder_with_default(
sample_shape, shape=None)
samples = joint.sample(sample_shape_dynamic, value=value,
seed=test_util.test_seed(sampler_type=sampler_type))
xs = self.evaluate(samples)
self.assertAllEqual(
xs[0], tf.fill(sample_shape + [value_partial_batch_dim], value[0]))
self.assertAllEqual(xs[1].shape, expect_shp)
self.assertAllEqual(xs[2].shape, expect_shp)
self.assertAllEqual(xs[3], value[3] * tf.ones(expect_shp))
@parameterized.named_parameters(
dict(testcase_name='stateful', sampler_type='stateful'),
dict(testcase_name='stateless', sampler_type='stateless'))
def test_sample_with_prefix_of_values(self, sampler_type):
num_rows = 4
num_columns = 5
def dist():
a = yield tfd.Sample(tfd.Normal(0., 1.), num_rows, name='a')
b = yield tfd.Sample(tfd.Normal(0., 1.), num_columns, name='b')
yield tfd.Normal(a[..., None] * b[None, ...], 1., name='c')
tuple_joint = tfd.JointDistributionCoroutineAutoBatched(
dist, validate_args=True)
namedtuple_joint = tfd.JointDistributionCoroutineAutoBatched(
dist,
sample_dtype=collections.namedtuple(
'ModelSpec', ['a', 'b', 'c'])(
a=tf.float32, b=tf.float32, c=tf.float32),
validate_args=True)
value_partial_batch_dim = 3
v0 = 3. * np.ones([value_partial_batch_dim, num_rows]).astype(np.float32)
# Tuple (or namedtuple) value contains only the first variable.
tuple_value = (v0,)
namedtuple_value = collections.namedtuple('ValueSpec', ['a'])(a=v0)
for joint in (tuple_joint, namedtuple_joint):
for value in (tuple_value, namedtuple_value):
xs = self.evaluate(
joint.sample(value=value,
seed=test_util.test_seed(sampler_type=sampler_type)))
self.assertAllEqual(xs[0], v0)
self.assertAllEqual(xs[1].shape,
[value_partial_batch_dim, num_columns])
self.assertAllEqual(xs[2].shape,
[value_partial_batch_dim, num_rows, num_columns])
def test_unit_sample_shape_avoids_vectorization(self):
xs = [] # Collect (possibly symbolic) Tensors sampled inside the model.
@tfd.JointDistributionCoroutineAutoBatched
def dist():
x = yield tfd.Normal(0., 1., name='x')
xs.append(x)
# Try sampling with a variety of unit sample shapes.
self.assertEqual(
[1],
dist.sample(
1, seed=test_util.test_seed(sampler_type='seedless')).x.shape)
self.assertEqual(
[1],
dist.sample([1],
seed=test_util.test_seed(sampler_type='seedless')).x.shape)
self.assertEqual(
[1, 1],
dist.sample([1, 1],
seed=test_util.test_seed(sampler_type='seedless')).x.shape)
# Check that the model only ever saw the trivial sample shape.
for x in xs:
self.assertEqual(x.shape, [])
def test_unit_sample_shape(self):
@tfd.JointDistributionCoroutineAutoBatched
def dist():
x = yield tfd.Normal(loc=tf.zeros([3]), scale=1., name='x')
yield tfd.Bernoulli(logits=tf.einsum('n->', x), name='y')
for sample_shape in [(), 1, [1], [1, 1], [2]]:
self.assertAllEqual(
dist.log_prob(
dist.sample(sample_shape,
seed=test_util.test_seed())).shape,
np.reshape(sample_shape, [-1]))
def test_sample_dtype_structures_output(self):
num_features = 4
def dist():
scale_variance = yield Root(tfd.InverseGamma(0.5, 0.5))
scale_noncentered = yield Root(
tfd.Sample(tfd.HalfNormal(1.), num_features))
scale = scale_noncentered * scale_variance[..., None]**0.5
weights_noncentered = yield Root(
tfd.Sample(tfd.Normal(0., 1.), num_features))
yield tfd.Deterministic(weights_noncentered * scale)
    # Currently sample_dtype is only used for `tf.nest.pack_sequence_as`. In
# the future we may use it for error checking and/or casting.
sample_dtype = collections.namedtuple('Model', [
'scale_variance',
'scale_noncentered',
'weights_noncentered',
'weights',
])(*([None]*4))
joint = tfd.JointDistributionCoroutineAutoBatched(
dist, sample_dtype=sample_dtype, validate_args=True)
self.assertAllEqual(sorted(sample_dtype._fields),
sorted(joint.sample(
seed=test_util.test_seed())._fields))
ds, xs = joint.sample_distributions(seed=test_util.test_seed())
tf.nest.assert_same_structure(sample_dtype, ds)
tf.nest.assert_same_structure(sample_dtype, xs)
self.assertEqual([3, 4], joint.log_prob(joint.sample(
[3, 4], seed=test_util.test_seed())).shape)
def test_repr_with_custom_sample_dtype(self):
sd = collections.namedtuple('Model', ['s', 'w'])(None, None)
def dist():
s = yield tfd.Sample(tfd.InverseGamma(2, 2), 100)
yield tfd.Normal(0, s)
m = tfd.JointDistributionCoroutineAutoBatched(dist, sample_dtype=sd)
self.assertEqual(
('<tfp.distributions.JointDistributionCoroutineAutoBatched'
' \'JointDistributionCoroutineAutoBatched\''
' batch_shape=[]'
' event_shape=Model(s=[100], w=[100])'
' dtype=Model(s=float32, w=float32)>'),
repr(m))
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
@test_util.jax_disable_variable_test
def test_latent_dirichlet_allocation(self, jd_class): # pylint: disable=g-doc-args
"""Tests Latent Dirichlet Allocation joint model.
The LDA generative process can be written as:
```none
N[i] ~ Poisson(xi)
theta[i] ~ Dirichlet(alpha)
Z[i] ~ Multinomial(N[i], theta[i])
for k in 1...K:
      X[i,k] ~ Multinomial(Z[i, k], beta[k])
```
Typically `xi` is specified and `alpha`, `beta` are fit using type-II
maximum likelihood estimators.
Reference: http://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf
"""
seed = test_util.test_seed_stream()
# Hyperparameters.
num_topics = 3
num_words = 10
avg_doc_length = 5
u = tfd.Uniform(low=-1., high=1.)
alpha = tfp.util.TransformedVariable(
u.sample([num_topics], seed=seed()),
tfb.Softplus(), name='alpha')
beta = tf.Variable(u.sample([num_topics, num_words],
seed=seed()), name='beta')
    # Note: this is nearly 1:1 with the mathematical specification. The main
    # distinction is the use of Independent--this lets us easily aggregate
    # multinomials across topics (and in any "shape" of documents).
def lda_coroutine_model():
n = yield Root(tfd.Poisson(rate=avg_doc_length))
theta = yield Root(tfd.Dirichlet(concentration=alpha))
z = yield tfd.Multinomial(total_count=n, probs=theta)
yield tfd.Multinomial(total_count=z, logits=beta)
if jd_class is tfd.JointDistributionCoroutineAutoBatched:
model = lda_coroutine_model
elif jd_class is tfd.JointDistributionSequentialAutoBatched:
model = [
tfd.Poisson(rate=avg_doc_length), # n
tfd.Dirichlet(concentration=alpha), # theta
lambda theta, n: tfd.Multinomial(total_count=n, probs=theta), # z
lambda z: tfd.Multinomial(total_count=z, logits=beta)
]
elif jd_class is tfd.JointDistributionNamedAutoBatched:
model = collections.OrderedDict((
('n', tfd.Poisson(rate=avg_doc_length)),
('theta', tfd.Dirichlet(concentration=alpha)),
('z', lambda theta, n: tfd.Multinomial(total_count=n, probs=theta)),
('X', lambda z: tfd.Multinomial(total_count=z, logits=beta))))
# TODO(b/159842104): Enable autovectorization for Multinomial sampling.
lda = jd_class(model, validate_args=True, use_vectorized_map=False)
# Now, let's sample some "documents" and compute the log-prob of each.
docs_shape = [2, 4] # That is, 8 docs in the shape of [2, 4].
sample = lda.sample(docs_shape, seed=seed())
log_probs = lda.log_prob(sample)
self.assertEqual(docs_shape, log_probs.shape)
# Verify we correctly track trainable variables.
self.assertLen(lda.trainable_variables, 2)
self.assertIs(alpha.pretransformed_input, lda.trainable_variables[0])
self.assertIs(beta, lda.trainable_variables[1])
# Ensure we can compute gradients.
with tf.GradientTape() as tape:
# Note: The samples are not taped, hence implicitly "stop_gradient."
negloglik = -lda.log_prob(sample)
grads = tape.gradient(negloglik, lda.trainable_variables)
self.assertLen(grads, 2)
self.assertAllEqual((alpha.pretransformed_input.shape, beta.shape),
(grads[0].shape, grads[1].shape))
self.assertAllNotNone(grads)
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_default_event_space_bijector(self, jd_class):
models = {}
def coroutine_model():
high = yield tfd.LogNormal(0., [1.])
yield tfd.Uniform(low=[[-1., -2.]], high=high[..., tf.newaxis])
yield tfd.Deterministic([[0., 1., 2.]])
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.LogNormal(0., [1.]),
lambda high: tfd.Uniform(low=[[-1., -2.]], high=high[..., tf.newaxis]),
tfd.Deterministic([[0., 1., 2.]])
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('high', tfd.LogNormal(0., [1.])),
('x', lambda high: tfd.Uniform(low=[[-1., -2.]], # pylint: disable=g-long-lambda
high=high[..., tf.newaxis])),
('y', tfd.Deterministic([[0., 1., 2.]]))))
joint = jd_class(models[jd_class], batch_ndims=1, validate_args=True)
self.assertAllEqual(joint.batch_shape, [1])
self.assertAllEqualNested(tf.nest.flatten(joint.event_shape),
[[], [2], [3]])
joint_bijector = joint.experimental_default_event_space_bijector()
y = self.evaluate(joint.sample([2, 3], seed=test_util.test_seed()))
x = joint_bijector.inverse(y)
self.assertAllCloseNested(y, joint_bijector.forward(x))
fldj = joint_bijector.forward_log_det_jacobian(
x, event_ndims=tf.nest.pack_sequence_as(joint.dtype, [0, 1, 2]))
ildj = joint_bijector.inverse_log_det_jacobian(
y, event_ndims=tf.nest.pack_sequence_as(joint.dtype, [0, 1, 1]))
self.assertAllEqual(fldj.shape, joint.log_prob(y).shape)
self.assertAllClose(fldj, -ildj)
# Passing inputs *without* batch shape should return sane outputs.
y = self.evaluate(joint.sample([], seed=test_util.test_seed()))
# Strip the sample to represent just a single event.
unbatched_y = tf.nest.map_structure(lambda t: t[0, ...], y)
self.assertAllEqualNested(tf.nest.map_structure(tf.shape, unbatched_y),
joint.event_shape_tensor())
ildj = joint_bijector.inverse_log_det_jacobian(
unbatched_y,
event_ndims=tf.nest.pack_sequence_as(joint.dtype, [0, 1, 1]))
self.assertAllEqual(ildj.shape, joint.log_prob(unbatched_y).shape)
@parameterized.named_parameters(
{'testcase_name': 'coroutine',
'jd_class': tfd.JointDistributionCoroutineAutoBatched},
{'testcase_name': 'sequential',
'jd_class': tfd.JointDistributionSequentialAutoBatched},
{'testcase_name': 'named',
'jd_class': tfd.JointDistributionNamedAutoBatched})
def test_default_event_space_bijector_constant_jacobian(self, jd_class):
models = {}
def coroutine_model():
yield tfd.Normal(0., [1., 2.], name='x')
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.Normal(0., [1., 2.], name='x')
]
models[tfd.JointDistributionNamedAutoBatched] = {
'x': tfd.Normal(0., [1., 2.], name='x')}
joint = jd_class(models[jd_class], batch_ndims=1, validate_args=True)
self.assertAllEqual(joint.batch_shape, [2])
joint_bijector = joint.experimental_default_event_space_bijector()
y = self.evaluate(joint.sample([3], seed=test_util.test_seed()))
x = joint_bijector.inverse(y)
self.assertAllCloseNested(y, joint_bijector.forward(x))
fldj = joint_bijector.forward_log_det_jacobian(x)
ildj = joint_bijector.inverse_log_det_jacobian(y)
self.assertAllEqual(fldj.shape, joint.log_prob(y).shape)
self.assertAllClose(fldj, -ildj)
def test_nested_joint_distributions(self):
batch_shape = [2, 3]
def inner_fn():
xy = yield tfd.JointDistributionNamedAutoBatched(
{'x': tfd.Normal(loc=tf.zeros(batch_shape),
scale=tf.ones(batch_shape),
name='x'),
'y': lambda x: tfd.Poisson(log_rate=x, name='y')},
batch_ndims=2,
name='xy')
_ = yield tfd.Normal(loc=0., scale=xy['y'], name='z')
joint = tfd.JointDistributionSequentialAutoBatched([
tfd.JointDistributionCoroutineAutoBatched(inner_fn,
batch_ndims=1,
name='a')])
z = joint.sample(seed=test_util.test_seed())
# Batch and event shape.
self.assertAllEqual(joint.batch_shape, [])
self.assertAllEqualNested(
tf.nest.map_structure(lambda x: tf.TensorShape(x.shape), z),
joint.event_shape)
# Sample shape.
z2 = self.evaluate(
joint.sample(5, seed=test_util.test_seed()))
lp2 = joint.log_prob(z2)
self.assertAllEqual(lp2.shape, [5])
z3 = joint.sample(value=z2, seed=test_util.test_seed())
self.assertAllCloseNested(z2, z3)
@parameterized.named_parameters(*[
dict(testcase_name='_{}{}'.format(jd_class.__name__, # pylint: disable=g-complex-comprehension
'_jit' if jit else ''),
jd_class=jd_class, jit=jit)
for jd_class in (tfd.JointDistributionCoroutineAutoBatched,
tfd.JointDistributionSequentialAutoBatched,
tfd.JointDistributionNamedAutoBatched)
for jit in (False, True)
])
def test_kahan_precision(self, jd_class, jit):
maybe_jit = lambda f: f
if jit:
self.skip_if_no_xla()
if not JAX_MODE and not tf.test.is_gpu_available():
self.skipTest('b/179303849')
maybe_jit = tf.function(jit_compile=True)
def make_models(dtype):
models = {}
def mk_20k_poisson(log_rate):
return tfd.Poisson(log_rate=tf.broadcast_to(log_rate[..., tf.newaxis],
log_rate.shape + (20_000,)))
def coroutine_model():
log_rate = yield tfd.Normal(0., dtype(.2), name='log_rate')
yield mk_20k_poisson(log_rate).copy(name='x')
models[tfd.JointDistributionCoroutineAutoBatched] = coroutine_model
models[tfd.JointDistributionSequentialAutoBatched] = [
tfd.Normal(0., dtype(.2)), mk_20k_poisson
]
models[tfd.JointDistributionNamedAutoBatched] = collections.OrderedDict((
('log_rate', tfd.Normal(0., dtype(.2))), ('x', mk_20k_poisson)))
return models
joint = jd_class(make_models(np.float32)[jd_class], validate_args=True,
experimental_use_kahan_sum=True)
joint64 = jd_class(make_models(np.float64)[jd_class], validate_args=True)
stream = test_util.test_seed_stream()
nsamp = 7
xs = self.evaluate(
joint.sample(log_rate=tf.zeros([nsamp]), seed=stream()))
if isinstance(xs, dict):
xs['log_rate'] = tfd.Normal(0, .2).sample(nsamp, seed=stream())
else:
xs = (tfd.Normal(0, .2).sample(nsamp, seed=stream()), xs[1])
xs64 = tf.nest.map_structure(lambda x: tf.cast(x, tf.float64), xs)
lp = maybe_jit(joint.copy(validate_args=not jit).log_prob)(xs)
lp64 = joint64.log_prob(xs64)
lp, lp64 = self.evaluate((tf.cast(lp, tf.float64), lp64))
# Without Kahan, example max-abs-diff: ~0.06
self.assertAllClose(lp64, lp, rtol=0., atol=.01)
def test_kahan_broadcasting_check(self):
def model():
_ = yield tfd.Normal(0., 1.) # Batch shape ()
_ = yield tfd.Normal([0., 1., 2.], 1.) # Batch shape [3]
dist = tfd.JointDistributionCoroutineAutoBatched(
model, validate_args=True, experimental_use_kahan_sum=True,
batch_ndims=1)
sample = self.evaluate(dist.sample(seed=test_util.test_seed(
sampler_type='stateless')))
with self.assertRaises(ValueError):
self.evaluate(dist.log_prob(sample))
if __name__ == '__main__':
# TODO(b/173158845): XLA:CPU reassociates away the Kahan correction term.
os.environ['XLA_FLAGS'] = '--xla_cpu_enable_fast_math=false'
test_util.main()
|
cupyx/scipy/special/_bessel.py
|
prkhrsrvstv1/cupy
| 6,180 |
65658
|
<filename>cupyx/scipy/special/_bessel.py
from cupy import _core
j0 = _core.create_ufunc(
'cupyx_scipy_special_j0', ('f->f', 'd->d'),
'out0 = j0(in0)',
doc='''Bessel function of the first kind of order 0.
.. seealso:: :meth:`scipy.special.j0`
''')
j1 = _core.create_ufunc(
'cupyx_scipy_special_j1', ('f->f', 'd->d'),
'out0 = j1(in0)',
doc='''Bessel function of the first kind of order 1.
.. seealso:: :meth:`scipy.special.j1`
''')
y0 = _core.create_ufunc(
'cupyx_scipy_special_y0', ('f->f', 'd->d'),
'out0 = y0(in0)',
doc='''Bessel function of the second kind of order 0.
.. seealso:: :meth:`scipy.special.y0`
''')
y1 = _core.create_ufunc(
'cupyx_scipy_special_y1', ('f->f', 'd->d'),
'out0 = y1(in0)',
doc='''Bessel function of the second kind of order 1.
.. seealso:: :meth:`scipy.special.y1`
''')
i0 = _core.create_ufunc(
'cupyx_scipy_special_i0', ('f->f', 'd->d'),
'out0 = cyl_bessel_i0(in0)',
doc='''Modified Bessel function of order 0.
.. seealso:: :meth:`scipy.special.i0`
''')
i1 = _core.create_ufunc(
'cupyx_scipy_special_i1', ('f->f', 'd->d'),
'out0 = cyl_bessel_i1(in0)',
doc='''Modified Bessel function of order 1.
.. seealso:: :meth:`scipy.special.i1`
''')
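# Illustrative usage (a hedged sketch, not part of the original module; it needs a
# CUDA-capable GPU because the ufuncs evaluate on device arrays):
if __name__ == '__main__':
    import cupy

    x = cupy.linspace(0.0, 10.0, 5)
    print(j0(x))                        # Bessel J0, float64 path ('d->d')
    print(i1(x.astype(cupy.float32)))   # modified Bessel I1, float32 path ('f->f')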
|
lectures/lecture00/code/helloWorldBroke.py
|
mateusza/Introduction-to-Python-Numerical-Analysis-for-Engineers-and-Scientist
| 101 |
65668
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
print 'hello world'
|
configs/irr/irrpwc_8x1_sfine_half_flyingthings3d_subset_384x768.py
|
hologerry/mmflow
| 481 |
65672
|
<reponame>hologerry/mmflow
_base_ = [
'../_base_/models/irrpwc.py',
'../_base_/datasets/flyingthings3d_subset_bi_with_occ_384x768.py',
'../_base_/schedules/schedule_s_fine_half.py',
'../_base_/default_runtime.py'
]
custom_hooks = [dict(type='EMAHook')]
data = dict(
train_dataloader=dict(
samples_per_gpu=1, workers_per_gpu=5, drop_last=True),
val_dataloader=dict(samples_per_gpu=1, workers_per_gpu=5, shuffle=False),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=5, shuffle=False))
# Train on FlyingChairsOcc and finetune on FlyingThings3D_subset
load_from = 'https://download.openmmlab.com/mmflow/irr/irrpwc_8x1_sshort_flyingchairsocc_384x448.pth' # noqa
|
Python_Scripts/twitter/func_call.py
|
kaustubhsh/TIGMINT
| 177 |
65673
|
import sys
import base_func as base
import twint
from similar_hashtags import similar_hashtags
from top_mentions_hashtags import top_mentions_hashtags as mentions
def basic(username,search):
base.get_user_bio(username,search)
base.get_user_tweets(username,search,True)
def get_keyword(key,limit=100):
base.get_tweets(key,limit)
def top_mention():
key_val = int(input('no of users'))
seed_user = list(map(str,input('Enter usernames').strip().split()))[:key_val]
limit = int(input('No of tweets to be pulled')) # default limit = 500
for username in seed_user:
mentions.get_top_mentions_hashtags(username)
def similar_hashtag():
    key_val = int(input('no of hashtags'))
seed_hash = list(map(str,input('Enter hashtags').strip().split()))[:key_val]
limit = int(input('No of tweets to be pulled')) # default limit = 500
for seed_hashtag in seed_hash:
similar_hashtags.get_similar_hashtags(seed_hashtag, limit)
if __name__ == "__main__":
username = sys.argv[1]
string = sys.argv[2]
basic(username,string)
|
modules/search/search_config.py
|
276793422/attack-website
| 327 |
65686
|
<filename>modules/search/search_config.py
module_name = "Search"
priority = 17
|
followbot/follower_blind.py
|
amjadmajid/rosbook
| 442 |
65692
|
<reponame>amjadmajid/rosbook<filename>followbot/follower_blind.py
#!/usr/bin/env python
# BEGIN ALL
import rospy
from sensor_msgs.msg import Image
import cv2, cv_bridge
from geometry_msgs.msg import Twist
class Follower:
def __init__(self):
self.bridge = cv_bridge.CvBridge()
cv2.namedWindow("window", 1)
self.image_sub = rospy.Subscriber('camera/rgb/image_raw',
Image, self.image_callback)
self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop',
Twist, queue_size=1)
self.twist = Twist()
def image_callback(self, msg):
image = self.bridge.imgmsg_to_cv2(msg)
cv2.imshow("window", image)
cv2.waitKey(3)
#self.twist.linear.x = 0.2
#self.cmd_vel_pub.publish(self.twist)
rospy.init_node('follower')
follower = Follower()
rospy.spin()
# END ALL
|
clai/server/searchlib/se_provider.py
|
cohmoti/clai
| 391 |
65702
|
#
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
import json
from typing import List, Dict
from clai.server.searchlib.providers import Provider
class StackExchange(Provider):
def __init__(self, name: str, description: str, section: dict):
super().__init__(name, description, section)
self.__log_debug__("UNIX StackExchange provider initialized")
def call(self, query: str, limit: int = 1, **kwargs):
self.__log_debug__(
f"call(query={query}, limit={str(limit)}), **kwargs={str(kwargs)})"
)
payload = {"text": query, "limit": limit}
request = self.__send_post_request__(self.base_uri, data=json.dumps(payload))
if request.status_code == 200:
return request.json()["hits"]
return None
def extract_search_result(self, data: List[Dict]) -> str:
return data[0]["Answer"]
def get_printable_output(self, data: List[Dict]) -> str:
lines = [
f"Post: {data[0]['Content'][:384] + ' ...'}",
f"Answer: {data[0]['Answer'][:256] + ' ...'}",
f"Link: {data[0]['Url']}\n",
]
return "\n".join(lines)
|
tensorflow/python/estimator/export/export_output.py
|
DEVESHTARASIA/tensorflow
| 384 |
65713
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for different types of export output."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_def_utils
class ExportOutput(object):
"""Represents an output of a model that can be served.
These typically correspond to model heads.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def as_signature_def(self, receiver_tensors):
"""Generate a SignatureDef proto for inclusion in a MetaGraphDef.
The SignatureDef will specify outputs as described in this ExportOutput,
and will use the provided receiver_tensors as inputs.
Args:
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes that will be fed.
"""
pass
class ClassificationOutput(ExportOutput):
"""Represents the output of a classification head.
Either classes or scores or both must be set.
The classes `Tensor` must provide string labels, not integer class IDs.
If only classes is set, it is interpreted as providing top-k results in
descending order.
If only scores is set, it is interpreted as providing a score for every class
in order of class ID.
If both classes and scores are set, they are interpreted as zipped, so each
score corresponds to the class at the same index. Clients should not depend
on the order of the entries.
"""
def __init__(self, scores=None, classes=None):
"""Constructor for `ClassificationOutput`.
Args:
scores: A float `Tensor` giving scores (sometimes but not always
interpretable as probabilities) for each class. May be `None`, but
only if `classes` is set. Interpretation varies-- see class doc.
classes: A string `Tensor` giving predicted class labels. May be `None`,
but only if `scores` is set. Interpretation varies-- see class doc.
Raises:
ValueError: if neither classes nor scores is set, or one of them is not a
`Tensor` with the correct dtype.
"""
if (scores is not None
and not (isinstance(scores, ops.Tensor)
and scores.dtype.is_floating)):
raise ValueError('Classification scores must be a float32 Tensor; '
'got {}'.format(scores))
if (classes is not None
and not (isinstance(classes, ops.Tensor)
and dtypes.as_dtype(classes.dtype) == dtypes.string)):
raise ValueError('Classification classes must be a string Tensor; '
'got {}'.format(classes))
if scores is None and classes is None:
raise ValueError('At least one of scores and classes must be set.')
self._scores = scores
self._classes = classes
@property
def scores(self):
return self._scores
@property
def classes(self):
return self._classes
def as_signature_def(self, receiver_tensors):
if len(receiver_tensors) != 1:
raise ValueError('Classification input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
(_, examples), = receiver_tensors.items()
if dtypes.as_dtype(examples.dtype) != dtypes.string:
raise ValueError('Classification input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
return signature_def_utils.classification_signature_def(
examples, self.classes, self.scores)
class RegressionOutput(ExportOutput):
"""Represents the output of a regression head."""
def __init__(self, value):
"""Constructor for `RegressionOutput`.
Args:
value: a float `Tensor` giving the predicted values. Required.
Raises:
ValueError: if the value is not a `Tensor` with dtype tf.float32.
"""
if not (isinstance(value, ops.Tensor) and value.dtype.is_floating):
raise ValueError('Regression output value must be a float32 Tensor; '
'got {}'.format(value))
self._value = value
@property
def value(self):
return self._value
def as_signature_def(self, receiver_tensors):
if len(receiver_tensors) != 1:
raise ValueError('Regression input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
(_, examples), = receiver_tensors.items()
if dtypes.as_dtype(examples.dtype) != dtypes.string:
raise ValueError('Regression input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
return signature_def_utils.regression_signature_def(examples, self.value)
class PredictOutput(ExportOutput):
"""Represents the output of a generic prediction head.
A generic prediction need not be either a classification or a regression.
  Named outputs must be provided as a dict from string to `Tensor`.
"""
def __init__(self, outputs):
"""Constructor for PredictOutput.
Args:
outputs: A dict of string to `Tensor` representing the predictions.
Raises:
      ValueError: if outputs is not a dict, or any of its keys are not
strings, or any of its values are not `Tensor`s.
"""
if not isinstance(outputs, dict):
raise ValueError(
'Prediction outputs must be given as a dict of string to Tensor; '
'got {}'.format(outputs))
for key, value in outputs.items():
if not isinstance(key, six.string_types):
raise ValueError(
'Prediction output key must be a string; got {}.'.format(key))
if not isinstance(value, ops.Tensor):
raise ValueError(
'Prediction output value must be a Tensor; got {}.'.format(value))
self._outputs = outputs
@property
def outputs(self):
return self._outputs
def as_signature_def(self, receiver_tensors):
return signature_def_utils.predict_signature_def(receiver_tensors,
self.outputs)
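# Illustrative usage (a hedged sketch, not part of the original module; it assumes a
# TF1-style graph where `tf.placeholder` is available -- use `tf.compat.v1` otherwise):
if __name__ == '__main__':
  import tensorflow as tf

  head = ClassificationOutput(scores=tf.constant([[0.8, 0.2]]),
                              classes=tf.constant([['cat', 'dog']]))
  # `as_signature_def` expects a single string input tensor, e.g. serialized examples.
  receiver_tensors = {'examples': tf.placeholder(dtype=tf.string, shape=[None])}
  print(head.as_signature_def(receiver_tensors))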
|
catalyst/contrib/utils/swa.py
|
tadejsv/catalyst
| 2,693 |
65743
|
<gh_stars>1000+
from typing import List, Union
from collections import OrderedDict
import glob
import os
from pathlib import Path
import torch
def _load_weights(path: str) -> dict:
"""
Load weights of a model.
Args:
path: Path to model weights
Returns:
Weights
"""
weights = torch.load(path, map_location=lambda storage, loc: storage)
if "model_state_dict" in weights:
weights = weights["model_state_dict"]
return weights
def average_weights(state_dicts: List[dict]) -> OrderedDict:
"""
Averaging of input weights.
Args:
state_dicts: Weights to average
Raises:
KeyError: If states do not match
Returns:
Averaged weights
"""
# source https://gist.github.com/qubvel/70c3d5e4cddcde731408f478e12ef87b
params_keys = None
for i, state_dict in enumerate(state_dicts):
model_params_keys = list(state_dict.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
"For checkpoint {}, expected list of params: {}, "
"but found: {}".format(i, params_keys, model_params_keys)
)
average_dict = OrderedDict()
for k in state_dicts[0].keys():
average_dict[k] = torch.div(
sum(state_dict[k] for state_dict in state_dicts), len(state_dicts)
)
return average_dict
def get_averaged_weights_by_path_mask(
path_mask: str, logdir: Union[str, Path] = None
) -> OrderedDict:
"""
    Average the weights of all models matching the given path pattern.
Args:
        path_mask: glob-like pattern for models to average
logdir: Path to logs directory
Returns:
Averaged weights
"""
    if logdir is None:
        model_paths = glob.glob(path_mask)
    else:
        model_paths = glob.glob(os.path.join(logdir, "checkpoints", path_mask))
    all_weights = [_load_weights(path) for path in model_paths]
averaged_dict = average_weights(all_weights)
return averaged_dict
__all__ = ["average_weights", "get_averaged_weights_by_path_mask"]
|
database/api/scores_old.py
|
aprilsanchez/ictf-framework
| 110 |
65759
|
<gh_stars>100-1000
# Scores old stuff
#
# Helper functions
#
# FIXME: use ticks instead, teams are either up for the whole tick or down for the whole tick
def _get_uptime_for_team(team_id, cursor):
"""Calculate the uptime for a team.
The uptime is normalized to 0 to 100. An uptime of 100 means the team was
online for the entire tick, while an uptime of 0 means it was not online
at all.
:param int team_id: ID of the team.
:param cursor: Cursor that points to the MySQL database.
:return: Uptime of the team, between [0, 100]
"""
# FIXME: This currently does not work for disabled and enabled services.
# We should calculate the uptime per tick.
# Fetch total number of total tests made
cursor.execute("""SELECT COUNT(id) AS count, service_id
FROM team_service_state WHERE team_id = %s
GROUP BY service_id""",
(team_id,))
total_counts = dict()
for result in cursor.fetchall():
total_counts[result["service_id"]] = result["count"]
# Fetch number of tests that were successful (up and working)
cursor.execute("""SELECT COUNT(id) AS count, service_id
FROM team_service_state WHERE team_id = %s
AND state = 'up'
GROUP BY service_id""",
(team_id,))
up_counts = {}
for result in cursor.fetchall():
up_counts[result["service_id"]] = result["count"]
# Calculate the average uptime
services = len(total_counts.keys())
avg_uptime = 0
for service_id, total in total_counts.items():
up_ = up_counts[service_id]
uptime = (up_ * 1.) / (total * 1.)
avg_uptime += uptime / services
return avg_uptime * 100
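# Worked example (illustrative, not part of the original module): with two services,
# one passing 8 of 10 checks (uptime 0.8) and the other 5 of 10 (uptime 0.5), the
# function averages 0.8 / 2 + 0.5 / 2 = 0.65 and returns 65.0.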
@app.route("/scores_deprecated")
@requires_auth
def scores_deprecated():
"""The ``/scores`` endpoint requires authentication and expects no
additional argument. It is used to retrieve the current scores for each
team.
It can be reached at ``/scores?secret=<API_SECRET>``.
The JSON response is::
{
"scores": {team_id: {score: int,
sla: int (0-100, percentage),
raw_score: int }}
}
    :return: a JSON dictionary containing the current scores for each team.
"""
cursor = mysql.cursor()
cursor.execute("""SELECT team_id, name as team_name, SUM(score) AS score
FROM team_score
JOIN teams ON teams.id = team_score.team_id
GROUP BY team_id""")
scores_ = {}
# Currently, we are multiplying overall score with overall SLA. Do we
    # actually want to do this, or do we want to calculate this per tick?
for result in cursor.fetchall():
team_id = result["team_id"]
team_name = result["team_name"]
raw_score = int(result["score"])
sla_percentage = _get_uptime_for_team(team_id, cursor)
scores_[team_id] = {"team_name": team_name,
"raw_score": raw_score,
"sla": int(sla_percentage),
"score": raw_score * (sla_percentage / 100.)}
return json.dumps({"scores": scores_})
|
tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware_common/utils.py
|
chccc1994/openvino
| 2,406 |
65766
|
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from copy import deepcopy
from functools import partial
import numpy as np
import scipy
from addict import Dict
from ....algorithms.quantization import utils as eu
from ....engines.ac_engine import ACEngine
from ....graph.model_utils import get_nodes_by_type
from ....graph.node_utils import get_all_node_outputs
from ....graph.utils import find_operation_matches
from ....samplers.creator import create_sampler
SPECIAL_METRICS = ['cmc', 'reid_map', 'pairwise_accuracy_subsets', 'pairwise_accuracy', 'normalized_embedding_accuracy',
'face_recognition_tafa_pair_metric', 'localization_recall',
                   'coco_orig_keypoints_precision', 'coco_orig_segm_precision']
METRICS_CONFIGS = {'sigmoid_recom_loss': {'metrics': 'log_loss',
'postprocessing': 'sigmoid_normalize_recommendation'},
'coco_precision': {'metrics': 'coco_precision'},
'coco_segm_precision': {'metrics': 'coco_segm_precision'}}
METRIC2PROXY_METRIC = {
'hit_ratio':
{
'persample': 'sigmoid_recom_loss',
'ranking': 'sigmoid_recom_loss'
},
'ndcg':
{
'persample': 'sigmoid_recom_loss',
'ranking': 'sigmoid_recom_loss'
},
'coco_orig_precision':
{
'persample': 'coco_precision'
},
'coco_orig_keypoints_precision':
{
'persample': 'coco_precision'
},
'coco_orig_segm_precision':
{
'persample': 'coco_segm_precision'
}
}
def create_metric_config(engine, algo_config: Dict, force_logit_comparison=False,
logit_distance_type='cosine') -> Dict:
def create_metric_params(metric_name):
engine_metrics_attributes = engine.get_metrics_attributes()
if metric_name not in engine_metrics_attributes:
            raise RuntimeError('Couldn\'t create metric parameters. '
                               'Metric {} not registered in the engine.'.format(metric_name))
params = Dict()
params.name = metric_name
params.type = engine_metrics_attributes[metric_name]['type']
params.is_special = (params.type in SPECIAL_METRICS) or force_logit_comparison
if engine_metrics_attributes[metric_name]['direction'] == 'higher-better':
params.comparator = (lambda a: a)
elif engine_metrics_attributes[metric_name]['direction'] == 'higher-worse':
params.comparator = (lambda a: -a)
else:
raise ValueError('Unexpected {} metric direction value.'.format(metric_name))
params.sort_fn = partial(sort_by_logit_distance, distance=logit_distance_type) \
if params.is_special else partial(sort_by_metric_difference, comp_fn=params.comparator)
return params
def metric_to_proxy_map(metrics):
"""Determines which metrics need proxy metrics and creates metrics to proxy metrics map.
:param metrics: optimizable metrics names
:returns a dictionary of metrics to proxy metrics mapping {metric_name: 'persample': proxy_name,
'ranking': proxy_name}
a list of proxy metrics names to register
"""
def update_proxy_list(proxy_metric_name):
"""Updates a list of proxy metrics names to register.
:return a proxy metric name in accordance with the engine naming
"""
proxy_config = METRICS_CONFIGS.get(proxy_metric_name, {})
metric_config = proxy_config.get('metrics')
postprocessing_config = proxy_config.get('postprocessing')
if metric_config or postprocessing_config:
to_register.add(proxy_metric_name)
return metric_name_from_config(metric_config)
match_names_config = Dict({metric_name: {} for metric_name in metrics})
to_register = set()
for metric_name, metric_type in metrics:
if metric_type in METRIC2PROXY_METRIC:
persample_metric_name = METRIC2PROXY_METRIC[metric_type].get('persample')
persample_proxy_metric_name = update_proxy_list(persample_metric_name)
if persample_proxy_metric_name:
match_names_config[metric_name].persample = persample_proxy_metric_name
ranking_metric_name = METRIC2PROXY_METRIC[metric_type].get('ranking')
ranking_proxy_metric_name = update_proxy_list(ranking_metric_name)
if ranking_proxy_metric_name:
match_names_config[metric_name].ranking = ranking_proxy_metric_name
return match_names_config, list(to_register)
metrics_attributes = engine.get_metrics_attributes()
# configure which metrics to optimize
if algo_config.metrics:
metrics_names = []
for metric in algo_config.metrics:
metric_type = metric.type if metric.type else metric.name
metrics_names.append((metric.name, metric_type))
else:
metrics_names = [(metric_name, metric_attr.get('type', metric_name)) for metric_name, metric_attr
in metrics_attributes.items()]
# register proxy metrics
metrics_to_proxy_map, metrics_to_register = metric_to_proxy_map(metrics_names)
register_metrics(engine, metrics_to_register)
metrics_config = Dict()
for metric, _ in metrics_names:
persample_name = metrics_to_proxy_map[metric].get('persample', metric)
ranking_name = metrics_to_proxy_map[metric].get('ranking', metric)
metrics_config[metric].persample = create_metric_params(persample_name)
metrics_config[metric].ranking = create_metric_params(ranking_name)
metrics_config[metric].update(create_metric_params(metric))
return metrics_config
def metric_name_from_config(metric_config):
if isinstance(metric_config, str):
return metric_config
if isinstance(metric_config, dict):
return metric_config.get('name', metric_config['type'])
return None
def register_metrics(engine, metrics_names: list):
"""Registers metrics and postprocessing in the engine.
:param engine: an engine in which metrics will be registered
:param metrics_names: a list of metrics names
"""
registered_metrics = engine.get_metrics_attributes()
for metric in metrics_names:
if metric not in METRICS_CONFIGS:
raise ValueError('Cannot register metric. Unsupported name {}.'.format(metric))
proxy_config = METRICS_CONFIGS.get(metric, {})
if 'metrics' in proxy_config:
metric_config = proxy_config['metrics']
if metric_name_from_config(metric_config) not in registered_metrics:
register_metric(engine, metric_config)
if 'postprocessing' in proxy_config:
postprocessing_config = proxy_config['postprocessing']
register_postprocessing(engine, postprocessing_config)
def sort_by_logit_distance(u, v, reverse=False, distance='cosine'):
if len(u) != len(v):
raise RuntimeError('Cannot compare samples. '
'Lists of per-sample metric results should be the same length.')
kd_distance = lambda u, v: scipy.stats.entropy(scipy.special.softmax(u),
scipy.special.softmax(v))
mse_distance = lambda u, v: np.mean((u - v) ** 2)
distance_function = {
'cosine': scipy.spatial.distance.cosine,
'kd': kd_distance,
'mse': mse_distance,
}
distance_between_samples = np.array([distance_function[distance](ui.flatten(), vi.flatten())
for ui, vi in zip(u, v)])
sorted_arr = np.argsort(distance_between_samples)
if reverse:
sorted_arr = np.flip(sorted_arr)
return sorted_arr
def sort_by_metric_difference(u, v, comp_fn=lambda a: a, reverse=False):
if len(u) != len(v):
raise RuntimeError('Cannot compare samples. '
'Lists of per-sample metric results should be the same length.')
u = np.asarray(u)
v = np.asarray(v)
sorted_arr = np.argsort(comp_fn(u - v))
if reverse:
sorted_arr = np.flip(sorted_arr)
return sorted_arr
def register_metric(engine, metric_config):
if isinstance(engine, ACEngine):
engine.add_metric(metric_config)
else:
raise NotImplementedError('{} engine cannot register new metrics.'
.format(type(engine).__name__))
def register_postprocessing(engine, postprocessing_config):
if isinstance(engine, ACEngine):
engine.add_postprocessing(postprocessing_config)
else:
raise NotImplementedError('{} engine cannot register new postprocessing.'
.format(type(engine).__name__))
def is_preset_performance(config: Dict):
if config.weights.mode == 'symmetric' and config.activations.mode == 'symmetric':
return True
if config.weights.mode == 'asymmetric' or config.activations.mode == 'asymmetric':
return False
if config.preset == 'performance':
return True
return False
def get_mixed_preset_config(config: Dict):
config = deepcopy(config)
config.update(preset='mixed')
if config.activations.mode:
config.activations.mode = 'asymmetric'
if config.weights.mode:
config.weights.mode = 'symmetric'
return config
def get_num_of_quantized_ops(model, quantizable_operations):
quantized_ops = set()
nodes_to_see = []
for fq_node in get_nodes_by_type(model, ['FakeQuantize']):
nodes_to_see.extend(get_all_node_outputs(fq_node))
while nodes_to_see:
child = nodes_to_see.pop()
if find_operation_matches(quantizable_operations, child):
quantized_ops.add(child)
continue
nodes_to_see.extend(get_all_node_outputs(child))
return len(quantized_ops)
def evaluate_model(
model, engine,
dataset_size,
subset_indices=None,
print_progress=True,
metrics_config=None,
per_sample_subset_indices=None,
output_node_name=None,
stats_layout=None,
):
"""Evaluates the model and processes metrics values
:param model: model to evaluate
:param subset_indices: image indices to evaluate on. If None evaluate on whole dataset
:param per_sample_subset_indices: image indices for which to return per-sample metrics.
If None for all predicted images
:param print_progress: Whether to print inference progress
:returns a dictionary of predicted metrics {metric_name: value}
a dictionary of per-sample metrics values {metric_name: [values]}
"""
engine.set_model(model)
eu.select_evaluation_dataset(engine)
if not subset_indices:
subset_indices = range(dataset_size)
index_sampler = create_sampler(engine, samples=subset_indices)
(metrics_per_sample, metrics), raw_output = engine.predict(stats_layout=stats_layout,
sampler=index_sampler,
metric_per_sample=True,
print_progress=print_progress)
raw_output = process_raw_output(raw_output, output_node_name)
metrics_per_sample = process_per_sample_metrics(metrics_per_sample,
metrics_config,
per_sample_subset_indices,
raw_output=raw_output)
metrics = dict((name, value) for name, value in metrics.items() if name in metrics_config)
eu.reset_dataset_to_default(engine)
return metrics, metrics_per_sample
def process_raw_output(output, output_node_name):
if not output:
return []
return output[output_node_name]['output_logits']
def process_per_sample_metrics(metrics_per_sample, metrics_config,
indices=None, raw_output=None):
"""Creates a dictionary of per-sample metrics values {metric_name: [values]}
:param metrics_per_sample: list of per-sample metrics
:param indices: indices of samples to be considered. All if None
:param raw_output: raw output from the model
:return processed dictionary
"""
metrics_to_keep = {config.persample.name: config.persample
for config in metrics_config.values()}
if not metrics_to_keep:
return {}
processed_metrics_per_sample = dict((name, []) for name in metrics_to_keep)
for metric_name, metric_params in metrics_to_keep.items():
if metric_params.is_special:
processed_metrics_per_sample[metric_name] = raw_output
for value in metrics_per_sample:
if value['metric_name'] in metrics_to_keep:
if metrics_to_keep[value['metric_name']].is_special:
continue
if value['result'] is not None:
result_value = np.nanmean(value['result'])
else:
result_value = None
processed_metrics_per_sample[value['metric_name']].append(result_value)
# check that all metrics have equal number of samples
if not len({len(value) for value in processed_metrics_per_sample.values()}) == 1:
raise RuntimeError('Inconsistent number of per-sample metric values')
if indices:
for name, values in processed_metrics_per_sample.items():
processed_metrics_per_sample[name] = [values[i] for i in indices]
return processed_metrics_per_sample
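# Illustrative behaviour of sort_by_metric_difference (a hedged sketch, not part of the
# original module): samples are ordered by the signed per-sample difference u - v, so
# with the default identity comparator the sample with the largest drop comes last:
#
#     >>> sort_by_metric_difference([0.90, 0.50, 0.70], [0.80, 0.50, 0.10])
#     array([1, 0, 2])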
|
src/sage/combinat/root_system/coxeter_group.py
|
fchapoton/sage
| 1,742 |
65771
|
<reponame>fchapoton/sage
"""
Coxeter Groups
"""
#*****************************************************************************
# Copyright (C) 2010 <NAME> <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.combinat.root_system.weyl_group import WeylGroup
from sage.combinat.root_system.reflection_group_real import ReflectionGroup
from sage.combinat.root_system.cartan_type import CartanType
def CoxeterGroup(data, implementation="reflection", base_ring=None, index_set=None):
"""
Return an implementation of the Coxeter group given by ``data``.
INPUT:
- ``data`` -- a Cartan type (or coercible into; see :class:`CartanType`)
or a Coxeter matrix or graph
- ``implementation`` -- (default: ``'reflection'``) can be one of
the following:
* ``'permutation'`` - as a permutation representation
* ``'matrix'`` - as a Weyl group (as a matrix group acting on the
root space); if this is not implemented, this uses the "reflection"
implementation
* ``'coxeter3'`` - using the coxeter3 package
* ``'reflection'`` - as elements in the reflection representation; see
:class:`~sage.groups.matrix_gps.coxeter_groups.CoxeterMatrixGroup`
- ``base_ring`` -- (optional) the base ring for the ``'reflection'``
implementation
- ``index_set`` -- (optional) the index set for the ``'reflection'``
implementation
EXAMPLES:
Now assume that ``data`` represents a Cartan type. If
``implementation`` is not specified, the reflection representation
is returned::
sage: W = CoxeterGroup(["A",2])
sage: W
Finite Coxeter group over Integer Ring with Coxeter matrix:
[1 3]
[3 1]
sage: W = CoxeterGroup(["A",3,1]); W
Coxeter group over Integer Ring with Coxeter matrix:
[1 3 2 3]
[3 1 3 2]
[2 3 1 3]
[3 2 3 1]
sage: W = CoxeterGroup(['H',3]); W
Finite Coxeter group over Number Field in a with defining polynomial x^2 - 5 with a = 2.236067977499790? with Coxeter matrix:
[1 3 2]
[3 1 5]
[2 5 1]
We now use the ``implementation`` option::
sage: W = CoxeterGroup(["A",2], implementation = "permutation") # optional - gap3
sage: W # optional - gap3
Permutation Group with generators [(1,4)(2,3)(5,6), (1,3)(2,5)(4,6)]
sage: W.category() # optional - gap3
Join of Category of finite enumerated permutation groups
and Category of finite weyl groups
and Category of well generated finite irreducible complex reflection groups
sage: W = CoxeterGroup(["A",2], implementation="matrix")
sage: W
Weyl Group of type ['A', 2] (as a matrix group acting on the ambient space)
sage: W = CoxeterGroup(["H",3], implementation="matrix")
sage: W
Finite Coxeter group over Number Field in a with defining polynomial x^2 - 5 with a = 2.236067977499790? with Coxeter matrix:
[1 3 2]
[3 1 5]
[2 5 1]
sage: W = CoxeterGroup(["H",3], implementation="reflection")
sage: W
Finite Coxeter group over Number Field in a with defining polynomial x^2 - 5 with a = 2.236067977499790? with Coxeter matrix:
[1 3 2]
[3 1 5]
[2 5 1]
sage: W = CoxeterGroup(["A",4,1], implementation="permutation")
Traceback (most recent call last):
...
ValueError: the type must be finite
sage: W = CoxeterGroup(["A",4], implementation="chevie"); W # optional - gap3
Irreducible real reflection group of rank 4 and type A4
We use the different options for the "reflection" implementation::
sage: W = CoxeterGroup(["H",3], implementation="reflection", base_ring=RR)
sage: W
Finite Coxeter group over Real Field with 53 bits of precision with Coxeter matrix:
[1 3 2]
[3 1 5]
[2 5 1]
sage: W = CoxeterGroup([[1,10],[10,1]], implementation="reflection", index_set=['a','b'], base_ring=SR)
sage: W
Finite Coxeter group over Symbolic Ring with Coxeter matrix:
[ 1 10]
[10 1]
TESTS::
sage: W = groups.misc.CoxeterGroup(["H",3])
"""
if implementation not in ["permutation", "matrix", "coxeter3", "reflection", "chevie", None]:
raise ValueError("invalid type implementation")
from sage.groups.matrix_gps.coxeter_group import CoxeterMatrixGroup
try:
cartan_type = CartanType(data)
except (TypeError, ValueError): # If it is not a Cartan type, try to see if we can represent it as a matrix group
return CoxeterMatrixGroup(data, base_ring, index_set)
if implementation is None:
implementation = "matrix"
if implementation == "reflection":
return CoxeterMatrixGroup(cartan_type, base_ring, index_set)
if implementation == "coxeter3":
try:
from sage.libs.coxeter3.coxeter_group import CoxeterGroup
except ImportError:
raise RuntimeError("coxeter3 must be installed")
else:
return CoxeterGroup(cartan_type)
if implementation == "permutation":
if not cartan_type.is_finite():
raise ValueError("the type must be finite")
if cartan_type.is_crystallographic():
return WeylGroup(cartan_type, implementation="permutation")
return ReflectionGroup(cartan_type, index_set=index_set)
elif implementation == "matrix":
if cartan_type.is_crystallographic():
return WeylGroup(cartan_type)
return CoxeterMatrixGroup(cartan_type, base_ring, index_set)
elif implementation == "chevie":
return ReflectionGroup(cartan_type, index_set=index_set)
raise NotImplementedError("Coxeter group of type {} as {} group not implemented".format(cartan_type, implementation))
from sage.misc.persist import register_unpickle_override
register_unpickle_override('sage.combinat.root_system.coxeter_group', 'CoxeterGroupAsPermutationGroup', ReflectionGroup)
|
homeassistant/components/flick_electric/sensor.py
|
learn-home-automation/core
| 22,481 |
65774
|
<filename>homeassistant/components/flick_electric/sensor.py
"""Support for Flick Electric Pricing data."""
from datetime import timedelta
import logging
import async_timeout
from pyflick import FlickAPI, FlickPrice
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_FRIENDLY_NAME
from homeassistant.core import HomeAssistant
from homeassistant.util.dt import utcnow
from .const import ATTR_COMPONENTS, ATTR_END_AT, ATTR_START_AT, DOMAIN
_LOGGER = logging.getLogger(__name__)
_AUTH_URL = "https://api.flick.energy/identity/oauth/token"
_RESOURCE = "https://api.flick.energy/customer/mobile_provider/price"
SCAN_INTERVAL = timedelta(minutes=5)
ATTRIBUTION = "Data provided by Flick Electric"
FRIENDLY_NAME = "Flick Power Price"
UNIT_NAME = "cents"
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Flick Sensor Setup."""
api: FlickAPI = hass.data[DOMAIN][entry.entry_id]
async_add_entities([FlickPricingSensor(api)], True)
class FlickPricingSensor(SensorEntity):
"""Entity object for Flick Electric sensor."""
_attr_native_unit_of_measurement = UNIT_NAME
def __init__(self, api: FlickAPI) -> None:
"""Entity object for Flick Electric sensor."""
self._api: FlickAPI = api
self._price: FlickPrice = None
self._attributes = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_FRIENDLY_NAME: FRIENDLY_NAME,
}
@property
def name(self):
"""Return the name of the sensor."""
return FRIENDLY_NAME
@property
def native_value(self):
"""Return the state of the sensor."""
return self._price.price
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return self._attributes
async def async_update(self):
"""Get the Flick Pricing data from the web service."""
if self._price and self._price.end_at >= utcnow():
return # Power price data is still valid
async with async_timeout.timeout(60):
self._price = await self._api.getPricing()
self._attributes[ATTR_START_AT] = self._price.start_at
self._attributes[ATTR_END_AT] = self._price.end_at
for component in self._price.components:
if component.charge_setter not in ATTR_COMPONENTS:
_LOGGER.warning("Found unknown component: %s", component.charge_setter)
continue
self._attributes[component.charge_setter] = float(component.value)
|
omaha_server/crash/views.py
|
fiadm/omaha-server
| 142 |
65789
|
<filename>omaha_server/crash/views.py<gh_stars>100-1000
# coding: utf8
"""
This software is licensed under the Apache 2 license, quoted below.
Copyright 2014 Crystalnix Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
import json
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse_lazy
from django.views.generic import FormView
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, HttpResponseBadRequest
from crash.forms import CrashFrom, CrashDescriptionForm
from crash.models import Crash
from omaha_server.utils import get_client_ip
class CrashFormView(FormView):
http_method_names = ('post',)
form_class = CrashFrom
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(CrashFormView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
meta = self.request.POST.dict()
meta.pop("appid", None)
meta.pop("userid", None)
obj = form.save(commit=False)
if meta:
obj.meta = meta
obj.ip = get_client_ip(self.request)
obj.save()
return HttpResponse(obj.pk, status=200)
def form_invalid(self, form):
return HttpResponse(json.dumps(form.errors), status=400, content_type='application/json')
class CrashDescriptionFormView(FormView):
form_class = CrashDescriptionForm
template_name = 'crash/crash_description.html'
success_url = reverse_lazy('crash_description_submitted')
def dispatch(self, request, *args, **kwargs):
# verify crash_id refers to valid crash object
try:
self.crash = Crash.objects.select_related('crash_description').get(pk=self.kwargs.get('pk'))
except Crash.DoesNotExist:
return HttpResponseBadRequest('no such crash')
# verify there is no crash description for that object yet
try:
desc = self.crash.crash_description
return HttpResponseBadRequest('already reported as \"%s\"' % desc.summary)
except ObjectDoesNotExist:
pass
return super(CrashDescriptionFormView, self).dispatch(request, *args, **kwargs)
def get_initial(self):
data = super(CrashDescriptionFormView, self).get_initial()
data['description'] = self.request.GET.get('comment')
return data
def form_valid(self, form):
obj = form.save(commit=False)
obj.crash = self.crash
obj.save()
return super(CrashDescriptionFormView, self).form_valid(form)
|
spitfire/runtime/template.py
|
atubbs/spitfire
| 385 |
65797
|
<reponame>atubbs/spitfire<filename>spitfire/runtime/template.py<gh_stars>100-1000
# Copyright 2007 The Spitfire Authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# an 'abstract' base class for a template, seems like a good idea for now
import cStringIO as StringIO
from spitfire import runtime
from spitfire.runtime import baked
from spitfire.runtime import filters
from spitfire.runtime import udn
try:
from spitfire.runtime import _template # pylint: disable=g-import-not-at-top
except ImportError:
_template = None
# NOTE: in some instances, this is faster than using cStringIO.
# This is slightly counterintuitive and probably means there is more here than
# meets the eye.
class BufferIO(list):
write = list.append
def getvalue(self):
return ''.join(self)
class _BaseSpitfireTemplate(object):
# filter_function checks if the value should be filtered. If it is a
# SanitizedPlaceholder or the placeholder_function has a skip_filter
# annotation, there is no need to filter. Otherwise, call
# self._filter_function.
def filter_function(self, value, placeholder_function=None):
"""Checks if the value should be filtered.
If it is a SanitizedPlaceholder or the placeholder_function has
a skip_filter annotation, there is no need to filter. Otherwise, call
self._filter_function.
Args:
value: The value that may need to be filtered.
placeholder_function: If present and annotated, do not filter.
Returns:
value, filtered if necessary.
"""
if isinstance(value, baked.SanitizedPlaceholder):
return value
elif (placeholder_function is not None and
getattr(placeholder_function, 'skip_filter', False)):
return value
else:
return self._filter_function(value)
def get_spitfire_template_class(prefer_c_extension=True):
"""Returns an appropriate SpitfireTemplate class.
Args:
prefer_c_extension: If set True and _template loaded properly, use the
C extension's baseclass implementation.
Returns:
A SpitfireTemplate class with an appropriate base class.
"""
if prefer_c_extension and _template is not None:
baseclass = _template.BaseSpitfireTemplate
else:
baseclass = _BaseSpitfireTemplate
class _SpitfireTemplate(baseclass):
# store a reference to the filter function - this is tricky because of
# some python stuff. filter functions look like this:
#
# def filter_function(template_instance, value):
#
# when this is assigned to a template instance, accessing this name
# binds the function to the current instance. using the name
# 'template_instance' to indicate that these functions aren't really
# related to the template.
_filter_function = staticmethod(filters.simple_str_filter)
repeat = None
placeholder_cache = None
def __init__(self,
search_list=None,
default_filter=None,
use_placeholder_cache=False):
# use_placeholder_cache - cache the values returned from the
# search_list? The cached values will live for the lifetime of
# this object.
self.search_list = search_list
if use_placeholder_cache:
self.placeholder_cache = {}
if default_filter is not None:
self._filter_function = default_filter
# FIXME: repeater support is not needed most of the time, just
# disable it for the time being
# self.repeat = spitfire.runtime.repeater.RepeatTracker()
def get_var(self, name, default=None):
return udn.resolve_from_search_list(self.search_list, name, default)
def has_var(self, name):
var = self.get_var(name, default=runtime.UnresolvedPlaceholder)
return var is not runtime.UnresolvedPlaceholder
@staticmethod
def new_buffer():
return BufferIO()
return _SpitfireTemplate
SpitfireTemplate = get_spitfire_template_class()
def template_method(function):
function.template_method = True
function.skip_filter = True
return function
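# Illustrative usage (a hedged sketch, not part of the original module; note that this
# module targets Python 2, as the cStringIO import above suggests):
#
#     template = SpitfireTemplate(search_list=[{'title': 'hello'}])
#     template.get_var('title')      # resolves 'title' from the search list
#     template.has_var('missing')    # False when the name cannot be resolved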
|
tests/test_router_register.py
|
ricfri/fastapi-users
| 660 |
65812
|
<reponame>ricfri/fastapi-users
from typing import Any, AsyncGenerator, Dict, cast
import httpx
import pytest
from fastapi import FastAPI, status
from fastapi_users.router import ErrorCode, get_register_router
from tests.conftest import User, UserCreate
@pytest.fixture
@pytest.mark.asyncio
async def test_app_client(
get_user_manager, get_test_client
) -> AsyncGenerator[httpx.AsyncClient, None]:
register_router = get_register_router(
get_user_manager,
User,
UserCreate,
)
app = FastAPI()
app.include_router(register_router)
async for client in get_test_client(app):
yield client
@pytest.mark.router
@pytest.mark.asyncio
class TestRegister:
async def test_empty_body(self, test_app_client: httpx.AsyncClient):
response = await test_app_client.post("/register", json={})
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
async def test_missing_email(self, test_app_client: httpx.AsyncClient):
json = {"password": "<PASSWORD>"}
response = await test_app_client.post("/register", json=json)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
async def test_missing_password(self, test_app_client: httpx.AsyncClient):
json = {"email": "<EMAIL>"}
response = await test_app_client.post("/register", json=json)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
async def test_wrong_email(self, test_app_client: httpx.AsyncClient):
json = {"email": "king.arthur", "password": "<PASSWORD>"}
response = await test_app_client.post("/register", json=json)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
async def test_invalid_password(self, test_app_client: httpx.AsyncClient):
json = {"email": "<EMAIL>", "password": "g"}
response = await test_app_client.post("/register", json=json)
assert response.status_code == status.HTTP_400_BAD_REQUEST
data = cast(Dict[str, Any], response.json())
assert data["detail"] == {
"code": ErrorCode.REGISTER_INVALID_PASSWORD,
"reason": "Password should be at least 3 characters",
}
@pytest.mark.parametrize(
"email", ["<EMAIL>", "<EMAIL>"]
)
async def test_existing_user(self, email, test_app_client: httpx.AsyncClient):
json = {"email": email, "password": "<PASSWORD>"}
response = await test_app_client.post("/register", json=json)
assert response.status_code == status.HTTP_400_BAD_REQUEST
data = cast(Dict[str, Any], response.json())
assert data["detail"] == ErrorCode.REGISTER_USER_ALREADY_EXISTS
@pytest.mark.parametrize("email", ["<EMAIL>", "<EMAIL>"])
async def test_valid_body(self, email, test_app_client: httpx.AsyncClient):
json = {"email": email, "password": "<PASSWORD>"}
response = await test_app_client.post("/register", json=json)
assert response.status_code == status.HTTP_201_CREATED
data = cast(Dict[str, Any], response.json())
assert "hashed_password" not in data
assert "password" not in data
assert data["id"] is not None
async def test_valid_body_is_superuser(self, test_app_client: httpx.AsyncClient):
json = {
"email": "<EMAIL>",
"password": "<PASSWORD>",
"is_superuser": True,
}
response = await test_app_client.post("/register", json=json)
assert response.status_code == status.HTTP_201_CREATED
data = cast(Dict[str, Any], response.json())
assert data["is_superuser"] is False
async def test_valid_body_is_active(self, test_app_client: httpx.AsyncClient):
json = {
"email": "<EMAIL>",
"password": "<PASSWORD>",
"is_active": False,
}
response = await test_app_client.post("/register", json=json)
assert response.status_code == status.HTTP_201_CREATED
data = cast(Dict[str, Any], response.json())
assert data["is_active"] is True
@pytest.mark.asyncio
async def test_register_namespace(get_user_manager):
app = FastAPI()
app.include_router(
get_register_router(
get_user_manager,
User,
UserCreate,
)
)
assert app.url_path_for("register:register") == "/register"
|
peewee__examples/serialization__model_to_dict__dict_to_model/main.py
|
DazEB2/SimplePyScripts
| 117 |
65833
|
<reponame>DazEB2/SimplePyScripts<filename>peewee__examples/serialization__model_to_dict__dict_to_model/main.py<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import datetime as DT
# pip install peewee
from peewee import *
from playhouse.shortcuts import model_to_dict, dict_to_model
db = SqliteDatabase(':memory:', pragmas={'foreign_keys': 1})
class BaseModel(Model):
class Meta:
database = db
class Person(BaseModel):
name = CharField()
birthday = DateField()
def __str__(self):
return f'Person(id={self.id} name={self.name!r} birthday={self.birthday} ' \
f'pets={", ".join(p.name for p in self.pets)!r})'
class Pet(BaseModel):
owner = ForeignKeyField(Person, backref='pets')
name = CharField()
animal_type = CharField()
def __str__(self):
return f'Pet(id={self.id} name={self.name!r} owner={self.owner.name!r} self.animal_type={self.animal_type!r})'
db.connect()
db.create_tables([Person, Pet])
person = Person.create(name='Ivan', birthday=DT.date.today())
Pet.create(owner=person, name='Oval', animal_type='Dog')
Pet.create(owner=person, name='Bortik', animal_type='Cat')
print(person)
# Person(id=1 name='Ivan' birthday=2020-01-09 pets='Oval, Bortik')
print()
data_backrefs_false = model_to_dict(person)
print(type(data_backrefs_false), data_backrefs_false)
# <class 'dict'> {'id': 1, 'name': 'Ivan', 'birthday': datetime.date(2020, 1, 9)}
data_backrefs_true = model_to_dict(person, backrefs=True)
print(type(data_backrefs_true), data_backrefs_true)
# <class 'dict'> {'id': 1, 'name': 'Ivan', 'birthday': datetime.date(2020, 1, 9),
# 'pets': [{'id': 1, 'name': 'Oval', 'animal_type': 'Dog'}, {'id': 2, 'name': 'Bortik', 'animal_type': 'Cat'}]}
print()
# Create another database and import this
db = SqliteDatabase('persons.sqlite', pragmas={'foreign_keys': 1})
Person._meta.database = db
Pet._meta.database = db
db.connect()
db.create_tables([Person, Pet])
Pet.truncate_table()
Person.truncate_table()
#
person = dict_to_model(Person, data_backrefs_false)
print(person)
print(list(person.pets))
print(list(Pet.select()))
# Person(id=1 name='Ivan' birthday=2020-01-09 pets='')
# []
# []
print()
person = dict_to_model(Person, data_backrefs_true)
print(person)
person.save(force_insert=True)  # .save() with force_insert=True is required because the primary key is already set
print(list(person.pets))
for p in person.pets:
p.save(force_insert=True)
print(list(Pet.select()))
# Person(id=1 name='Ivan' birthday=2020-01-09 pets='Oval, Bortik')
# [<Pet: Pet(id=1 name='Oval' owner='Ivan' self.animal_type='Dog')>, <Pet: Pet(id=2 name='Bortik' owner='Ivan' self.animal_type='Cat')>]
# [<Pet: Pet(id=1 name='Oval' owner='Ivan' self.animal_type='Dog')>, <Pet: Pet(id=2 name='Bortik' owner='Ivan' self.animal_type='Cat')>]
|
voc_loader.py
|
abhi-kumar/blitznet
| 331 |
65845
|
import logging
import os
import numpy as np
import xml.etree.ElementTree as ET
from PIL import Image
from paths import DATASETS_ROOT
log = logging.getLogger()
VOC_CATS = ['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor']
class VOCLoader():
def __init__(self, year, split, segmentation=False, augmented_seg=False):
assert year in ['07', '12']
self.dataset = 'voc'
self.year = year
self.root = os.path.join(DATASETS_ROOT, 'VOCdevkit/VOC20%s/' % year)
self.split = split
assert split in ['train', 'val', 'trainval', 'test']
cats = VOC_CATS
self.cats_to_ids = dict(map(reversed, enumerate(cats)))
self.ids_to_cats = dict(enumerate(cats))
self.num_classes = len(cats)
self.categories = cats[1:]
self.segmentation = segmentation
self.augmented_seg = augmented_seg
assert not self.segmentation or self.segmentation and self.year == '12'
if self.augmented_seg:
filelist = 'ImageSets/SegmentationAug/%s.txt'
elif self.segmentation:
filelist = 'ImageSets/Segmentation/%s.txt'
else:
filelist = 'ImageSets/Main/%s.txt'
with open(os.path.join(self.root, filelist % self.split), 'r') as f:
self.filenames = f.read().split('\n')[:-1]
log.info("Created a loader VOC%s %s with %i images" % (year, split, len(self.filenames)))
def load_image(self, name):
im = Image.open('%sJPEGImages/%s.jpg' % (self.root, name)).convert('RGB')
im = np.array(im) / 255.0
im = im.astype(np.float32)
return im
def get_filenames(self):
return self.filenames
def read_annotations(self, name):
bboxes = []
cats = []
tree = ET.parse('%sAnnotations/%s.xml' % (self.root, name))
root = tree.getroot()
width = int(root.find('size/width').text)
height = int(root.find('size/height').text)
difficulty = []
for obj in root.findall('object'):
cat = self.cats_to_ids[obj.find('name').text]
difficult = (int(obj.find('difficult').text) != 0)
difficulty.append(difficult)
cats.append(cat)
bbox_tag = obj.find('bndbox')
x = int(bbox_tag.find('xmin').text)
y = int(bbox_tag.find('ymin').text)
w = int(bbox_tag.find('xmax').text)-x
h = int(bbox_tag.find('ymax').text)-y
bboxes.append((x, y, w, h))
gt_cats = np.array(cats)
gt_bboxes = np.array(bboxes).reshape((len(bboxes), 4))
difficulty = np.array(difficulty)
seg_gt = self.read_segmentations(name, height, width)
output = gt_bboxes, seg_gt, gt_cats, width, height, difficulty
return output
def read_segmentations(self, name, height, width):
if self.segmentation:
try:
seg_folder = self.root + 'SegmentationClass/'
seg_file = seg_folder + name + '.png'
seg_map = Image.open(seg_file)
            except OSError:  # no plain VOC mask for this image, fall back to the augmented masks
assert self.augmented_seg
seg_folder = self.root + 'SegmentationClassAug/'
seg_file = seg_folder + name + '.png'
seg_map = Image.open(seg_file)
segmentation = np.array(seg_map, dtype=np.uint8)
else:
# if there is no segmentation for a particular image we fill the mask
# with zeros to keep the same amount of tensors but don't learn from it
segmentation = np.zeros([height, width], dtype=np.uint8) + 255
return segmentation
|
src/skmultiflow/_demos/_test_adwin.py
|
denisesato/scikit-multiflow
| 663 |
65846
|
import numpy as np
from skmultiflow.drift_detection import ADWIN
def demo():
""" _test_adwin
In this demo, an ADWIN object evaluates a sequence of numbers corresponding to 2 distributions.
The ADWIN object indicates the indices where change is detected.
The first half of the data is a sequence of randomly generated 0's and 1's.
    The second half of the data is a sequence of uniformly distributed integers from 0 to 7.
"""
adwin = ADWIN()
size = 2000
change_start = 999
np.random.seed(1)
data_stream = np.random.randint(2, size=size)
data_stream[change_start:] = np.random.randint(8, size=size-change_start)
for i in range(size):
adwin.add_element(data_stream[i])
if adwin.detected_change():
print('Change has been detected in data: ' + str(data_stream[i]) + ' - of index: ' + str(i))
if __name__ == '__main__':
demo()
|
paddlespeech/cli/stats/infer.py
|
jerryuhoo/PaddleSpeech
| 1,379 |
65864
|
<reponame>jerryuhoo/PaddleSpeech<gh_stars>1000+
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from typing import List
from prettytable import PrettyTable
from ..utils import cli_register
from ..utils import stats_wrapper
__all__ = ['StatsExecutor']
model_name_format = {
'asr': 'Model-Language-Sample Rate',
'cls': 'Model-Sample Rate',
'st': 'Model-Source language-Target language',
'text': 'Model-Task-Language',
'tts': 'Model-Language',
'vector': 'Model-Sample Rate'
}
@cli_register(
name='paddlespeech.stats',
description='Get speech tasks support models list.')
class StatsExecutor():
def __init__(self):
super().__init__()
self.parser = argparse.ArgumentParser(
prog='paddlespeech.stats', add_help=True)
self.task_choices = ['asr', 'cls', 'st', 'text', 'tts', 'vector']
self.parser.add_argument(
'--task',
type=str,
default='asr',
choices=self.task_choices,
help='Choose speech task.',
required=True)
def show_support_models(self, pretrained_models: dict):
fields = model_name_format[self.task].split("-")
table = PrettyTable(fields)
for key in pretrained_models:
table.add_row(key.split("-"))
print(table)
def execute(self, argv: List[str]) -> bool:
"""
Command line entry.
"""
parser_args = self.parser.parse_args(argv)
has_exceptions = False
try:
self(parser_args.task)
except Exception as e:
has_exceptions = True
        return not has_exceptions
@stats_wrapper
def __call__(
self,
task: str=None, ):
"""
Python API to call an executor.
"""
self.task = task
if self.task not in self.task_choices:
print("Please input correct speech task, choices = " + str(
self.task_choices))
elif self.task == 'asr':
try:
from ..asr.pretrained_models import pretrained_models
print(
"Here is the list of ASR pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print("Failed to get the list of ASR pretrained models.")
elif self.task == 'cls':
try:
from ..cls.pretrained_models import pretrained_models
print(
"Here is the list of CLS pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print("Failed to get the list of CLS pretrained models.")
elif self.task == 'st':
try:
from ..st.pretrained_models import pretrained_models
print(
"Here is the list of ST pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print("Failed to get the list of ST pretrained models.")
elif self.task == 'text':
try:
from ..text.pretrained_models import pretrained_models
print(
"Here is the list of TEXT pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print("Failed to get the list of TEXT pretrained models.")
elif self.task == 'tts':
try:
from ..tts.pretrained_models import pretrained_models
print(
"Here is the list of TTS pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print("Failed to get the list of TTS pretrained models.")
elif self.task == 'vector':
try:
from ..vector.pretrained_models import pretrained_models
print(
"Here is the list of Speaker Recognition pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print(
"Failed to get the list of Speaker Recognition pretrained models."
)
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/utils/registry.py
|
hito0512/Vitis-AI
| 848 |
65900
|
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Registry mechanism for "registering" classes/functions for general use.
This is typically used with a decorator that calls Register for adding
a class or function to a registry.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Registry(object):
"""Provides a registry for saving objects."""
def __init__(self, name):
"""Creates a new registry."""
self._name = name
self._registry = {}
def register(self, obj, name=None):
"""Registers a Python object "obj" for the given "name".
Args:
obj: The object to add to the registry.
name: An optional string specifying the registry key for the obj.
If None, obj.__name__ will be used.
Raises:
KeyError: If same name is registered twice.
"""
if not name:
name = obj.__name__
if name in self._registry:
raise KeyError("Name '%s' has been registered in '%s'!" %
(name, self._name))
# logging.vlog(1, "Registering %s (%s) in %s.", name, obj, self._name)
self._registry[name] = obj
def list(self):
"""Lists registered items.
Returns:
A list of names of registered objects.
"""
return self._registry.keys()
def lookup(self, name):
"""Looks up "name".
Args:
name: a string specifying the registry key for the obj.
Returns:
Registered object if found
Raises:
LookupError: if "name" has not been registered.
"""
if name in self._registry:
return self._registry[name]
else:
raise LookupError("%s registry has no entry for: %s" % (self._name, name))
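# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of the decorator pattern the module
# docstring describes: a registry plus a small decorator that registers
# functions under their own names.
if __name__ == "__main__":
    _demo_registry = Registry("demo_ops")

    def register_op(func):
        """Register ``func`` in the demo registry and return it unchanged."""
        _demo_registry.register(func)
        return func

    @register_op
    def relu(x):
        return max(x, 0)

    print(list(_demo_registry.list()))        # ['relu']
    print(_demo_registry.lookup("relu")(-3))  # 0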
|
aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/CreateClusterRequest.py
|
yndu13/aliyun-openapi-python-sdk
| 1,001 |
65901
|
<filename>aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/CreateClusterRequest.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbase.endpoint import endpoint_data
class CreateClusterRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'HBase', '2019-01-01', 'CreateCluster','hbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClusterName(self):
return self.get_query_params().get('ClusterName')
def set_ClusterName(self,ClusterName):
self.add_query_param('ClusterName',ClusterName)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_EngineVersion(self):
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self,EngineVersion):
self.add_query_param('EngineVersion',EngineVersion)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_Engine(self):
return self.get_query_params().get('Engine')
def set_Engine(self,Engine):
self.add_query_param('Engine',Engine)
def get_AutoRenewPeriod(self):
return self.get_query_params().get('AutoRenewPeriod')
def set_AutoRenewPeriod(self,AutoRenewPeriod):
self.add_query_param('AutoRenewPeriod',AutoRenewPeriod)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_DiskSize(self):
return self.get_query_params().get('DiskSize')
def set_DiskSize(self,DiskSize):
self.add_query_param('DiskSize',DiskSize)
def get_EncryptionKey(self):
return self.get_query_params().get('EncryptionKey')
def set_EncryptionKey(self,EncryptionKey):
self.add_query_param('EncryptionKey',EncryptionKey)
def get_MasterInstanceType(self):
return self.get_query_params().get('MasterInstanceType')
def set_MasterInstanceType(self,MasterInstanceType):
self.add_query_param('MasterInstanceType',MasterInstanceType)
def get_DiskType(self):
return self.get_query_params().get('DiskType')
def set_DiskType(self,DiskType):
self.add_query_param('DiskType',DiskType)
def get_VSwitchId(self):
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self,VSwitchId):
self.add_query_param('VSwitchId',VSwitchId)
def get_SecurityIPList(self):
return self.get_query_params().get('SecurityIPList')
def set_SecurityIPList(self,SecurityIPList):
self.add_query_param('SecurityIPList',SecurityIPList)
def get_ColdStorageSize(self):
return self.get_query_params().get('ColdStorageSize')
def set_ColdStorageSize(self,ColdStorageSize):
self.add_query_param('ColdStorageSize',ColdStorageSize)
def get_PeriodUnit(self):
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self,PeriodUnit):
self.add_query_param('PeriodUnit',PeriodUnit)
def get_CoreInstanceType(self):
return self.get_query_params().get('CoreInstanceType')
def set_CoreInstanceType(self,CoreInstanceType):
self.add_query_param('CoreInstanceType',CoreInstanceType)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_NodeCount(self):
return self.get_query_params().get('NodeCount')
def set_NodeCount(self,NodeCount):
self.add_query_param('NodeCount',NodeCount)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
def get_PayType(self):
return self.get_query_params().get('PayType')
def set_PayType(self,PayType):
self.add_query_param('PayType',PayType)
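# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how a request object like this is typically
# sent with the Alibaba Cloud core SDK client. The credentials, region and
# parameter values below are placeholders, not real ones.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient

    client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
    request = CreateClusterRequest()
    request.set_ClusterName("demo-cluster")
    request.set_Engine("hbase")
    request.set_EngineVersion("2.0")
    request.set_NodeCount(2)
    request.set_PayType("Postpaid")
    # do_action_with_exception() returns the raw response body.
    response = client.do_action_with_exception(request)
    print(response)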
|
leetcode.com/python/876_Middle_of_the_Linked_List.py
|
vansh-tiwari/coding-interview-gym
| 713 |
65911
|
<gh_stars>100-1000
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def middleNode(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return None
        fastRunner, slowRunner = head, head
        while fastRunner and fastRunner.next:
            fastRunner = fastRunner.next.next
            slowRunner = slowRunner.next
        return slowRunner
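# Quick local check (not part of the original solution). ``ListNode`` below is
# a minimal stand-in for the class LeetCode normally provides; the list values
# are illustrative.
if __name__ == "__main__":
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None

    head = ListNode(1)
    node = head
    for value in [2, 3, 4, 5]:
        node.next = ListNode(value)
        node = node.next

    print(Solution().middleNode(head).val)  # expected: 3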
|
DQM/SiPixelPhase1Track/python/SiPixelPhase1TrackEfficiency_cfi.py
|
malbouis/cmssw
| 852 |
65919
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
from DQM.SiPixelPhase1Common.HistogramManager_cfi import *
import DQM.SiPixelPhase1Common.TriggerEventFlag_cfi as trigger
SiPixelPhase1TrackEfficiencyValid = DefaultHistoTrack.clone(
name = "valid",
title = "Valid Hits",
range_min = 0, range_max = 50, range_nbins = 50,
xlabel = "valid hits",
dimensions = 0,
specs = VPSet(
StandardSpecifications1D_Num,
#StandardSpecification2DProfile_Num, #for this we have the on track clusters map (i.e the same thing)
Specification().groupBy("PXBarrel/PXLayer/Event") #this will produce inclusive counts per Layer/Disk
.reduce("COUNT")
.groupBy("PXBarrel/PXLayer")
.save(nbins=50, xmin=0, xmax=1500),
Specification().groupBy("PXForward/PXDisk/Event")
.reduce("COUNT")
.groupBy("PXForward/PXDisk/")
.save(nbins=50, xmin=0, xmax=1500),
)
)
SiPixelPhase1TrackEfficiencyInactive = DefaultHistoTrack.clone(
name = "inactive",
title = "Inactive Hits",
xlabel = "inactive hits",
range_min = 0, range_max = 25, range_nbins = 25,
dimensions = 0,
specs = VPSet(
StandardSpecification2DProfile_Num,
Specification().groupBy("PXBarrel/PXLayer/Event") #this will produce inclusive counts per Layer/Disk
.reduce("COUNT")
.groupBy("PXBarrel/PXLayer")
.save(nbins=50, xmin=0, xmax=100),
Specification().groupBy("PXForward/PXDisk/Event")
.reduce("COUNT")
.groupBy("PXForward/PXDisk/")
.save(nbins=50, xmin=0, xmax=100),
)
)
SiPixelPhase1TrackEfficiencyMissing = DefaultHistoTrack.clone(
name = "missing",
title = "Missing Hits",
range_min = 0, range_max = 25, range_nbins = 25,
xlabel = "missing hits",
dimensions = 0,
specs = VPSet(
StandardSpecifications1D_Num,
StandardSpecification2DProfile_Num,
Specification().groupBy("PXBarrel/PXLayer/Event") #this will produce inclusive counts per Layer/Disk
.reduce("COUNT")
.groupBy("PXBarrel/PXLayer")
.save(nbins=50, xmin=0, xmax=100),
Specification().groupBy("PXForward/PXDisk/Event")
.reduce("COUNT")
.groupBy("PXForward/PXDisk/")
.save(nbins=50, xmin=0, xmax=100),
)
)
SiPixelPhase1TrackEfficiencyEfficiency = SiPixelPhase1TrackEfficiencyValid.clone(
name = "hitefficiency",
title = "Hit Efficiency",
xlabel = "#valid/(#valid+#missing)",
dimensions = 1,
specs = VPSet(
StandardSpecification2DProfile,
#profiles per layer and shell
Specification(PerLadder).groupBy("PXBarrel/Shell/PXLayer/SignedLadder")
.reduce("MEAN")
.groupBy("PXBarrel/Shell/PXLayer", "EXTEND_X")
.save(),
Specification(PerLadder).groupBy("PXForward/HalfCylinder/PXRing/PXDisk/SignedBlade")
.reduce("MEAN")
.groupBy("PXForward/HalfCylinder/PXRing/PXDisk", "EXTEND_X")
.save(),
#per layer
Specification().groupBy("PXBarrel/PXLayer")
.reduce("MEAN")
.groupBy("PXBarrel", "EXTEND_X")
.save(),
Specification().groupBy("PXForward/PXDisk")
.reduce("MEAN")
.groupBy("PXForward", "EXTEND_X")
.save(),
Specification(PerLayer2D)
.groupBy("PXBarrel/PXLayer/Lumisection")
.groupBy("PXBarrel/PXLayer", "EXTEND_X")
.groupBy("PXBarrel", "EXTEND_Y")
.reduce("MEAN")
.save(),
Specification(PerLayer2D)
.groupBy("PXForward/PXDisk/Lumisection")
.groupBy("PXForward/PXDisk", "EXTEND_X")
.groupBy("PXForward", "EXTEND_Y")
.reduce("MEAN")
.save(),
)
)
SiPixelPhase1TrackEfficiencyVertices= DefaultHistoTrack.clone(
name = "num_vertices",
title = "PrimaryVertices",
xlabel= "# Vertices",
dimensions = 1,
range_min = -0.5,
range_max = 100.5,
range_nbins =101,
specs = VPSet(
Specification().groupBy("")
.save(),
Specification().groupBy("/Lumisection")
.reduce("MEAN")
.groupBy("","EXTEND_X")
.save()
)
)
from Configuration.Eras.Modifier_run3_common_cff import run3_common
run3_common.toModify(SiPixelPhase1TrackEfficiencyVertices, range_max = 150.5, range_nbins=151)
SiPixelPhase1TrackEfficiencyConf = cms.VPSet(
SiPixelPhase1TrackEfficiencyValid,
SiPixelPhase1TrackEfficiencyMissing,
SiPixelPhase1TrackEfficiencyInactive,
SiPixelPhase1TrackEfficiencyEfficiency,
SiPixelPhase1TrackEfficiencyVertices
)
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
SiPixelPhase1TrackEfficiencyAnalyzer = DQMEDAnalyzer('SiPixelPhase1TrackEfficiency',
clusters = cms.InputTag("siPixelClusters"),
tracks = cms.InputTag("generalTracks"),
trajectoryInput = cms.InputTag("refittedForPixelDQM"),
primaryvertices = cms.InputTag("offlinePrimaryVertices"),
tracker = cms.InputTag("MeasurementTrackerEvent"),
histograms = SiPixelPhase1TrackEfficiencyConf,
geometry = SiPixelPhase1Geometry,
triggerflags = trigger.SiPixelPhase1Triggers,
VertexCut = cms.untracked.bool(True)
)
SiPixelPhase1TrackEfficiencyHarvester = DQMEDHarvester("SiPixelPhase1Harvester",
histograms = SiPixelPhase1TrackEfficiencyConf,
geometry = SiPixelPhase1Geometry
)
|
util/tlgen/elaborate.py
|
asb/opentitan
| 1,375 |
65938
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import logging as log
from .item import Node, NodeType
from .xbar import Xbar
def elaborate(xbar: Xbar) -> bool:
"""elaborate reads all nodes and edges then
construct internal FIFOs, Sockets.
"""
# Condition check
if len(xbar.nodes) <= 1 or len(xbar.edges) == 0:
log.error(
"# of Nodes is less than 2 or no Edge exists. Cannot proceed.")
return False
for host in xbar.hosts:
process_node(host, xbar)
log.info("Node Processed: " + repr(xbar))
# Pipeline
process_pipeline(xbar)
# Build address map
# Each socket_1n should have address map
return True
def process_node(node, xbar): # node: Node -> xbar: Xbar -> Xbar
"""process each node based on algorithm
1. If a node has different clock from main clock and not ASYNC_FIFO:
a. (New Node) Create ASYNC_FIFO node.
b. Revise every edges from the node to have start node as ASYNC_FIFO
node. (New Edge) create a edge from the node to ASYNC_FIFO node.
- Repeat the algorithm with ASYNC_FIFO node.
c. Revise every edges to the node to have end node as ASYNC_FIFO
node. (New Edge) create a edge from ASYNC_FIFO node to the node.
d. If it is not DEVICE, HOST node, raise Error. If it is DEVICE, end
(next item).
2. If a node has multiple edges having it as a end node and not SOCKET_M1:
a. (New node) Create SOCKET_M1 node.
b. Revise every edges to the node to have SOCKET_M1 node as end node.
c. (New Edge) create a edge from SOCKET_M1 to the node.
d. Repeat the algorithm with the node.
3. If a node has multiple edges having it as a start node and not SOCKET_1N:
a. (New node) Create SOCKET_1N node.
b. Revise every edges from the node to have SOCKET_1N node as start node.
c. (New Edge) Create a edge from the node to SOCKET_1N node.
d. (for loop) Repeat the algorithm with SOCKET_1N's other side node.
"""
# If a node has different clock from main clock and not ASYNC_FIFO:
if node.node_type != NodeType.ASYNC_FIFO and node.clocks[0] != xbar.clock:
# (New Node) Create ASYNC_FIFO node
new_node = Node(name="asf_" + str(len(xbar.nodes)),
node_type=NodeType.ASYNC_FIFO,
clock=xbar.clock,
reset=xbar.reset)
# if node is HOST, host clock synchronizes into xbar domain
# if node is DEVICE, xbar synchronizes into device clock domain
if node.node_type == NodeType.HOST:
new_node.clocks.insert(0, node.clocks[0])
new_node.resets.insert(0, node.resets[0])
else:
new_node.clocks.append(node.clocks[0])
new_node.resets.append(node.resets[0])
xbar.insert_node(new_node, node)
process_node(new_node, xbar)
# If a node has multiple edges having it as a end node and not SOCKET_M1:
elif node.node_type != NodeType.SOCKET_M1 and len(node.us) > 1:
# (New node) Create SOCKET_M1 node
new_node = Node(name="sm1_" + str(len(xbar.nodes)),
node_type=NodeType.SOCKET_M1,
clock=xbar.clock,
reset=xbar.reset)
# By default, assume connecting to SOCKET_1N upstream and bypass all FIFOs
# If upstream requires pipelining, it will be added through process pipeline
new_node.hdepth = 0
new_node.hpass = 2**len(node.us) - 1
new_node.ddepth = 0
new_node.dpass = 1
xbar.insert_node(new_node, node)
process_node(new_node, xbar)
# If a node has multiple edges having it as a start node and not SOCKET_1N:
elif node.node_type != NodeType.SOCKET_1N and len(node.ds) > 1:
# (New node) Create SOCKET_1N node
new_node = Node(name="s1n_" + str(len(xbar.nodes)),
node_type=NodeType.SOCKET_1N,
clock=xbar.clock,
reset=xbar.reset)
# By default, assume connecting to SOCKET_M1 downstream and bypass all FIFOs
# If upstream requires pipelining, it will be added through process pipeline
new_node.hdepth = 0
new_node.hpass = 1
new_node.ddepth = 0
new_node.dpass = 2**len(node.ds) - 1
xbar.insert_node(new_node, node)
# (for loop) Repeat the algorithm with SOCKET_1N's other side node
for edge in new_node.ds:
process_node(edge.ds, xbar)
return xbar
def process_pipeline(xbar):
"""Check if HOST, DEVICE has settings different from default, then propagate it to end
"""
for host in xbar.hosts:
# go downstream and change the HReqPass/Depth at the first instance.
# If it is async, skip.
# If Socket 1N,
# if pipeline True and bypass false, set hpass to 0
# if pipeline is False, set depth to 0
# If Socket M1, find position of the host and follow procedure above
# If it is device, it means host and device are directly connected. Ignore now.
log.info("Processing pipeline for host {}".format(host.name))
# FIFO present with no passthrough option
# FIFO present with passthrough option
# FIFO not present and full passthrough
full_fifo = False
fifo_passthru = False
full_passthru = True
if host.pipeline is True and host.pipeline_byp is False:
full_fifo = True
elif host.pipeline is True and host.pipeline_byp is True:
fifo_passthru = True
elif host.pipeline is False:
full_passthru = True
dnode = host.ds[0].ds
if dnode.node_type == NodeType.ASYNC_FIFO:
continue
if dnode.node_type == NodeType.SOCKET_1N:
if full_fifo:
dnode.hpass = 0
dnode.hdepth = 2
elif fifo_passthru:
dnode.hpass = 0
dnode.hdepth = 2
elif full_passthru:
dnode.hpass = 1
dnode.hdepth = 0
log.info(
"Finished processing socket1n {}, pass={}, depth={}".format(
dnode.name, dnode.hpass, dnode.hdepth))
elif dnode.node_type == NodeType.SOCKET_M1:
idx = dnode.us.index(host.ds[0])
if full_fifo:
log.info("fifo present no bypass")
dnode.hpass = dnode.hpass & ~(1 << idx)
dnode.hdepth = dnode.hdepth | (2 << idx * 4)
elif fifo_passthru:
log.info("fifo present with bypass")
dnode.hpass = dnode.hpass | (1 << idx)
dnode.hdepth = dnode.hdepth | (2 << idx * 4)
elif full_passthru:
log.info("fifo not present")
dnode.hpass = dnode.hpass | (1 << idx)
dnode.hdepth = dnode.hdepth & ~(0xF << idx * 4)
log.info(
"Finished processing socketm1 {}, pass={}, depth={}".format(
dnode.name, dnode.hpass, dnode.hdepth))
for device in xbar.devices:
# go upstream and set DReq/RspPass at the first instance.
# If it is async, skip
# If Socket M1
# If pipeline True and bypass False, set dpass to 0
# If pipeline False, set depth to 0
# If Socket 1N, find position of the device and follow procedure above
# If it is host, ignore
log.info("Processing pipeline for device {}".format(device.name))
# FIFO present with no passthrough option
# FIFO present with passthrough option
# FIFO not present and full passthrough
full_fifo = False
fifo_passthru = False
full_passthru = True
if device.pipeline is True and device.pipeline_byp is False:
full_fifo = True
elif device.pipeline is True and device.pipeline_byp is True:
fifo_passthru = True
elif device.pipeline is False:
full_passthru = True
unode = device.us[0].us
if unode.node_type == NodeType.ASYNC_FIFO:
continue
if unode.node_type == NodeType.SOCKET_1N:
idx = unode.ds.index(device.us[0])
if full_fifo:
unode.dpass = unode.dpass & ~(1 << idx)
unode.ddepth = unode.ddepth | (2 << idx * 4)
elif fifo_passthru:
unode.dpass = unode.dpass | (1 << idx)
unode.ddepth = unode.ddepth | (2 << idx * 4)
elif full_passthru:
unode.dpass = unode.dpass | (1 << idx)
unode.ddepth = unode.ddepth & ~(0xF << idx * 4)
log.info("Finished processing socket1n {}, pass={:x}, depth={:x}".
format(unode.name, unode.dpass, unode.ddepth))
elif unode.node_type == NodeType.SOCKET_M1:
if full_fifo:
log.info("Fifo present with no passthrough")
unode.dpass = 0
unode.ddepth = 2
elif fifo_passthru:
log.info("Fifo present with passthrough")
unode.dpass = 0
unode.ddepth = 2
elif full_passthru:
log.info("No Fifo")
unode.dpass = 1
unode.ddepth = 0
log.info("Finished processing socketm1 {}, pass={:x}, depth={:x}".
format(unode.name, unode.dpass, unode.ddepth))
return xbar
|
lassie/compat.py
|
idlesign/lassie
| 354 |
65958
|
<reponame>idlesign/lassie<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
lassie.compat
~~~~~~~~~~~~~
This module contains imports and declarations for seamless Python 2 and
Python 3 compatibility.
"""
import sys
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
if is_py2:
from urlparse import urljoin, urlparse
str = unicode
elif is_py3:
from urllib.parse import urljoin, urlparse
str = str
|
photutils/detection/irafstarfinder.py
|
rosteen/photutils
| 167 |
66003
|
<gh_stars>100-1000
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements the IRAFStarFinder class.
"""
import inspect
import warnings
from astropy.nddata import extract_array
from astropy.table import QTable
from astropy.utils import lazyproperty
import numpy as np
from .core import StarFinderBase, _StarFinderKernel
from ..utils._convolution import _filter_data
from ..utils._misc import _get_version_info
from ..utils._moments import _moments, _moments_central
from ..utils.exceptions import NoDetectionsWarning
__all__ = ['IRAFStarFinder']
class IRAFStarFinder(StarFinderBase):
"""
Detect stars in an image using IRAF's "starfind" algorithm.
`IRAFStarFinder` searches images for local density maxima that have
a peak amplitude greater than ``threshold`` above the local
background and have a PSF full-width at half-maximum similar to the
input ``fwhm``. The objects' centroid, roundness (ellipticity), and
sharpness are calculated using image moments.
Parameters
----------
threshold : float
The absolute image value above which to select sources.
fwhm : float
The full-width half-maximum (FWHM) of the 2D circular Gaussian
kernel in units of pixels.
minsep_fwhm : float, optional
The minimum separation for detected objects in units of
``fwhm``.
sigma_radius : float, optional
The truncation radius of the Gaussian kernel in units of sigma
(standard deviation) [``1 sigma = FWHM /
2.0*sqrt(2.0*log(2.0))``].
sharplo : float, optional
The lower bound on sharpness for object detection.
sharphi : float, optional
The upper bound on sharpness for object detection.
roundlo : float, optional
The lower bound on roundness for object detection.
roundhi : float, optional
The upper bound on roundness for object detection.
sky : float, optional
        The background sky level of the image. Inputting a ``sky`` value
will override the background sky estimate. Setting ``sky``
affects only the output values of the object ``peak``, ``flux``,
and ``mag`` values. The default is ``None``, which means the
sky value will be estimated using the `starfind`_ method.
exclude_border : bool, optional
Set to `True` to exclude sources found within half the size of
the convolution kernel from the image borders. The default is
`False`, which is the mode used by `starfind`_.
brightest : int, None, optional
Number of brightest objects to keep after sorting the full object list.
If ``brightest`` is set to `None`, all objects will be selected.
peakmax : float, None, optional
Maximum peak pixel value in an object. Only objects whose peak pixel
values are *strictly smaller* than ``peakmax`` will be selected.
This may be used to exclude saturated sources. By default, when
``peakmax`` is set to `None`, all objects will be selected.
.. warning::
`IRAFStarFinder` automatically excludes objects whose peak
pixel values are negative. Therefore, setting ``peakmax`` to a
non-positive value would result in exclusion of all objects.
xycoords : `None` or Nx2 `~numpy.ndarray`
The (x, y) pixel coordinates of the approximate centroid
positions of identified sources. If ``xycoords`` are input, the
algorithm will skip the source-finding step.
Notes
-----
For the convolution step, this routine sets pixels beyond the image
borders to 0.0. The equivalent parameters in IRAF's `starfind`_ are
``boundary='constant'`` and ``constant=0.0``.
IRAF's `starfind`_ uses ``hwhmpsf``, ``fradius``, and ``sepmin`` as
input parameters. The equivalent input values for
`IRAFStarFinder` are:
* ``fwhm = hwhmpsf * 2``
* ``sigma_radius = fradius * sqrt(2.0*log(2.0))``
* ``minsep_fwhm = 0.5 * sepmin``
The main differences between `~photutils.detection.DAOStarFinder`
and `~photutils.detection.IRAFStarFinder` are:
* `~photutils.detection.IRAFStarFinder` always uses a 2D
circular Gaussian kernel, while
`~photutils.detection.DAOStarFinder` can use an elliptical
Gaussian kernel.
* `~photutils.detection.IRAFStarFinder` calculates the objects'
centroid, roundness, and sharpness using image moments.
See Also
--------
DAOStarFinder
References
----------
.. [1] https://iraf.net/irafhelp.php?val=starfind
.. _starfind: https://iraf.net/irafhelp.php?val=starfind
"""
def __init__(self, threshold, fwhm, sigma_radius=1.5, minsep_fwhm=2.5,
sharplo=0.5, sharphi=2.0, roundlo=0.0, roundhi=0.2, sky=None,
exclude_border=False, brightest=None, peakmax=None,
xycoords=None):
if not np.isscalar(threshold):
raise TypeError('threshold must be a scalar value.')
if not np.isscalar(fwhm):
raise TypeError('fwhm must be a scalar value.')
self.threshold = threshold
self.fwhm = fwhm
self.sigma_radius = sigma_radius
self.minsep_fwhm = minsep_fwhm
self.sharplo = sharplo
self.sharphi = sharphi
self.roundlo = roundlo
self.roundhi = roundhi
self.sky = sky
self.exclude_border = exclude_border
self.brightest = self._validate_brightest(brightest)
self.peakmax = peakmax
if xycoords is not None:
xycoords = np.asarray(xycoords)
if xycoords.ndim != 2 or xycoords.shape[1] != 2:
raise ValueError('xycoords must be shaped as a Nx2 array')
self.xycoords = xycoords
self.kernel = _StarFinderKernel(self.fwhm, ratio=1.0, theta=0.0,
sigma_radius=self.sigma_radius)
self.min_separation = max(2, int((self.fwhm * self.minsep_fwhm) + 0.5))
@staticmethod
def _validate_brightest(brightest):
if brightest is not None:
if brightest <= 0:
                raise ValueError('brightest must be > 0')
bright_int = int(brightest)
if bright_int != brightest:
raise ValueError('brightest must be an integer')
brightest = bright_int
return brightest
def _get_raw_catalog(self, data, mask=None):
convolved_data = _filter_data(data, self.kernel.data, mode='constant',
fill_value=0.0,
check_normalization=False)
if self.xycoords is None:
xypos = self._find_stars(convolved_data, self.kernel,
self.threshold,
min_separation=self.min_separation,
mask=mask,
exclude_border=self.exclude_border)
else:
xypos = self.xycoords
if xypos is None:
warnings.warn('No sources were found.', NoDetectionsWarning)
return None
cat = _IRAFStarFinderCatalog(data, convolved_data, xypos, self.kernel,
sky=self.sky, sharplo=self.sharplo,
sharphi=self.sharphi,
roundlo=self.roundlo,
roundhi=self.roundhi,
brightest=self.brightest,
peakmax=self.peakmax)
return cat
def find_stars(self, data, mask=None):
"""
Find stars in an astronomical image.
Parameters
----------
data : 2D array_like
The 2D image array.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data``
is masked. Masked pixels are ignored when searching for
stars.
Returns
-------
table : `~astropy.table.QTable` or `None`
A table of found objects with the following parameters:
* ``id``: unique object identification number.
* ``xcentroid, ycentroid``: object centroid.
* ``fwhm``: object FWHM.
* ``sharpness``: object sharpness.
* ``roundness``: object roundness.
            * ``pa``: object position angle (degrees counterclockwise from
the positive x axis).
* ``npix``: the total number of (positive) unmasked pixels.
* ``sky``: the local ``sky`` value.
* ``peak``: the peak, sky-subtracted, pixel value of the object.
* ``flux``: the object instrumental flux.
* ``mag``: the object instrumental magnitude calculated as
``-2.5 * log10(flux)``.
`None` is returned if no stars are found.
"""
cat = self._get_raw_catalog(data, mask=mask)
if cat is None:
return None
# apply all selection filters
cat = cat.apply_all_filters()
if cat is None:
return None
# create the output table
return cat.to_table()
class _IRAFStarFinderCatalog:
"""
Class to create a catalog of the properties of each detected star,
as defined by IRAF's ``starfind`` task.
Parameters
----------
data : 2D `~numpy.ndarray`
The 2D image.
convolved_data : 2D `~numpy.ndarray`
The convolved 2D image.
xypos: Nx2 `numpy.ndarray`
A Nx2 array of (x, y) pixel coordinates denoting the central
positions of the stars.
kernel : `_StarFinderKernel`
The convolution kernel. This kernel must match the kernel used
to create the ``convolved_data``.
sky : `None` or float, optional
The local sky level around the source. If sky is ``None``, then
a local sky level will be (crudely) estimated using the IRAF
``starfind`` calculation.
"""
def __init__(self, data, convolved_data, xypos, kernel, sky=None,
sharplo=0.2, sharphi=1.0, roundlo=-1.0, roundhi=1.0,
brightest=None, peakmax=None):
self.data = data
self.convolved_data = convolved_data
self.xypos = xypos
self.kernel = kernel
self._sky = sky
self.sharplo = sharplo
self.sharphi = sharphi
self.roundlo = roundlo
self.roundhi = roundhi
self.brightest = brightest
self.peakmax = peakmax
self.id = np.arange(len(self)) + 1
self.cutout_shape = kernel.shape
self.default_columns = ('id', 'xcentroid', 'ycentroid', 'fwhm',
'sharpness', 'roundness', 'pa', 'npix',
'sky', 'peak', 'flux', 'mag')
def __len__(self):
return len(self.xypos)
def __getitem__(self, index):
newcls = object.__new__(self.__class__)
init_attr = ('data', 'convolved_data', 'kernel', '_sky', 'sharplo',
'sharphi', 'roundlo', 'roundhi', 'brightest', 'peakmax',
'cutout_shape', 'default_columns')
for attr in init_attr:
setattr(newcls, attr, getattr(self, attr))
# xypos determines ordering and isscalar
# NOTE: always keep as a 2D array, even for a single source
attr = 'xypos'
value = getattr(self, attr)[index]
setattr(newcls, attr, np.atleast_2d(value))
keys = set(self.__dict__.keys()) & set(self._lazyproperties)
keys.add('id')
for key in keys:
value = self.__dict__[key]
# do not insert lazy attributes that are always scalar (e.g.,
# isscalar), i.e., not an array/list for each source
if np.isscalar(value):
continue
# value is always at least a 1D array, even for a single source
value = np.atleast_1d(value[index])
newcls.__dict__[key] = value
return newcls
@lazyproperty
def isscalar(self):
"""
Whether the instance is scalar (e.g., a single source).
"""
return self.xypos.shape == (1, 2)
@property
def _lazyproperties(self):
"""
Return all lazyproperties (even in superclasses).
"""
def islazyproperty(obj):
return isinstance(obj, lazyproperty)
return [i[0] for i in inspect.getmembers(self.__class__,
predicate=islazyproperty)]
def reset_ids(self):
"""Reset the ID column to be consecutive integers."""
self.id = np.arange(len(self)) + 1
@lazyproperty
def sky(self):
if self._sky is None:
skymask = ~self.kernel.mask.astype(bool) # 1=sky, 0=obj
nsky = np.count_nonzero(skymask)
axis = (1, 2)
if nsky == 0.:
sky = (np.max(self.cutout_data_nosub, axis=axis)
- np.max(self.cutout_convdata, axis=axis))
else:
sky = (np.sum(self.cutout_data_nosub * skymask, axis=axis)
/ nsky)
else:
sky = np.full(len(self), fill_value=self._sky)
return sky
def make_cutouts(self, data):
cutouts = []
for xpos, ypos in self.xypos:
cutouts.append(extract_array(data, self.cutout_shape, (ypos, xpos),
fill_value=0.0))
return np.array(cutouts)
@lazyproperty
def cutout_data_nosub(self):
return self.make_cutouts(self.data)
@lazyproperty
def cutout_data(self):
data = ((self.cutout_data_nosub - self.sky[:, np.newaxis, np.newaxis])
* self.kernel.mask)
# IRAF starfind discards negative pixels
data[data < 0] = 0.0
return data
@lazyproperty
def cutout_convdata(self):
return self.make_cutouts(self.convolved_data)
@lazyproperty
def npix(self):
return np.count_nonzero(self.cutout_data, axis=(1, 2))
@lazyproperty
def moments(self):
return np.array([_moments(arr, order=1) for arr in self.cutout_data])
@lazyproperty
def cutout_centroid(self):
moments = self.moments
# ignore divide-by-zero RuntimeWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
ycentroid = moments[:, 1, 0] / moments[:, 0, 0]
xcentroid = moments[:, 0, 1] / moments[:, 0, 0]
return np.transpose((ycentroid, xcentroid))
@lazyproperty
def cutout_xcentroid(self):
return np.transpose(self.cutout_centroid)[1]
@lazyproperty
def cutout_ycentroid(self):
return np.transpose(self.cutout_centroid)[0]
@lazyproperty
def cutout_xorigin(self):
return np.transpose(self.xypos)[0] - self.kernel.xradius
@lazyproperty
def cutout_yorigin(self):
return np.transpose(self.xypos)[1] - self.kernel.yradius
@lazyproperty
def xcentroid(self):
return self.cutout_xcentroid + self.cutout_xorigin
@lazyproperty
def ycentroid(self):
return self.cutout_ycentroid + self.cutout_yorigin
@lazyproperty
def peak(self):
return np.array([np.max(arr) for arr in self.cutout_data])
@lazyproperty
def flux(self):
return np.array([np.sum(arr) for arr in self.cutout_data])
@lazyproperty
def mag(self):
return -2.5 * np.log10(self.flux)
@lazyproperty
def moments_central(self):
moments = np.array([_moments_central(arr, center=(xcen_, ycen_),
order=2)
for arr, xcen_, ycen_ in
zip(self.cutout_data, self.cutout_xcentroid,
self.cutout_ycentroid)])
return moments / self.moments[:, 0, 0][:, np.newaxis, np.newaxis]
@lazyproperty
def mu_sum(self):
return self.moments_central[:, 0, 2] + self.moments_central[:, 2, 0]
@lazyproperty
def mu_diff(self):
return self.moments_central[:, 0, 2] - self.moments_central[:, 2, 0]
@lazyproperty
def fwhm(self):
return 2.0 * np.sqrt(np.log(2.0) * self.mu_sum)
@lazyproperty
def roundness(self):
return np.sqrt(self.mu_diff**2
+ 4.0 * self.moments_central[:, 1, 1]**2) / self.mu_sum
@lazyproperty
def sharpness(self):
return self.fwhm / self.kernel.fwhm
@lazyproperty
def pa(self):
pa = np.rad2deg(0.5 * np.arctan2(2.0 * self.moments_central[:, 1, 1],
self.mu_diff))
pa = np.where(pa < 0, pa + 180, pa)
return pa
def apply_filters(self):
"""Filter the catalog."""
mask = np.count_nonzero(self.cutout_data, axis=(1, 2)) > 1
mask &= ((self.sharpness > self.sharplo)
& (self.sharpness < self.sharphi)
& (self.roundness > self.roundlo)
& (self.roundness < self.roundhi))
if self.peakmax is not None:
mask &= (self.peak < self.peakmax)
newcat = self[mask]
if len(newcat) == 0:
warnings.warn('Sources were found, but none pass the sharpness, '
'roundness, or peakmax criteria',
NoDetectionsWarning)
return None
return newcat
def select_brightest(self):
"""
Sort the catalog by the brightest fluxes and select the
top brightest sources.
"""
newcat = self
if self.brightest is not None:
idx = np.argsort(self.flux)[::-1][:self.brightest]
newcat = self[idx]
return newcat
def apply_all_filters(self):
"""
Apply all filters, select the brightest, and reset the source
ids.
"""
cat = self.apply_filters()
if cat is None:
return None
cat = cat.select_brightest()
cat.reset_ids()
return cat
def to_table(self, columns=None):
meta = {'version': _get_version_info()}
table = QTable(meta=meta)
if columns is None:
columns = self.default_columns
for column in columns:
table[column] = getattr(self, column)
return table
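# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of the typical call pattern: construct the finder
# with a detection threshold and FWHM, then call it on a background-subtracted
# image. The synthetic image, threshold and FWHM values below are assumptions
# for illustration only; whether the injected source passes the default
# sharpness/roundness cuts depends on the noise realization.
if __name__ == '__main__':  # pragma: no cover
    rng = np.random.default_rng(seed=0)
    image = rng.normal(loc=0.0, scale=1.0, size=(101, 101))
    yy, xx = np.mgrid[0:101, 0:101]
    # inject one roughly Gaussian "star" (sigma ~ 2 pix -> FWHM ~ 4.7 pix)
    image += 100.0 * np.exp(-((xx - 50) ** 2 + (yy - 50) ** 2) / (2.0 * 2.0 ** 2))
    finder = IRAFStarFinder(threshold=10.0, fwhm=4.7)
    sources = finder(image)
    if sources is None:
        print('No sources passed the selection criteria.')
    else:
        print(sources)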
|
docs/generate_api_docs.py
|
mxmpl/pykaldi
| 916 |
66004
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import pkgutil
import sys
from subprocess import check_call
import kaldi
parser = argparse.ArgumentParser(
description="Generates autosummary documentation for pykaldi.")
# parser.add_argument('--force', '-f', action='store_true',
# help='Overwrite files. Default: False.')
parser.add_argument('--out_dir', '-o', default='api',
help='Output directory. Default: api' )
parser.add_argument('--include_private', action='store_true',
help='Include private modules. Default: False.')
args = parser.parse_args()
if os.path.exists(args.out_dir):
print("Output directory: {} already exists.".format(args.out_dir),
file=sys.stderr)
sys.exit(1)
os.mkdir(args.out_dir)
##################################################
# Generate autosummary lists and api
##################################################
with open("api.rst", "w") as api, \
open("packages.rst", "w") as packages, \
open("modules.rst", "w") as modules:
print(".. toctree::\n :caption: API Guide\n :hidden:\n", file=api)
# print(" {}/kaldi".format(args.out_dir), file=api)
print(".. autosummary::\n :toctree: {}\n".format(args.out_dir),
file=packages)
print(".. autosummary::\n :toctree: {}\n".format(args.out_dir),
file=modules)
for _, modname, ispkg in pkgutil.walk_packages(path=kaldi.__path__,
prefix=kaldi.__name__+'.',
onerror=lambda x: None):
if modname.split(".")[-1][0] == "_" and not args.include_private:
continue
if modname == "kaldi.itf":
continue
if ispkg:
print(" {}/{}".format(args.out_dir, modname), file=api)
print(" {}".format(modname), file=packages)
else:
if len(modname.split(".")) == 2:
print(" {}/{}".format(args.out_dir, modname), file=api)
print(" {}".format(modname), file=modules)
##################################################
# Call autogen
##################################################
check_call(['sphinx-autogen', '-i', '-o', args.out_dir, 'packages.rst'])
check_call(['sphinx-autogen', '-i', '-o', args.out_dir, 'modules.rst'])
check_call(['rm' , '-f', 'packages.rst', 'modules.rst'])
##################################################
# Include submodules in package documentation
##################################################
for importer, modname, ispkg in pkgutil.walk_packages(path=kaldi.__path__,
prefix=kaldi.__name__+'.',
onerror=lambda x: None):
if modname.split(".")[-1][0] == "_" and not args.include_private:
continue
if modname == "kaldi.itf":
continue
if not ispkg and len(modname.split(".")) > 2:
mod_file = "{}.rst".format(modname)
mod_path = os.path.join(args.out_dir, mod_file)
pkg_file = "{}.rst".format(".".join(modname.split(".")[:-1]))
pkg_path = os.path.join(args.out_dir, pkg_file)
# Edit submodule headers
check_call(['sed', '-i', 's/=/-/g', mod_path])
# Include submodule in pkg.rst
with open(pkg_path, "a") as pkg:
# pkg.write("""\n.. include:: {}\n\n""".format(mod_file))
pkg.write("\n")
pkg.write(open(mod_path).read())
# Remove mod.rst
check_call(['rm', '-f', mod_path])
##################################################
# Add autosummary nosignatures option
##################################################
for importer, modname, ispkg in pkgutil.walk_packages(path=kaldi.__path__,
prefix=kaldi.__name__+'.',
onerror=lambda x: None):
if modname.split(".")[-1][0] == "_" and not args.include_private:
continue
if modname == "kaldi.itf":
continue
if ispkg:
pkg_file = "{}.rst".format(modname)
pkg_path = os.path.join(args.out_dir, pkg_file)
check_call(['sed', '-i',
's/autosummary::/autosummary::\\n :nosignatures:/g',
pkg_path])
|
alipay/aop/api/domain/ExRefRateInfoVO.py
|
snowxmas/alipay-sdk-python-all
| 213 |
66024
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ExRefRateInfoVO(object):
def __init__(self):
self._currency_pair = None
self._datum_currency = None
self._price_type = None
self._pub_date = None
self._pub_time = None
self._rate = None
self._target_currency = None
@property
def currency_pair(self):
return self._currency_pair
@currency_pair.setter
def currency_pair(self, value):
self._currency_pair = value
@property
def datum_currency(self):
return self._datum_currency
@datum_currency.setter
def datum_currency(self, value):
self._datum_currency = value
@property
def price_type(self):
return self._price_type
@price_type.setter
def price_type(self, value):
self._price_type = value
@property
def pub_date(self):
return self._pub_date
@pub_date.setter
def pub_date(self, value):
self._pub_date = value
@property
def pub_time(self):
return self._pub_time
@pub_time.setter
def pub_time(self, value):
self._pub_time = value
@property
def rate(self):
return self._rate
@rate.setter
def rate(self, value):
self._rate = value
@property
def target_currency(self):
return self._target_currency
@target_currency.setter
def target_currency(self, value):
self._target_currency = value
def to_alipay_dict(self):
params = dict()
if self.currency_pair:
if hasattr(self.currency_pair, 'to_alipay_dict'):
params['currency_pair'] = self.currency_pair.to_alipay_dict()
else:
params['currency_pair'] = self.currency_pair
if self.datum_currency:
if hasattr(self.datum_currency, 'to_alipay_dict'):
params['datum_currency'] = self.datum_currency.to_alipay_dict()
else:
params['datum_currency'] = self.datum_currency
if self.price_type:
if hasattr(self.price_type, 'to_alipay_dict'):
params['price_type'] = self.price_type.to_alipay_dict()
else:
params['price_type'] = self.price_type
if self.pub_date:
if hasattr(self.pub_date, 'to_alipay_dict'):
params['pub_date'] = self.pub_date.to_alipay_dict()
else:
params['pub_date'] = self.pub_date
if self.pub_time:
if hasattr(self.pub_time, 'to_alipay_dict'):
params['pub_time'] = self.pub_time.to_alipay_dict()
else:
params['pub_time'] = self.pub_time
if self.rate:
if hasattr(self.rate, 'to_alipay_dict'):
params['rate'] = self.rate.to_alipay_dict()
else:
params['rate'] = self.rate
if self.target_currency:
if hasattr(self.target_currency, 'to_alipay_dict'):
params['target_currency'] = self.target_currency.to_alipay_dict()
else:
params['target_currency'] = self.target_currency
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ExRefRateInfoVO()
if 'currency_pair' in d:
o.currency_pair = d['currency_pair']
if 'datum_currency' in d:
o.datum_currency = d['datum_currency']
if 'price_type' in d:
o.price_type = d['price_type']
if 'pub_date' in d:
o.pub_date = d['pub_date']
if 'pub_time' in d:
o.pub_time = d['pub_time']
if 'rate' in d:
o.rate = d['rate']
if 'target_currency' in d:
o.target_currency = d['target_currency']
return o
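# Round-trip sketch (not part of the original module); the field values are
# illustrative placeholders.
if __name__ == '__main__':
    source = {
        'currency_pair': 'USDCNY',
        'datum_currency': 'USD',
        'rate': '7.25',
        'target_currency': 'CNY',
    }
    vo = ExRefRateInfoVO.from_alipay_dict(source)
    print(vo.to_alipay_dict())  # expected to echo the populated fields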
|
pmca/usb/driver/windows/__init__.py
|
kratz00/Sony-PMCA-RE
| 1,313 |
66034
|
import re
def parseDeviceId(id):
match = re.search('(#|\\\\)vid_([a-f0-9]{4})&pid_([a-f0-9]{4})(&|#|\\\\)', id, re.IGNORECASE)
return [int(match.group(i), 16) if match else None for i in [2, 3]]
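# Minimal usage sketch (not part of the original module). The device instance
# path below is a made-up example in the usual Windows "VID_xxxx&PID_xxxx"
# format.
if __name__ == '__main__':
    vid, pid = parseDeviceId('USB\\VID_054C&PID_0994&MI_00\\7&1a2b3c4d&0&0000')
    print('vid=0x%04x pid=0x%04x' % (vid, pid))  # expected: vid=0x054c pid=0x0994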
|
ufora/core/JsonPickle.py
|
ufora/ufora
| 571 |
66038
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JsonPickle
A simplified form of 'pickle' that only pickles ComputedGraph locations and 'simple' python
objects (e.g. those for whom eval(repr(x)) == x).
In this case, we don't pickle to a string - we pickle to 'simple' python objects, which are
very close to json.
"""
import sys
class Pickleable(object):
"""Mixin class to indicate that a class supports JsonPickle serialization. Classes
must expose methods
__reduce__(self):
return (cls, ({..kwds},))
we will then call cls(**kwds) to inflate. cls must descend from 'Pickleable'.
By default, we just use the dict of the object and its own type
"""
def __reduce__(self):
return (type(self), (self.__dict__,))
#set of other classes we are allowed to unpickle. Mostly to allow for boost::python
#classes, which can't easily descend from 'Pickleable'
unpickleWhitelist_ = set()
def addClassToPickleWhitelist(cls):
"""Add a class that doesn't descend from Pickleable to the pickle whitelist"""
unpickleWhitelist_.add(cls)
ENCODING_OBJECT = 'o'
ENCODING_SIMPLE_PYTHON = 'P'
ENCODING_UNICODE = 'u'
ENCODING_INT = 'i'
ENCODING_LONG = 'l'
ENCODING_TUPLE = '()'
ENCODING_LIST = '[]'
ENCODING_DICT = '{}'
#a dictionary from string to ComputedGraph.Location subclasses
locationTypes_ = {}
#a dictionary from a ComputedGraph type to a key that can be used in place of the usual
#(clsModule, clsName) pair
locationTypeOverrides_ = {}
def addOverride(cls, override):
"""Override the serializer to use 'override' as the identifier for instances of 'cls'
This is primarily to shorted the amount of data in the representation and to allow the
representation to remain constant even if classes are moving around or changing names.
override may not be a tuple
"""
assert cls not in locationTypeOverrides_
assert not isinstance(override, tuple)
locationTypeOverrides_[cls] = override
locationTypes_[override] = cls
def addClassAlias(cls, override):
locationTypes_[override] = cls
def classFromModuleAndName(clsModuleAndName):
if clsModuleAndName in locationTypeOverrides_:
return locationTypeOverrides_[clsModuleAndName]
if clsModuleAndName not in locationTypes_:
__import__(clsModuleAndName[0])
try:
module = sys.modules[clsModuleAndName[0]]
except KeyError:
raise UserWarning("Couldn't import module %s", clsModuleAndName[0])
try:
cls = module.__dict__[clsModuleAndName[1]]
except KeyError:
raise UserWarning("Can't find %s in %s" % (clsModuleAndName[1], module.__name__))
if not issubclass(cls, Pickleable) and cls not in unpickleWhitelist_:
raise UserWarning("%s is not a computed graph location type" % clsModuleAndName)
locationTypes_[clsModuleAndName] = cls
return locationTypes_[clsModuleAndName]
def toSimple(complexObject):
if complexObject is None:
return (ENCODING_SIMPLE_PYTHON, None)
if isinstance(complexObject, (float, str, bool)):
return (ENCODING_SIMPLE_PYTHON, complexObject)
if isinstance(complexObject, int):
return (ENCODING_INT, str(complexObject))
if isinstance(complexObject, long):
return (ENCODING_LONG, str(complexObject))
if isinstance(complexObject, unicode):
return (ENCODING_UNICODE, complexObject.encode('utf-8'))
if isinstance(complexObject, tuple):
subs = []
allArePurePython = True
for x in complexObject:
encoding, simpleForm = toSimple(x)
if encoding != ENCODING_SIMPLE_PYTHON:
allArePurePython = False
subs.append((encoding, simpleForm))
if allArePurePython:
return (ENCODING_SIMPLE_PYTHON, complexObject)
return (ENCODING_TUPLE, tuple(subs))
    if isinstance(complexObject, list):
        return (ENCODING_LIST, tuple([toSimple(x) for x in complexObject]))
if isinstance(complexObject, dict):
subs = []
for key, val in complexObject.iteritems():
keyEncoded = toSimple(key)
valEncoded = toSimple(val)
subs.append((keyEncoded, valEncoded))
return (ENCODING_DICT, tuple(sorted(subs)))
try:
cls, args = complexObject.__reduce__()
except:
raise UserWarning("Couldn't call __reduce__ on %s", complexObject)
if cls in locationTypeOverrides_:
clsKey = locationTypeOverrides_[cls]
else:
clsKey = (cls.__module__, cls.__name__)
return (ENCODING_OBJECT, (clsKey, toSimple(args[0])))
def toComplex(simpleObject):
"""Convert 'x' from a simplified form to the full CG form."""
if simpleObject[0] == ENCODING_SIMPLE_PYTHON:
return simpleObject[1]
if simpleObject[0] == ENCODING_INT:
return int(simpleObject[1])
if simpleObject[0] == ENCODING_UNICODE:
return unicode(simpleObject[1], 'utf-8')
if simpleObject[0] == ENCODING_LONG:
return long(simpleObject[1])
if simpleObject[0] == ENCODING_TUPLE:
return tuple([toComplex(x) for x in simpleObject[1]])
if simpleObject[0] == ENCODING_LIST:
return [toComplex(x) for x in simpleObject[1]]
if simpleObject[0] == ENCODING_DICT:
return dict((toComplex(k), toComplex(v)) for k,v in simpleObject[1])
elif simpleObject[0] == ENCODING_OBJECT:
clsModuleAndName = simpleObject[1][0]
args = simpleObject[1][1]
cls = classFromModuleAndName(clsModuleAndName)
kwds = toComplex(args)
try:
return cls(**kwds)
except:
raise UserWarning("Failed to construct instance of %s with %s" % (cls, kwds))
raise UserWarning("Badly encoded object")
import ufora.native.Json as JsonNative
def toJson(complexObject):
return JsonNative.Json.fromSimple(toSimple(complexObject))
def fromJson(jsonForm):
return toComplex(jsonForm.toSimple())
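# Minimal round-trip sketch (assumes this module's own Python 2 / ufora environment;
# the Point class below is a hypothetical example, not part of the original API):
if __name__ == '__main__':
    class Point(Pickleable):
        def __init__(self, x=0, y=0):
            self.x = x
            self.y = y
    simple = toSimple(Point(x=1, y=2))   # ('o', (('__main__', 'Point'), <encoded kwds>))
    restored = toComplex(simple)         # looks up the class again and calls Point(**kwds)
    assert (restored.x, restored.y) == (1, 2)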
|
Chapter08/background_substraction_MOG.py
|
debojyoti007/OpenCV
| 105 |
66043
|
import cv2
import numpy as np
# Capture the input frame
def get_frame(cap, scaling_factor=0.5):
ret, frame = cap.read()
# Resize the frame
frame = cv2.resize(frame, None, fx=scaling_factor,
fy=scaling_factor, interpolation=cv2.INTER_AREA)
return frame
if __name__=='__main__':
# Initialize the video capture object
cap = cv2.VideoCapture(1)
# Create the background subtractor object
bgSubtractor = cv2.createBackgroundSubtractorMOG2()
# This factor controls the learning rate of the algorithm.
# The learning rate refers to the rate at which your model
# will learn about the background. Higher value for
# 'history' indicates a slower learning rate. You
# can play with this parameter to see how it affects
# the output.
history = 100
# Iterate until the user presses the ESC key
while True:
frame = get_frame(cap, 0.5)
# Apply the background subtraction model to the input frame
mask = bgSubtractor.apply(frame, learningRate=1.0/history)
# Convert from grayscale to 3-channel RGB
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
cv2.imshow('Input frame', frame)
cv2.imshow('Moving Objects MOG', mask & frame)
# Check if the user pressed the ESC key
c = cv2.waitKey(delay=30)
if c == 27:
break
cap.release()
cv2.destroyAllWindows()
|
idaes/surrogate/alamopy_depr/tests/examples.py
|
carldlaird/idaes-pse
| 112 |
66054
|
<reponame>carldlaird/idaes-pse
#!/usr/bin/python
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import numpy as np
import sys
def sixcamel(*x):
x1, x2 = x
t1 = np.multiply(
4.0 - 2.1 * np.power(x1, 2) + np.divide(np.power(x1, 4), 3.0), np.power(x1, 2)
)
t2 = np.multiply(4 * np.power(x2, 2) - 4, np.power(x2, 2))
z = t1 + np.multiply(x1, x2) + t2
return z
def ackley(*x):
import numpy as np
x1, x2 = x
a = 20
b = 0.2
c = 2 * 3.14159
z = (
-a * np.exp(-b * np.sqrt(0.5 * (x1 ** 2 + x2 ** 2)))
- np.exp(0.5 * (np.cos(c * x1) + np.cos(c * x2)))
+ a
+ np.exp(1)
)
return z
def branin(*x):
import numpy as np
x1, x2 = x
pi = 3.14159
z = (
(x2 - (5.1 / (4 * pi ** 2)) * x1 ** 2 + (5 / pi) * x1 - 6) ** 2
        + 10 * (1 - (1 / (8 * pi))) * np.cos(x1) + 10
+ np.random.normal(0, 0.1)
)
return z
if __name__ == "__main__":
sys.stdout.write(" ALAMOpy example functions ")
sys.stdout.write(" call functions with : ")
sys.stdout.write(" examples.<name>")
sys.stdout.write(" <name> = branin ")
sys.stdout.write(" sixcamel ")
sys.stdout.write(" ackley ")
|
src/cocoa/toga_cocoa/widgets/selection.py
|
luizoti/toga
| 1,261 |
66056
|
<reponame>luizoti/toga<gh_stars>1000+
from travertino.size import at_least
from toga_cocoa.libs import SEL, NSPopUpButton, objc_method
from .base import Widget
class TogaPopupButton(NSPopUpButton):
@objc_method
def onSelect_(self, obj) -> None:
if self.interface.on_select:
self.interface.on_select(self.interface)
class Selection(Widget):
def create(self):
self.native = TogaPopupButton.alloc().init()
self.native.interface = self.interface
self.native.target = self.native
self.native.action = SEL('onSelect:')
self.add_constraints()
def rehint(self):
content_size = self.native.intrinsicContentSize()
self.interface.intrinsic.height = content_size.height
self.interface.intrinsic.width = at_least(max(self.interface.MIN_WIDTH, content_size.width))
def remove_all_items(self):
self.native.removeAllItems()
def add_item(self, item):
self.native.addItemWithTitle(item)
def select_item(self, item):
self.native.selectItemWithTitle(item)
def get_selected_item(self):
return str(self.native.titleOfSelectedItem)
def set_on_select(self, handler):
pass
|
src/wavegrad/learner.py
|
CookiePPP/wavegrad
| 182 |
66057
|
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from wavegrad.dataset import from_path as dataset_from_path
from wavegrad.model import WaveGrad
def _nested_map(struct, map_fn):
if isinstance(struct, tuple):
return tuple(_nested_map(x, map_fn) for x in struct)
if isinstance(struct, list):
return [_nested_map(x, map_fn) for x in struct]
if isinstance(struct, dict):
return { k: _nested_map(v, map_fn) for k, v in struct.items() }
return map_fn(struct)
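# Usage sketch for _nested_map (illustrative values): it applies map_fn to every leaf of a
# nested tuple/list/dict structure while preserving the container types, which is how
# train() below moves every tensor in a feature batch onto the model's device, e.g.
#   _nested_map({'a': [1, 2], 'b': (3,)}, lambda x: x * 10)  ->  {'a': [10, 20], 'b': (30,)}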
class WaveGradLearner:
def __init__(self, model_dir, model, dataset, optimizer, params, *args, **kwargs):
os.makedirs(model_dir, exist_ok=True)
self.model_dir = model_dir
self.model = model
self.dataset = dataset
self.optimizer = optimizer
self.params = params
self.autocast = torch.cuda.amp.autocast(enabled=kwargs.get('fp16', False))
self.scaler = torch.cuda.amp.GradScaler(enabled=kwargs.get('fp16', False))
self.step = 0
self.is_master = True
beta = np.array(self.params.noise_schedule)
noise_level = np.cumprod(1 - beta)**0.5
noise_level = np.concatenate([[1.0], noise_level], axis=0)
self.noise_level = torch.tensor(noise_level.astype(np.float32))
self.loss_fn = nn.L1Loss()
self.summary_writer = None
def state_dict(self):
if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
model_state = self.model.module.state_dict()
else:
model_state = self.model.state_dict()
return {
'step': self.step,
'model': { k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in model_state.items() },
'optimizer': { k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in self.optimizer.state_dict().items() },
'params': dict(self.params),
'scaler': self.scaler.state_dict(),
}
def load_state_dict(self, state_dict):
if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
self.model.module.load_state_dict(state_dict['model'])
else:
self.model.load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
self.scaler.load_state_dict(state_dict['scaler'])
self.step = state_dict['step']
def save_to_checkpoint(self, filename='weights'):
save_basename = f'{filename}-{self.step}.pt'
save_name = f'{self.model_dir}/{save_basename}'
link_name = f'{self.model_dir}/{filename}.pt'
torch.save(self.state_dict(), save_name)
if os.name == 'nt':
torch.save(self.state_dict(), link_name)
else:
if os.path.islink(link_name):
os.unlink(link_name)
os.symlink(save_basename, link_name)
def restore_from_checkpoint(self, filename='weights'):
try:
checkpoint = torch.load(f'{self.model_dir}/{filename}.pt')
self.load_state_dict(checkpoint)
return True
except FileNotFoundError:
return False
def train(self, max_steps=None):
device = next(self.model.parameters()).device
while True:
for features in tqdm(self.dataset, desc=f'Epoch {self.step // len(self.dataset)}') if self.is_master else self.dataset:
if max_steps is not None and self.step >= max_steps:
return
features = _nested_map(features, lambda x: x.to(device) if isinstance(x, torch.Tensor) else x)
loss = self.train_step(features)
if torch.isnan(loss).any():
raise RuntimeError(f'Detected NaN loss at step {self.step}.')
if self.is_master:
if self.step % 100 == 0:
self._write_summary(self.step, features, loss)
if self.step % len(self.dataset) == 0:
self.save_to_checkpoint()
self.step += 1
def train_step(self, features):
for param in self.model.parameters():
param.grad = None
audio = features['audio']
spectrogram = features['spectrogram']
N, T = audio.shape
S = 1000
device = audio.device
self.noise_level = self.noise_level.to(device)
with self.autocast:
s = torch.randint(1, S + 1, [N], device=audio.device)
l_a, l_b = self.noise_level[s-1], self.noise_level[s]
noise_scale = l_a + torch.rand(N, device=audio.device) * (l_b - l_a)
noise_scale = noise_scale.unsqueeze(1)
noise = torch.randn_like(audio)
noisy_audio = noise_scale * audio + (1.0 - noise_scale**2)**0.5 * noise
predicted = self.model(noisy_audio, spectrogram, noise_scale.squeeze(1))
loss = self.loss_fn(noise, predicted.squeeze(1))
self.scaler.scale(loss).backward()
self.scaler.unscale_(self.optimizer)
self.grad_norm = nn.utils.clip_grad_norm_(self.model.parameters(), self.params.max_grad_norm)
self.scaler.step(self.optimizer)
self.scaler.update()
return loss
def _write_summary(self, step, features, loss):
writer = self.summary_writer or SummaryWriter(self.model_dir, purge_step=step)
writer.add_audio('audio/reference', features['audio'][0], step, sample_rate=self.params.sample_rate)
writer.add_scalar('train/loss', loss, step)
writer.add_scalar('train/grad_norm', self.grad_norm, step)
writer.flush()
self.summary_writer = writer
def _train_impl(replica_id, model, dataset, args, params):
torch.backends.cudnn.benchmark = True
opt = torch.optim.Adam(model.parameters(), lr=params.learning_rate)
learner = WaveGradLearner(args.model_dir, model, dataset, opt, params, fp16=args.fp16)
learner.is_master = (replica_id == 0)
learner.restore_from_checkpoint()
learner.train(max_steps=args.max_steps)
def train(args, params):
dataset = dataset_from_path(args.data_dirs, params)
model = WaveGrad(params).cuda()
_train_impl(0, model, dataset, args, params)
def train_distributed(replica_id, replica_count, port, args, params):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = str(port)
torch.distributed.init_process_group('nccl', rank=replica_id, world_size=replica_count)
device = torch.device('cuda', replica_id)
torch.cuda.set_device(device)
model = WaveGrad(params).to(device)
model = DistributedDataParallel(model, device_ids=[replica_id])
_train_impl(replica_id, model, dataset_from_path(args.data_dirs, params, is_distributed=True), args, params)
|
src/main/python/rlbot/agents/hivemind/drone_agent.py
|
VirxEC/RLBot
| 408 |
66071
|
<gh_stars>100-1000
import os
from rlbot.agents.base_independent_agent import BaseIndependentAgent
from rlbot.botmanager.helper_process_request import HelperProcessRequest
class DroneAgent(BaseIndependentAgent):
# Path to the hivemind helperprocess python file.
hive_path = None
# Bots with the same key will be part of the same hivemind.
hive_key = None
# Name of your hivemind that shows up in the console.
hive_name = None
def __init__(self, name, team, index):
super().__init__(name, team, index)
if self.hive_path is None:
raise NotImplementedError('You need to specify a path to the hivemind file.')
if self.hive_key is None:
raise NotImplementedError('You need to specify a key for your hivemind.')
if self.hive_name is None:
raise NotImplementedError('You need to specify a name for your hivemind.')
def run_independently(self, terminate_request_event):
pass
def get_helper_process_request(self) -> HelperProcessRequest:
if not os.path.isfile(self.hive_path):
raise FileNotFoundError(f'Could not find file: {self.hive_path}')
# Appends hive_path to key so that hiveminds in different places don't compete.
# Appends team to key so that each team has its own hivemind.
key = f'{self.hive_path}{self.hive_key}{self.team}'
# Creates request for helper process.
options = {
'name': self.hive_name
}
return HelperProcessRequest(self.hive_path, key, options=options)
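# Minimal subclass sketch (ExampleDrone and 'hivemind.py' are hypothetical placeholders,
# not part of RLBot itself): a concrete drone only needs to fill in the three class
# attributes that __init__ checks above.
class ExampleDrone(DroneAgent):
    hive_path = os.path.join(os.path.dirname(__file__), 'hivemind.py')
    hive_key = 'example-hive'
    hive_name = 'Example Hivemind'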
|
gryphon/data_service/exchange_volume_consumer.py
|
qiquanzhijia/gryphon
| 1,109 |
66074
|
<reponame>qiquanzhijia/gryphon
import pyximport; pyximport.install()
import json
import os
import subprocess
from delorean import epoch
from raven import Client
from gryphon.data_service.consts import *
from gryphon.data_service.queue_consumer import QueueConsumer
from gryphon.lib import session
from gryphon.lib.models.emeraldhavoc.exchange_volume import ExchangeVolume
from gryphon.lib.money import Money
s = Client(dsn=os.environ.get('SENTRY_DSN'))
def exchange_volumes_consumer_function(message, db):
subprocess.call(['touch', 'monit/heartbeat/exchange_volumes_consumer.txt'])
exchange_volume_json = json.loads(message)
timestamp = epoch(exchange_volume_json['timestamp']).datetime
exchange = exchange_volume_json['exchange_name']
exch_vol_money = Money(exchange_volume_json['volume'], 'BTC')
t = ExchangeVolume(
exchange_volume=exch_vol_money,
exchange=exchange,
timestamp=timestamp,
)
db.add(t)
session.commit_mysql_session(db)
def main():
db = session.get_a_gds_db_mysql_session()
try:
volume_consumer = QueueConsumer(
os.environ.get('AMPQ_ADDRESS'),
exchange_volumes_consumer_function,
db,
EXCHANGE,
EXCHANGE_TYPE,
EXCHANGE_VOLUME_BINDING_KEY,
EXCHANGE_VOLUME_QUEUE,
)
volume_consumer.run()
except KeyboardInterrupt:
volume_consumer.stop()
except:
s.captureException()
finally:
db.remove()
if __name__ == '__main__':
main()
|
theanets/layers/convolution.py
|
timgates42/theanets
| 314 |
66078
|
<reponame>timgates42/theanets
# -*- coding: utf-8 -*-
'''Convolutional layers "scan" over input data.'''
from __future__ import division
import numpy as np
import theano
import theano.tensor as TT
from . import base
from .. import util
__all__ = [
'Conv1',
'Conv2',
'Pool1',
'Pool2',
]
class Convolution(base.Layer):
'''Convolution layers convolve filters over the input arrays.
Parameters
----------
filter_size : (int, int)
Size of the convolution filters for this layer.
stride : (int, int), optional
Apply convolutions with this stride; i.e., skip this many samples
between convolutions. Defaults to (1, 1)---that is, no skipping.
border_mode : str, optional
Compute convolutions with this border mode. Defaults to 'valid'.
'''
def __init__(self, filter_size, stride=(1, 1), border_mode='valid', **kwargs):
self.filter_size = filter_size
self.stride = stride
self.border_mode = border_mode
super(Convolution, self).__init__(**kwargs)
def log(self):
inputs = ', '.join('"{0}" {1}'.format(*ns) for ns in self._input_shapes.items())
util.log('layer {0.__class__.__name__} "{0.name}" '
'{0.output_shape} {1} {0.border_mode} '
'filters {2}{3} from {4}', self,
getattr(self.activate, 'name', self.activate),
'x'.join(str(i) for i in self.filter_size),
''.join('+{}'.format(i) for i in self.stride),
inputs)
util.log('learnable parameters: {}', self.log_params())
def add_conv_weights(self, name, mean=0, std=None, sparsity=0):
'''Add a convolutional weight array to this layer's parameters.
Parameters
----------
name : str
Name of the parameter to add.
mean : float, optional
Mean value for randomly-initialized weights. Defaults to 0.
std : float, optional
Standard deviation of initial matrix values. Defaults to
:math:`1 / sqrt(n_i + n_o)`.
sparsity : float, optional
Fraction of weights to set to zero. Defaults to 0.
'''
nin = self.input_size
nout = self.output_size
mean = self.kwargs.get(
'mean_{}'.format(name),
self.kwargs.get('mean', mean))
std = self.kwargs.get(
'std_{}'.format(name),
self.kwargs.get('std', std or 1 / np.sqrt(nin + nout)))
sparsity = self.kwargs.get(
'sparsity_{}'.format(name),
self.kwargs.get('sparsity', sparsity))
arr = np.zeros((nout, nin) + self.filter_size, util.FLOAT)
for r in range(self.filter_size[0]):
for c in range(self.filter_size[1]):
arr[:, :, r, c] = util.random_matrix(
nout, nin, mean, std, sparsity=sparsity, rng=self.rng)
self._params.append(theano.shared(arr, name=self._fmt(name)))
class Conv1(Convolution):
'''1-dimensional convolutions run over one data axis.
Notes
-----
One-dimensional convolution layers are typically used in ``theanets`` models
that use recurrent inputs and outputs, i.e.,
:class:`theanets.recurrent.Autoencoder`,
:class:`theanets.recurrent.Predictor`,
:class:`theanets.recurrent.Classifier`, or
:class:`theanets.recurrent.Regressor`.
The convolution will be applied over the "time" dimension (axis 1).
Parameters
----------
filter_size : int
Length of the convolution filters for this layer.
stride : int, optional
Apply convolutions with this stride; i.e., skip this many samples
between convolutions. Defaults to 1, i.e., no skipping.
border_mode : str, optional
Compute convolutions with this border mode. Defaults to 'valid'.
'''
def __init__(self, filter_size, stride=1, border_mode='valid', **kwargs):
super(Conv1, self).__init__(
filter_size=(1, filter_size),
stride=(1, stride),
border_mode=border_mode,
**kwargs)
def setup(self):
self.add_conv_weights('w')
self.add_bias('b', self.output_size)
def resolve_outputs(self):
if self.input_shape is None or self.input_shape[0] is None:
return super(Conv1, self).resolve_outputs()
image = np.array(self.input_shape[:-1])
kernel = np.array(self.filter_size)
result = image
if self.border_mode == 'full':
result = image + kernel - 1
if self.border_mode == 'valid':
result = image - kernel + 1
self._output_shapes['out'] = tuple(result) + (self.kwargs['size'], )
def transform(self, inputs):
# input is: (batch, time, input)
# conv2d wants: (batch, input, 1, time)
x = inputs[self.input_name].dimshuffle(0, 2, 'x', 1)
pre = TT.nnet.conv2d(
x,
self.find('w'),
image_shape=(None, self.input_size, 1, None),
filter_shape=(self.output_size, self.input_size) + self.filter_size,
border_mode=self.border_mode,
subsample=self.stride,
).dimshuffle(0, 3, 1, 2)[:, :, :, 0] + self.find('b')
# conv2d output is: (batch, output, 1, time)
# we want: (batch, time, output)
# (have to do [:, :, :, 0] to remove unused trailing dimension)
return dict(pre=pre, out=self.activate(pre)), []
class Conv2(Convolution):
'''2-dimensional convolutions run over two data axes.
Two-dimensional convolution layers are standard image processing techniques.
In theanets, these layers expect an input consisting of (num-examples,
width, height, num-channels).
Parameters
----------
filter_size : (int, int)
Size of the convolution filters for this layer.
stride : (int, int), optional
Apply convolutions with this stride; i.e., skip this many samples
between convolutions. Defaults to (1, 1), i.e., no skipping.
border_mode : str, optional
Compute convolutions with this border mode. Defaults to 'valid'.
'''
def setup(self):
self.add_conv_weights('w')
self.add_bias('b', self.output_size)
def resolve_outputs(self):
shape = self.input_shape
if shape is None or shape[0] is None or shape[1] is None:
return super(Conv2, self).resolve_outputs()
image = np.array(shape[:-1])
kernel = np.array(self.filter_size)
result = image
if self.border_mode == 'full':
result = image + kernel - 1
if self.border_mode == 'valid':
result = image - kernel + 1
self._output_shapes['out'] = tuple(result) + (self.kwargs['size'], )
def transform(self, inputs):
# input is: (batch, width, height, input)
# conv2d wants: (batch, input, width, height)
x = inputs[self.input_name].dimshuffle(0, 3, 1, 2)
pre = TT.nnet.conv2d(
x,
self.find('w'),
image_shape=(None, self.input_size, None, None),
filter_shape=(self.output_size, self.input_size) + self.filter_size,
border_mode=self.border_mode,
subsample=self.stride,
).dimshuffle(0, 2, 3, 1) + self.find('b')
# conv2d output is: (batch, output, width, height)
# we want: (batch, width, height, output)
return dict(pre=pre, out=self.activate(pre)), []
class Pooling(base.Layer):
    '''Base class for pooling layers, which downsample their inputs.'''
class Pool1(Pooling):
    '''1-dimensional pooling layers downsample over the time axis (axis 1).'''
def transform(self, inputs):
# input is: (batch, time, input)
# conv2d wants: (batch, input, time, 1)
x = inputs[self.input_name].dimshuffle(0, 2, 1, 'x')
pre = TT.signal.downsample.max_pool_2d(
x, self.pool_size, st=self.stride, mode=self.mode,
).dimshuffle(0, 2, 1, 3)[:, :, :, 0]
# conv2d output is: (batch, output, time, 1)
# we want: (batch, time, output)
return dict(pre=pre, out=self.activate(pre)), []
class Pool2(Pooling):
    '''2-dimensional pooling layers downsample over the width and height axes.'''
def transform(self, inputs):
# input is: (batch, width, height, input)
# conv2d wants: (batch, input, width, height)
x = inputs[self.input_name].dimshuffle(0, 3, 1, 2)
pre = TT.signal.downsample.max_pool_2d(
x, self.pool_size, st=self.stride, mode=self.mode,
).dimshuffle(0, 2, 3, 1)
# conv2d output is: (batch, output, width, height)
# we want: (batch, width, height, output)
return dict(pre=pre, out=self.activate(pre)), []
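# Shape-arithmetic sketch (illustrative numbers only; mirrors resolve_outputs above):
# a 'valid' convolution shrinks each spatial axis to image - kernel + 1, while a 'full'
# convolution grows it to image + kernel - 1.
if __name__ == '__main__':
    image = np.array((28, 28))                     # input width x height
    kernel = np.array((5, 5))                      # filter_size
    print([int(v) for v in image - kernel + 1])    # 'valid' border mode -> [24, 24]
    print([int(v) for v in image + kernel - 1])    # 'full' border mode  -> [32, 32]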
|
s2s-ft/setup.py
|
Maria-philna/unilm
| 5,129 |
66085
|
<filename>s2s-ft/setup.py
from io import open
from setuptools import find_packages, setup
extras = {
'serving': ['pydantic', 'uvicorn', 'fastapi'],
'serving-tf': ['pydantic', 'uvicorn', 'fastapi'],
'serving-torch': ['pydantic', 'uvicorn', 'fastapi', 'torch']
}
extras['all'] = [package for packages in extras.values() for package in packages]
setup(
name="s2s-ft",
version="0.0.1",
author="UniLM Team",
author_email="<EMAIL>",
description="Fine-Tuning Bidirectional Transformers for Sequence-to-Sequence Learning",
long_description=open("README.md", "r", encoding='utf-8').read(),
long_description_content_type="text/markdown",
keywords='Fine-Tuning Bidirectional Transformers for Sequence-to-Sequence Learning',
license='Apache',
url="https://github.com/microsoft/unilm/tree/master/s2s-ft",
packages=find_packages(exclude=["*.tests", "*.tests.*",
"tests.*", "tests"]),
install_requires=['numpy',
'boto3',
'requests',
'tqdm',
'regex != 2019.12.17',
'sentencepiece',
'sacremoses',
'tensorboardX',
'transformers <= 2.10.0'],
extras_require=extras,
python_requires='>=3.5.0',
classifiers=[
'Programming Language :: Python :: 3',
],
)
|
src/python/nimbusml/internal/entrypoints/_ensemblebinarydiversitymeasure_disagreementdiversitymeasure.py
|
michaelgsharp/NimbusML
| 134 |
66094
|
<gh_stars>100-1000
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
DisagreementDiversityMeasure
"""
from ..utils.entrypoints import Component
def disagreement_diversity_measure(
**params):
"""
**Description**
None
"""
entrypoint_name = 'DisagreementDiversityMeasure'
settings = {}
component = Component(
name=entrypoint_name,
settings=settings,
kind='EnsembleBinaryDiversityMeasure')
return component
|
ocr/utils/beam_search.py
|
vee51/Hand
| 435 |
66107
|
<gh_stars>100-1000
# From https://github.com/githubharald/CTCDecoder
#
#MIT License
#Copyright (c) 2018 <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from __future__ import division
from __future__ import print_function
import numpy as np
class BeamEntry:
"information about one single beam at specific time-step"
def __init__(self):
self.prTotal = 0 # blank and non-blank
self.prNonBlank = 0 # non-blank
self.prBlank = 0 # blank
self.prText = 1 # LM score
self.lmApplied = False # flag if LM was already applied to this beam
self.labeling = () # beam-labeling
class BeamState:
"information about the beams at specific time-step"
def __init__(self):
self.entries = {}
def norm(self):
"length-normalise LM score"
for (k, _) in self.entries.items():
labelingLen = len(self.entries[k].labeling)
self.entries[k].prText = self.entries[k].prText ** (1.0 / (labelingLen if labelingLen else 1.0))
def sort(self):
"return beam-labelings, sorted by probability"
beams = [v for (_, v) in self.entries.items()]
sortedBeams = sorted(beams, reverse=True, key=lambda x: x.prTotal*x.prText)
return [x.labeling for x in sortedBeams]
def applyLM(parentBeam, childBeam, classes, lm):
"calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars"
if lm and not childBeam.lmApplied:
c1 = classes[parentBeam.labeling[-1] if parentBeam.labeling else classes.index(' ')] # first char
c2 = classes[childBeam.labeling[-1]] # second char
lmFactor = 0.01 # influence of language model
bigramProb = lm.getCharBigram(c1, c2) ** lmFactor # probability of seeing first and second char next to each other
childBeam.prText = parentBeam.prText * bigramProb # probability of char sequence
childBeam.lmApplied = True # only apply LM once per beam entry
def addBeam(beamState, labeling):
"add beam if it does not yet exist"
if labeling not in beamState.entries:
beamState.entries[labeling] = BeamEntry()
def ctcBeamSearch(mat, classes, lm, beamWidth):
"beam search as described by the paper of Hwang et al. and the paper of Graves et al."
blankIdx = len(classes)
maxT, maxC = mat.shape
# initialise beam state
last = BeamState()
labeling = ()
last.entries[labeling] = BeamEntry()
last.entries[labeling].prBlank = 1
last.entries[labeling].prTotal = 1
# go over all time-steps
for t in range(maxT):
curr = BeamState()
# get beam-labelings of best beams
bestLabelings = last.sort()[0:beamWidth]
# go over best beams
for labeling in bestLabelings:
# probability of paths ending with a non-blank
prNonBlank = 0
# in case of non-empty beam
if labeling:
# probability of paths with repeated last char at the end
try:
prNonBlank = last.entries[labeling].prNonBlank * mat[t, labeling[-1]]
except FloatingPointError:
prNonBlank = 0
# probability of paths ending with a blank
prBlank = (last.entries[labeling].prTotal) * mat[t, blankIdx]
# add beam at current time-step if needed
addBeam(curr, labeling)
# fill in data
curr.entries[labeling].labeling = labeling
curr.entries[labeling].prNonBlank += prNonBlank
curr.entries[labeling].prBlank += prBlank
curr.entries[labeling].prTotal += prBlank + prNonBlank
            curr.entries[labeling].prText = last.entries[labeling].prText # beam-labeling not changed, therefore also LM score unchanged from last time-step
curr.entries[labeling].lmApplied = True # LM already applied at previous time-step for this beam-labeling
# extend current beam-labeling
for c in range(maxC - 1):
# add new char to current beam-labeling
newLabeling = labeling + (c,)
# if new labeling contains duplicate char at the end, only consider paths ending with a blank
if labeling and labeling[-1] == c:
prNonBlank = mat[t, c] * last.entries[labeling].prBlank
else:
prNonBlank = mat[t, c] * last.entries[labeling].prTotal
# add beam at current time-step if needed
addBeam(curr, newLabeling)
# fill in data
curr.entries[newLabeling].labeling = newLabeling
curr.entries[newLabeling].prNonBlank += prNonBlank
curr.entries[newLabeling].prTotal += prNonBlank
# apply LM
applyLM(curr.entries[labeling], curr.entries[newLabeling], classes, lm)
# set new beam state
last = curr
# normalise LM scores according to beam-labeling-length
last.norm()
# sort by probability
bestLabelings = last.sort()[:beamWidth] # get most probable labeling
output = []
for bestLabeling in bestLabelings:
# map labels to chars
res = ''
for l in bestLabeling:
res += classes[l]
output.append(res)
return output
def testBeamSearch():
"test decoder"
classes = 'ab'
mat = np.array([[0.4, 0, 0.6], [0.4, 0, 0.6]])
print('Test beam search')
expected = 'a'
    actual = ctcBeamSearch(mat, classes, None, beamWidth=25)[0]  # best labeling (beam width chosen for the test)
print('Expected: "' + expected + '"')
print('Actual: "' + actual + '"')
print('OK' if expected == actual else 'ERROR')
if __name__ == '__main__':
testBeamSearch()
|
saspy/sasexceptions.py
|
metllord/saspy
| 317 |
66130
|
<gh_stars>100-1000
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class SASConfigNotFoundError(Exception):
def __init__(self, path: str):
self.path = path
def __str__(self):
return 'Configuration path {} does not exist.'.format(self.path)
class SASConfigNotValidError(Exception):
def __init__(self, defn: str, msg: str=None):
self.defn = defn if defn else 'N/A'
self.msg = msg
def __str__(self):
return 'Configuration definition {} is not valid. {}'.format(self.defn, self.msg)
class SASIONotSupportedError(Exception):
def __init__(self, method: str, alts: list=None):
self.method = method
self.alts = alts
def __str__(self):
if self.alts is not None:
alt_text = 'Try the following: {}'.format(', '.join(self.alts))
else:
alt_text = ''
return 'Cannot use {} I/O module on Windows. {}'.format(self.method, alt_text)
class SASHTTPauthenticateError(Exception):
def __init__(self, msg: str):
self.msg = msg
def __str__(self):
return 'Failure in GET AuthToken.\n {}'.format(self.msg)
class SASHTTPconnectionError(Exception):
def __init__(self, msg: str):
self.msg = msg
def __str__(self):
return 'Failure in GET Connection.\n {}'.format(self.msg)
class SASHTTPsubmissionError(Exception):
def __init__(self, msg: str):
self.msg = msg
def __str__(self):
return 'Failure in submit().\n {}'.format(self.msg)
|
crow/scripts/find_eigen.py
|
rinelson456/raven
| 159 |
66168
|
#!/bin/env python
from __future__ import division, print_function , unicode_literals, absolute_import
import os, sys, subprocess
crow_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
eigen_cflags = ""
try:
has_pkg_eigen = subprocess.call(["pkg-config","--exists","eigen3"]) == 0
except:
has_pkg_eigen = False
if has_pkg_eigen:
  eigen_cflags = subprocess.check_output(["pkg-config","eigen3","--cflags"]).decode().strip()
libmesh_eigen = os.path.abspath(os.path.join(crow_dir,os.pardir,"moose","libmesh","contrib","eigen","eigen"))
if os.path.exists(libmesh_eigen):
eigen_cflags = "-I"+libmesh_eigen
if os.path.exists(os.path.join(crow_dir,"contrib","include","Eigen")):
eigen_cflags = ""
print(eigen_cflags)
|
baal/utils/plot_utils.py
|
llv22/baal_tf2.4_mac
| 575 |
66229
|
<reponame>llv22/baal_tf2.4_mac<filename>baal/utils/plot_utils.py
from typing import List
import matplotlib.pyplot as plt
import numpy as np
BG_COLOR = "lavender"
FG_COLORS = [
"b",
"g",
"r",
"c",
"m",
"y",
"tab:orange",
"tab:purple",
"limegreen",
"yellow",
"tab:brown",
]
def make_animation_from_data(
features: np.ndarray, labels: np.ndarray, labelled_at: np.ndarray, classes: List[str]
) -> List[np.ndarray]:
"""
Make an animation that show the progress of labelling.
Args:
features: 2d features representation of the inputs. Shape [samples, 2]
labels: Label id for each inputs. Shape [samples]
labelled_at: Index at which the input was labelled. Shape [samples]
classes: List of classes.
Returns:
Animated frames of the labelling process.
You can then save it locally with:
`imageio.mimsave('output.gif', frames, fps=3)`
"""
assert features.ndim == 2 and features.shape[-1] == 2, "Can only plot 2d points!"
frames = []
for frame_id in reversed(range(np.max(labelled_at))):
# New frame
fig, ax = plt.subplots(figsize=(10, 10))
# Filter stuff
currently_labelled = labelled_at > frame_id
unlabelled_features = features[~currently_labelled]
labelled_features = features[currently_labelled]
labelled_labels = labels[currently_labelled]
unique_labels = np.unique(labelled_labels)
ax.scatter(
unlabelled_features[:, 0],
unlabelled_features[:, 1],
c=BG_COLOR,
label="Unlabelled",
marker="x",
zorder=2,
)
for color, label_name, label_id in zip(FG_COLORS, classes, unique_labels):
label_mask = labelled_labels == label_id
pts = labelled_features[label_mask]
ax.scatter(pts[:, 0], pts[:, 1], c=color, label=label_name, marker="x", zorder=2)
ax.set_title(
"{} : {}/{}".format(
"Labelling progress", currently_labelled.sum(), len(currently_labelled)
)
)
ax.legend(loc="best", ncol=1, prop={"size": 15}, markerscale=3, fancybox=True, shadow=True)
fig.set_size_inches(15, 10.0)
fig.canvas.draw()
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype="uint8")
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
frames.append(image)
plt.close(fig)
return frames
if __name__ == "__main__":
from sklearn.datasets import make_classification
import imageio
# 2D input to mimic a t-SNE-like shape.
X, y = make_classification(
n_features=2,
n_redundant=0,
n_informative=2,
random_state=1,
n_clusters_per_class=1,
n_classes=3,
)
labelled_at = np.random.randint(0, 100, size=[X.shape[0]])
class_name = ["cow", "dog", "cat"]
frames = make_animation_from_data(X, y, labelled_at, class_name)
imageio.mimsave("output.gif", frames, fps=3)
|
test/test_util.py
|
westonsteimel/pip-audit
| 447 |
66239
|
<filename>test/test_util.py
from packaging.version import Version
import pip_audit._util as util
def test_python_version():
v = util.python_version()
assert v is not None
assert isinstance(v, Version)
|
fairseq/legacy_distributed_data_parallel.py
|
blufb/fairseq
| 307 |
66256
|
<gh_stars>100-1000
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
A modified version of the legacy DistributedDataParallel module that uses c10d
communication primitives. This is necessary for models that have conditional
computation (e.g., AdaptiveSoftmax) and which therefore do not work with the
c10d version of DDP.
This version also supports the *accumulate_grads* feature, which allows faster
training with `--update-freq`.
"""
import copy
import torch
from torch import nn
from torch.autograd import Variable
from . import distributed_utils
class LegacyDistributedDataParallel(nn.Module):
"""Implements distributed data parallelism at the module level.
A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
This version uses a c10d process group for communication and does not
broadcast buffers.
Args:
module (~torch.nn.Module): module to be parallelized
world_size (int): number of parallel workers
process_group (optional): the c10d process group to be used for
distributed data all-reduction. If None, the default process group
will be used.
buffer_size (int, optional): number of elements to buffer before
performing all-reduce (default: 256M).
"""
def __init__(self, module, world_size, process_group=None, buffer_size=2**28):
super().__init__()
self.module = module
self.world_size = world_size
self.process_group = process_group
# Never use a bigger buffer than the number of model params
self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))
self.buffer = None
# Flag used by the NCCL backend to make sure we only reduce gradients
# one time in the execution engine
self.need_reduction = False
# We can also forcibly accumulate grads locally and only do the
# all-reduce at some later time
self.accumulate_grads = False
        # For NCCL backend, since every single NCCL call is asynchronous, we
# therefore directly enqueue all the NCCL reduction calls to the
# default CUDA stream without spawning up other reduction threads.
# This achieves the best performance.
self._register_grad_hook()
def __getstate__(self):
attrs = copy.copy(self.__dict__)
return attrs
def __setstate__(self, state):
super().__setstate__(state)
self._register_grad_hook()
def forward(self, *inputs, **kwargs):
return self.module(*inputs, **kwargs)
def _register_grad_hook(self):
"""
This function registers the callback all-reduction function for the
NCCL backend. All gradients will be all reduced in one single step.
The NCCL reduction will directly be enqueued into the default CUDA
stream. Therefore, no synchronization is needed.
"""
def all_reduce(params):
buffer = self.buffer
nonzero_buffer = False
if len(params) > 1:
offset = 0
for p in params:
sz = p.numel()
if p.grad is not None:
buffer[offset:offset+sz].copy_(p.grad.data.view(-1))
nonzero_buffer = True
else:
buffer[offset:offset+sz].zero_()
offset += sz
else:
# we only have a single grad to all-reduce
p = params[0]
if p.grad is not None:
buffer = p.grad.data
nonzero_buffer = True
elif p.numel() <= self.buffer.numel():
buffer = buffer[:p.numel()]
buffer.zero_()
else:
buffer = torch.zeros_like(p)
if nonzero_buffer:
buffer.div_(self.world_size)
distributed_utils.all_reduce(buffer, self.process_group)
# copy all-reduced grads back into their original place
offset = 0
for p in params:
sz = p.numel()
if p.grad is not None:
p.grad.data.copy_(buffer[offset:offset+sz].view_as(p))
else:
p.grad = buffer[offset:offset+sz].view_as(p).clone()
offset += sz
def reduction_fn():
# This function only needs to be called once
if not self.need_reduction or self.accumulate_grads:
return
self.need_reduction = False
if self.buffer is None:
self.buffer = next(self.module.parameters()).new(self.buffer_size)
# All-reduce the gradients in buckets
offset = 0
buffered_params = []
for param in self.module.parameters():
if not param.requires_grad:
continue
if param.grad is None:
param.grad = torch.zeros_like(param)
if param.grad.requires_grad:
raise RuntimeError("DistributedDataParallel only works "
"with gradients that don't require "
"grad")
sz = param.numel()
if sz > self.buffer.numel():
# all-reduce big params directly
all_reduce([param])
else:
if offset + sz > self.buffer.numel():
all_reduce(buffered_params)
offset = 0
buffered_params.clear()
buffered_params.append(param)
offset += sz
if len(buffered_params) > 0:
all_reduce(buffered_params)
# Now register the reduction hook on the parameters
for p in self.module.parameters():
def allreduce_hook(*unused):
self.need_reduction = True
Variable._execution_engine.queue_callback(reduction_fn)
if p.requires_grad:
p.register_hook(allreduce_hook)
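# Hypothetical usage sketch (assumes the caller has already initialised the c10d/NCCL
# process group; `model`, `criterion`, `batch` and `target` are placeholders):
#
#   ddp_model = LegacyDistributedDataParallel(model.cuda(), world_size=world_size)
#   loss = criterion(ddp_model(batch), target)
#   loss.backward()   # the hooks registered above queue the bucketed all-reduce here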
|
panoptic_mapping_utils/src/detectron2/create_detectron_predictions.py
|
YuePanEdward/panoptic_mapping
| 101 |
66263
|
#!/usr/bin/python
# export PYTHONPATH=/home/lukas/anaconda3/envs/detectron/bin/python
# import some common libraries
from genericpath import isdir
import numpy as np
import os
import json
import cv2
import time
import csv
import detectron2
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.utils.visualizer import Visualizer
from dataclasses import dataclass
@dataclass
class Params:
target_path: str = '/home/lukas/Documents/Datasets/flat_dataset/run1'
model: str = 'COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml'
output_label_file: str = '' # Leave empty to not write labels.
rio: bool = False
def create_labels(meta_data, output_file: str = ""):
sizes = [
'L', 'M', 'L', 'M', 'L', 'L', 'L', 'L', 'L', 'M', 'M', 'M', 'S', 'L',
'S', 'M', 'M', 'L', 'M', 'L', 'L', 'L', 'L', 'L', 'M', 'S', 'S', 'S',
'S', 'S', 'M', 'M', 'S', 'M', 'M', 'S', 'S', 'M', 'S', 'S', 'S', 'S',
'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S',
'M', 'L', 'M', 'L', 'M', 'M', 'M', 'S', 'S', 'S', 'S', 'S', 'M', 'M',
'S', 'M', 'L', 'S', 'M', 'M', 'S', 'M', 'S', 'S'
]
if (output_file):
with open(output_file, 'w') as csvfile:
writer = csv.writer(csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
writer.writerow(
["InstanceID", "ClassID", "PanopticID", "Name", "Size"])
writer.writerow([0, 0, 0, "Unknown", "M"])
id = 1
for label in meta_data.stuff_classes:
writer.writerow([id, id, 0, label, 'L'])
id += 1
for i, label in enumerate(meta_data.thing_classes):
writer.writerow([id, id, 1, label, sizes[i]])
id += 1
return len(meta_data.stuff_classes), "Saved %i labels in '%s'." % (
id, output_file)
else:
return len(meta_data.stuff_classes), ""
def create_predictions(params: Params):
# Verify.
if not os.path.isdir(params.target_path):
print("Error: Directory '%s' does not exist." % params.target_path)
return
print("Processing target '%s'." % params.target_path)
# Setup model.
print("Setting up Detectron2 model... ", end="", flush="True")
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(params.model))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(params.model)
cfg.MODEL.DEVICE = 'cpu'
predictor = DefaultPredictor(cfg)
print("done!")
# Setup labels.
print("Setting up labels... ", end="", flush="True")
meta_data = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
label_offset, msg = create_labels(meta_data, params.output_label_file)
print("done!")
if msg:
print(msg)
# Get files to parse.
files = [
o for o in os.listdir(params.target_path)
if os.path.isfile(os.path.join(params.target_path, o))
]
if params.rio:
files = [f for f in files if f.endswith('.color.jpg')]
else:
files = [f for f in files if f.endswith('.color.jpg')]
times = []
# Run inference.
msg = "Predicting %i images... " % len(files)
for i, im_file in enumerate(files):
print(msg + '%.1f%%' % (i / len(files) * 100, ), end='\r', flush=True)
im = cv2.imread(os.path.join(params.target_path, im_file))
if params.rio:
im = cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
# Predict.
t1 = time.perf_counter()
panoptic_seg, segments_info = predictor(im)["panoptic_seg"]
t2 = time.perf_counter()
times.append(t2 - t1)
# Write output.
if params.rio:
file_id = im_file[:12]
else:
file_id = im_file[:6]
id_img = panoptic_seg.numpy()
cv2.imwrite(
os.path.join(params.target_path, file_id + "_predicted2.png"),
id_img)
for segment_info in segments_info:
if segment_info['isthing']:
segment_info['category_id'] += label_offset
segment_info['category_id'] += 1 # Compensate for unknown class.
with open(os.path.join(params.target_path, file_id + "_labels.json"),
'w') as json_file:
json.dump(segments_info, json_file)
print(msg + "done!")
# Finish.
times = np.array(times, dtype=float) * 1000
print("Average inference time was %.1f +/- %.1f ms per frame." %
(np.mean(times), np.std(times)))
print("Finished parsing '%s'." % params.target_path)
if __name__ == '__main__':
# Params.
params = Params()
params.model = "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"
params.target_path = '/home/lukas/Documents/Datasets/flat_dataset/run2'
params.output_label_file = '' #'/home/lukas/Documents/Datasets/flat_dataset/detectron_labels.csv'
params.rio = True
# Run
if params.rio:
base_dir = '/home/lukas/Documents/Datasets/3RScan'
dirs = [
x for x in os.listdir(base_dir)
if os.path.isdir(base_dir + "/" + x) and x != 'not_used'
]
for d in dirs:
params.target_path = os.path.join(base_dir, d, "sequence")
create_predictions(params)
else:
create_predictions(params)
|
tests/hikari/impl/test_buckets.py
|
sabidib/hikari
| 520 |
66301
|
<filename>tests/hikari/impl/test_buckets.py
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021 davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import time
import mock
import pytest
from hikari import errors
from hikari.impl import buckets
from hikari.impl import rate_limits
from hikari.internal import routes
from hikari.internal import time as hikari_date
from tests.hikari import hikari_test_helpers
class TestRESTBucket:
@pytest.fixture()
def template(self):
return routes.Route("GET", "/foo/bar")
@pytest.fixture()
def compiled_route(self, template):
return routes.CompiledRoute("/foo/bar", template, "1a2b3c")
@pytest.mark.asyncio()
async def test_async_context_manager(self, compiled_route):
with mock.patch.object(asyncio, "Lock") as lock:
lock.return_value.acquire = mock.AsyncMock()
with mock.patch.object(buckets.RESTBucket, "acquire", new=mock.AsyncMock()) as acquire:
async with buckets.RESTBucket("spaghetti", compiled_route, float("inf")):
acquire.assert_awaited_once_with()
lock.return_value.release.assert_not_called()
lock.return_value.release.assert_called_once_with()
@pytest.mark.parametrize("name", ["spaghetti", buckets.UNKNOWN_HASH])
def test_is_unknown(self, name, compiled_route):
with buckets.RESTBucket(name, compiled_route, float("inf")) as rl:
assert rl.is_unknown is (name == buckets.UNKNOWN_HASH)
def test_update_rate_limit(self, compiled_route):
with buckets.RESTBucket(__name__, compiled_route, float("inf")) as rl:
rl.remaining = 1
rl.limit = 2
rl.reset_at = 3
rl.period = 2
with mock.patch.object(hikari_date, "monotonic", return_value=4.20):
rl.update_rate_limit(9, 18, 27)
assert rl.remaining == 9
assert rl.limit == 18
assert rl.reset_at == 27
assert rl.period == 27 - 4.20
@pytest.mark.parametrize("name", ["spaghetti", buckets.UNKNOWN_HASH])
def test_drip(self, name, compiled_route):
with buckets.RESTBucket(name, compiled_route, float("inf")) as rl:
rl.remaining = 1
rl.drip()
            assert rl.remaining == (0 if name != buckets.UNKNOWN_HASH else 1)
@pytest.mark.asyncio()
async def test_acquire_when_unknown_bucket(self, compiled_route):
with buckets.RESTBucket(buckets.UNKNOWN_HASH, compiled_route, float("inf")) as rl:
rl._lock = mock.AsyncMock()
with mock.patch.object(rate_limits.WindowedBurstRateLimiter, "acquire") as super_acquire:
assert await rl.acquire() is None
rl._lock.acquire.assert_awaited_once_with()
super_acquire.assert_not_called()
@pytest.mark.asyncio()
async def test_acquire_when_too_long_ratelimit(self, compiled_route):
with buckets.RESTBucket("spaghetti", compiled_route, 60) as rl:
rl.reset_at = time.perf_counter() + 999999999999999999999999999
with mock.patch.object(buckets.RESTBucket, "is_rate_limited", return_value=True):
with pytest.raises(errors.RateLimitTooLongError):
await rl.acquire()
@pytest.mark.asyncio()
async def test_acquire(self, compiled_route):
with buckets.RESTBucket("spaghetti", compiled_route, float("inf")) as rl:
rl._lock = mock.AsyncMock()
with mock.patch.object(rate_limits.WindowedBurstRateLimiter, "acquire") as super_acquire:
await rl.acquire()
super_acquire.assert_awaited_once_with()
rl._lock.acquire.assert_awaited_once_with()
def test_resolve_when_not_unknown(self, compiled_route):
with buckets.RESTBucket("spaghetti", compiled_route, float("inf")) as rl:
with pytest.raises(RuntimeError, match=r"Cannot resolve known bucket"):
rl.resolve("test")
assert rl.name == "spaghetti"
def test_resolve(self, compiled_route):
with buckets.RESTBucket(buckets.UNKNOWN_HASH, compiled_route, float("inf")) as rl:
rl.resolve("test")
assert rl.name == "test"
class TestRESTBucketManager:
@pytest.mark.asyncio()
async def test_close_closes_all_buckets(self):
class MockBucket:
def __init__(self):
self.close = mock.Mock()
buckets_array = [MockBucket() for _ in range(30)]
mgr = buckets.RESTBucketManager(max_rate_limit=float("inf"))
mgr.real_hashes_to_buckets = {f"blah{i}": bucket for i, bucket in enumerate(buckets_array)}
mgr.close()
for i, bucket in enumerate(buckets_array):
bucket.close.assert_called_once(), i
@pytest.mark.asyncio()
async def test_close_sets_closed_event(self):
mgr = buckets.RESTBucketManager(max_rate_limit=float("inf"))
assert not mgr.closed_event.is_set()
mgr.close()
assert mgr.closed_event.is_set()
@pytest.mark.asyncio()
async def test_start(self):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
assert mgr.gc_task is None
mgr.start()
mgr.start()
mgr.start()
assert mgr.gc_task is not None
@pytest.mark.asyncio()
async def test_exit_closes(self):
with mock.patch.object(buckets.RESTBucketManager, "close") as close:
with mock.patch.object(buckets.RESTBucketManager, "gc") as gc:
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
mgr.start(0.01, 32)
gc.assert_called_once_with(0.01, 32)
close.assert_called()
@pytest.mark.asyncio()
async def test_gc_polls_until_closed_event_set(self):
# This is shit, but it is good shit.
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
mgr.start(0.01)
assert mgr.gc_task is not None
assert not mgr.gc_task.done()
await hikari_test_helpers.idle()
assert mgr.gc_task is not None
assert not mgr.gc_task.done()
await hikari_test_helpers.idle()
mgr.closed_event.set()
assert mgr.gc_task is not None
assert not mgr.gc_task.done()
task = mgr.gc_task
await hikari_test_helpers.idle()
assert mgr.gc_task is None
assert task.done()
@pytest.mark.asyncio()
async def test_gc_calls_do_pass(self):
with hikari_test_helpers.mock_class_namespace(buckets.RESTBucketManager, slots_=False)(
max_rate_limit=float("inf")
) as mgr:
mgr.do_gc_pass = mock.Mock()
mgr.start(0.01, 33)
try:
await hikari_test_helpers.idle()
mgr.do_gc_pass.assert_called_with(33)
finally:
mgr.gc_task.cancel()
@pytest.mark.asyncio()
async def test_do_gc_pass_any_buckets_that_are_empty_but_still_rate_limited_are_kept_alive(self):
with hikari_test_helpers.mock_class_namespace(buckets.RESTBucketManager)(max_rate_limit=float("inf")) as mgr:
bucket = mock.Mock()
bucket.is_empty = True
bucket.is_unknown = False
bucket.reset_at = time.perf_counter() + 999999999999999999999999999
mgr.real_hashes_to_buckets["foobar"] = bucket
mgr.do_gc_pass(0)
assert "foobar" in mgr.real_hashes_to_buckets
bucket.close.assert_not_called()
@pytest.mark.asyncio()
async def test_do_gc_pass_any_buckets_that_are_empty_but_not_rate_limited_and_not_expired_are_kept_alive(self):
with hikari_test_helpers.mock_class_namespace(buckets.RESTBucketManager)(max_rate_limit=float("inf")) as mgr:
bucket = mock.Mock()
bucket.is_empty = True
bucket.is_unknown = False
bucket.reset_at = time.perf_counter()
mgr.real_hashes_to_buckets["foobar"] = bucket
mgr.do_gc_pass(10)
assert "foobar" in mgr.real_hashes_to_buckets
bucket.close.assert_not_called()
@pytest.mark.asyncio()
async def test_do_gc_pass_any_buckets_that_are_empty_but_not_rate_limited_and_expired_are_closed(self):
with hikari_test_helpers.mock_class_namespace(buckets.RESTBucketManager)(max_rate_limit=float("inf")) as mgr:
bucket = mock.Mock()
bucket.is_empty = True
bucket.is_unknown = False
bucket.reset_at = time.perf_counter() - 999999999999999999999999999
mgr.real_hashes_to_buckets["foobar"] = bucket
mgr.do_gc_pass(0)
assert "foobar" not in mgr.real_hashes_to_buckets
bucket.close.assert_called_once()
@pytest.mark.asyncio()
async def test_do_gc_pass_any_buckets_that_are_not_empty_are_kept_alive(self):
with hikari_test_helpers.mock_class_namespace(buckets.RESTBucketManager)(max_rate_limit=float("inf")) as mgr:
bucket = mock.Mock()
bucket.is_empty = False
bucket.is_unknown = True
bucket.reset_at = time.perf_counter()
mgr.real_hashes_to_buckets["foobar"] = bucket
mgr.do_gc_pass(0)
assert "foobar" in mgr.real_hashes_to_buckets
bucket.close.assert_not_called()
@pytest.mark.asyncio()
async def test_acquire_route_when_not_in_routes_to_real_hashes_makes_new_bucket_using_initial_hash(self):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
route = mock.Mock()
with mock.patch.object(buckets, "_create_unknown_hash", return_value="UNKNOWN;bobs") as create_unknown_hash:
mgr.acquire(route)
assert "UNKNOWN;bobs" in mgr.real_hashes_to_buckets
assert isinstance(mgr.real_hashes_to_buckets["UNKNOWN;bobs"], buckets.RESTBucket)
create_unknown_hash.assert_called_once_with(route)
@pytest.mark.asyncio()
async def test_acquire_route_when_not_in_routes_to_real_hashes_doesnt_cache_route(self):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
route = mock.Mock()
            route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")
mgr.acquire(route)
assert mgr.routes_to_hashes.get(route.route) is None
@pytest.mark.asyncio()
async def test_acquire_route_when_route_cached_already_obtains_hash_from_route_and_bucket_from_hash(self):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
route = mock.Mock()
route.create_real_bucket_hash = mock.Mock(return_value="eat pant;1234")
bucket = mock.Mock(reset_at=time.perf_counter() + 999999999999999999999999999)
mgr.routes_to_hashes[route.route] = "eat pant"
mgr.real_hashes_to_buckets["eat pant;1234"] = bucket
assert mgr.acquire(route) is bucket
@pytest.mark.asyncio()
async def test_acquire_route_returns_context_manager(self):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
route = mock.Mock()
bucket = mock.Mock(reset_at=time.perf_counter() + 999999999999999999999999999)
with mock.patch.object(buckets, "RESTBucket", return_value=bucket):
route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")
assert mgr.acquire(route) is bucket
@pytest.mark.asyncio()
async def test_acquire_unknown_route_returns_context_manager_for_new_bucket(self):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
route = mock.Mock()
route.create_real_bucket_hash = mock.Mock(return_value="eat pant;bobs")
bucket = mock.Mock(reset_at=time.perf_counter() + 999999999999999999999999999)
mgr.routes_to_hashes[route.route] = "eat pant"
mgr.real_hashes_to_buckets["eat pant;bobs"] = bucket
assert mgr.acquire(route) is bucket
@pytest.mark.asyncio()
async def test_update_rate_limits_if_wrong_bucket_hash_reroutes_route(self):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
route = mock.Mock()
route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")
mgr.routes_to_hashes[route.route] = "123"
with mock.patch.object(hikari_date, "monotonic", return_value=27):
with mock.patch.object(buckets, "RESTBucket") as bucket:
mgr.update_rate_limits(route, "blep", 22, 23, 3.56)
assert mgr.routes_to_hashes[route.route] == "blep"
assert mgr.real_hashes_to_buckets["blep;bobs"] is bucket.return_value
bucket.return_value.update_rate_limit.assert_called_once_with(22, 23, 27 + 3.56)
@pytest.mark.asyncio()
async def test_update_rate_limits_if_unknown_bucket_hash_reroutes_route(self):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
route = mock.Mock()
route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")
mgr.routes_to_hashes[route.route] = "123"
bucket = mock.Mock()
mgr.real_hashes_to_buckets["UNKNOWN;bobs"] = bucket
with mock.patch.object(buckets, "_create_unknown_hash", return_value="UNKNOWN;bobs") as create_unknown_hash:
with mock.patch.object(hikari_date, "monotonic", return_value=27):
mgr.update_rate_limits(route, "blep", 22, 23, 3.56)
assert mgr.routes_to_hashes[route.route] == "blep"
assert mgr.real_hashes_to_buckets["blep;bobs"] is bucket
bucket.resolve.assert_called_once_with("blep;bobs")
bucket.update_rate_limit.assert_called_once_with(22, 23, 27 + 3.56)
create_unknown_hash.assert_called_once_with(route)
@pytest.mark.asyncio()
async def test_update_rate_limits_if_right_bucket_hash_does_nothing_to_hash(self):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
route = mock.Mock()
route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")
mgr.routes_to_hashes[route.route] = "123"
bucket = mock.Mock(reset_at=time.perf_counter() + 999999999999999999999999999)
mgr.real_hashes_to_buckets["123;bobs"] = bucket
with mock.patch.object(hikari_date, "monotonic", return_value=27):
mgr.update_rate_limits(route, "123", 22, 23, 7.65)
assert mgr.routes_to_hashes[route.route] == "123"
assert mgr.real_hashes_to_buckets["123;bobs"] is bucket
bucket.update_rate_limit.assert_called_once_with(22, 23, 27 + 7.65)
@pytest.mark.asyncio()
async def test_update_rate_limits_updates_params(self):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
route = mock.Mock()
route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")
mgr.routes_to_hashes[route.route] = "123"
bucket = mock.Mock(reset_at=time.perf_counter() + 999999999999999999999999999)
mgr.real_hashes_to_buckets["123;bobs"] = bucket
with mock.patch.object(hikari_date, "monotonic", return_value=27):
mgr.update_rate_limits(route, "123", 22, 23, 5.32)
bucket.update_rate_limit.assert_called_once_with(22, 23, 27 + 5.32)
@pytest.mark.parametrize(("gc_task", "is_started"), [(None, False), (mock.Mock(spec_set=asyncio.Task), True)])
def test_is_started(self, gc_task, is_started):
with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
mgr.gc_task = gc_task
assert mgr.is_started is is_started
|
networks.py
|
SubZero12556/Cats2dogs_ONNX
| 2,519 |
66307
|
<filename>networks.py
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, img_size=256, light=False):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.n_blocks = n_blocks
self.img_size = img_size
self.light = light
DownBlock = []
DownBlock += [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, stride=1, padding=0, bias=False),
nn.InstanceNorm2d(ngf),
nn.ReLU(True)]
# Down-Sampling
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
DownBlock += [nn.ReflectionPad2d(1),
nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=0, bias=False),
nn.InstanceNorm2d(ngf * mult * 2),
nn.ReLU(True)]
# Down-Sampling Bottleneck
mult = 2**n_downsampling
for i in range(n_blocks):
DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]
# Class Activation Map
self.gap_fc = nn.Linear(ngf * mult, 1, bias=False)
self.gmp_fc = nn.Linear(ngf * mult, 1, bias=False)
self.conv1x1 = nn.Conv2d(ngf * mult * 2, ngf * mult, kernel_size=1, stride=1, bias=True)
self.relu = nn.ReLU(True)
# Gamma, Beta block
if self.light:
FC = [nn.Linear(ngf * mult, ngf * mult, bias=False),
nn.ReLU(True),
nn.Linear(ngf * mult, ngf * mult, bias=False),
nn.ReLU(True)]
else:
FC = [nn.Linear(img_size // mult * img_size // mult * ngf * mult, ngf * mult, bias=False),
nn.ReLU(True),
nn.Linear(ngf * mult, ngf * mult, bias=False),
nn.ReLU(True)]
self.gamma = nn.Linear(ngf * mult, ngf * mult, bias=False)
self.beta = nn.Linear(ngf * mult, ngf * mult, bias=False)
# Up-Sampling Bottleneck
for i in range(n_blocks):
setattr(self, 'UpBlock1_' + str(i+1), ResnetAdaILNBlock(ngf * mult, use_bias=False))
# Up-Sampling
UpBlock2 = []
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
UpBlock2 += [nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d(1),
nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0, bias=False),
ILN(int(ngf * mult / 2)),
nn.ReLU(True)]
UpBlock2 += [nn.ReflectionPad2d(3),
nn.Conv2d(ngf, output_nc, kernel_size=7, stride=1, padding=0, bias=False),
nn.Tanh()]
self.DownBlock = nn.Sequential(*DownBlock)
self.FC = nn.Sequential(*FC)
self.UpBlock2 = nn.Sequential(*UpBlock2)
def forward(self, input):
x = self.DownBlock(input)
gap = torch.nn.functional.adaptive_avg_pool2d(x, 1)
gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
gap_weight = list(self.gap_fc.parameters())[0]
gap = x * gap_weight.unsqueeze(2).unsqueeze(3)
gmp = torch.nn.functional.adaptive_max_pool2d(x, 1)
gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
gmp_weight = list(self.gmp_fc.parameters())[0]
gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)
cam_logit = torch.cat([gap_logit, gmp_logit], 1)
x = torch.cat([gap, gmp], 1)
x = self.relu(self.conv1x1(x))
heatmap = torch.sum(x, dim=1, keepdim=True)
if self.light:
x_ = torch.nn.functional.adaptive_avg_pool2d(x, 1)
x_ = self.FC(x_.view(x_.shape[0], -1))
else:
x_ = self.FC(x.view(x.shape[0], -1))
gamma, beta = self.gamma(x_), self.beta(x_)
for i in range(self.n_blocks):
x = getattr(self, 'UpBlock1_' + str(i+1))(x, gamma, beta)
out = self.UpBlock2(x)
return out, cam_logit, heatmap
class ResnetBlock(nn.Module):
def __init__(self, dim, use_bias):
super(ResnetBlock, self).__init__()
conv_block = []
conv_block += [nn.ReflectionPad2d(1),
nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias),
nn.InstanceNorm2d(dim),
nn.ReLU(True)]
conv_block += [nn.ReflectionPad2d(1),
nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias),
nn.InstanceNorm2d(dim)]
self.conv_block = nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class ResnetAdaILNBlock(nn.Module):
def __init__(self, dim, use_bias):
super(ResnetAdaILNBlock, self).__init__()
self.pad1 = nn.ReflectionPad2d(1)
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)
self.norm1 = adaILN(dim)
self.relu1 = nn.ReLU(True)
self.pad2 = nn.ReflectionPad2d(1)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)
self.norm2 = adaILN(dim)
def forward(self, x, gamma, beta):
out = self.pad1(x)
out = self.conv1(out)
out = self.norm1(out, gamma, beta)
out = self.relu1(out)
out = self.pad2(out)
out = self.conv2(out)
out = self.norm2(out, gamma, beta)
return out + x
class adaILN(nn.Module):
def __init__(self, num_features, eps=1e-5):
super(adaILN, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, input, gamma, beta):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1-self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3)
return out
class ILN(nn.Module):
def __init__(self, num_features, eps=1e-5):
super(ILN, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1))
self.beta = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.0)
self.gamma.data.fill_(1.0)
self.beta.data.fill_(0.0)
def forward(self, input):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1-self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
out = out * self.gamma.expand(input.shape[0], -1, -1, -1) + self.beta.expand(input.shape[0], -1, -1, -1)
return out
class Discriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=5):
super(Discriminator, self).__init__()
model = [nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=0, bias=True)),
nn.LeakyReLU(0.2, True)]
for i in range(1, n_layers - 2):
mult = 2 ** (i - 1)
model += [nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)),
nn.LeakyReLU(0.2, True)]
mult = 2 ** (n_layers - 2 - 1)
model += [nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)),
nn.LeakyReLU(0.2, True)]
# Class Activation Map
mult = 2 ** (n_layers - 2)
self.gap_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))
self.gmp_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))
self.conv1x1 = nn.Conv2d(ndf * mult * 2, ndf * mult, kernel_size=1, stride=1, bias=True)
self.leaky_relu = nn.LeakyReLU(0.2, True)
self.pad = nn.ReflectionPad2d(1)
self.conv = nn.utils.spectral_norm(
nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False))
self.model = nn.Sequential(*model)
def forward(self, input):
x = self.model(input)
gap = torch.nn.functional.adaptive_avg_pool2d(x, 1)
gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
gap_weight = list(self.gap_fc.parameters())[0]
gap = x * gap_weight.unsqueeze(2).unsqueeze(3)
gmp = torch.nn.functional.adaptive_max_pool2d(x, 1)
gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
gmp_weight = list(self.gmp_fc.parameters())[0]
gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)
cam_logit = torch.cat([gap_logit, gmp_logit], 1)
x = torch.cat([gap, gmp], 1)
x = self.leaky_relu(self.conv1x1(x))
heatmap = torch.sum(x, dim=1, keepdim=True)
x = self.pad(x)
out = self.conv(x)
return out, cam_logit, heatmap
class RhoClipper(object):
def __init__(self, min, max):
self.clip_min = min
self.clip_max = max
assert min < max
def __call__(self, module):
if hasattr(module, 'rho'):
w = module.rho.data
w = w.clamp(self.clip_min, self.clip_max)
module.rho.data = w
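# --- Hedged usage sketch (editor's addition, not part of the original repository) ---
# A minimal CPU smoke test wiring together the generator, discriminator and rho
# clipper defined above. The channel counts, image size and block count are
# illustrative assumptions, not values taken from the original training setup.
if __name__ == '__main__':
    gen = ResnetGenerator(input_nc=3, output_nc=3, ngf=32, n_blocks=2, img_size=64, light=True)
    disc = Discriminator(input_nc=3, ndf=32, n_layers=5)
    clipper = RhoClipper(0, 1)

    dummy = torch.randn(1, 3, 64, 64)
    fake, gen_cam_logit, gen_heatmap = gen(dummy)      # generator forward pass
    score, disc_cam_logit, disc_heatmap = disc(fake)   # discriminator forward pass
    gen.apply(clipper)                                 # clamp every rho parameter into [0, 1]
    print(fake.shape, score.shape, gen_cam_logit.shape, disc_cam_logit.shape)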
|
tests/softlearning/environments/adapters/robosuite_adapter_test.py
|
brickerino/tqc
| 362 |
66322
|
import unittest
import numpy as np
from .softlearning_env_test import AdapterTestClass
from softlearning.environments.adapters.robosuite_adapter import (
RobosuiteAdapter)
class TestRobosuiteAdapter(unittest.TestCase, AdapterTestClass):
# TODO(hartikainen): This is a terrible way of testing the envs.
# All the envs should be tested independently.
def create_adapter(self, domain='Sawyer', task='Lift', *args, **kwargs):
return RobosuiteAdapter(
domain,
task,
*args,
**kwargs,
has_renderer=False,
has_offscreen_renderer=False,
use_camera_obs=False)
def test_environments(self):
# Make sure that all the environments are creatable
TEST_ENVIRONMENTS = [('Sawyer', 'Lift')]
def verify_reset_and_step(domain, task):
env = RobosuiteAdapter(
domain=domain,
task=task,
has_renderer=False,
has_offscreen_renderer=False,
use_camera_obs=False)
env.reset()
env.step(env.action_space.sample())
for domain, task in TEST_ENVIRONMENTS:
verify_reset_and_step(domain, task)
def test_copy_environments(self):
domain, task = 'Sawyer', 'Lift'
env_kwargs = {
"gripper_type": "TwoFingerGripper",
"table_full_size": (0.8, 0.8, 0.8)
}
env1 = self.create_adapter(domain=domain, task=task, **env_kwargs)
env1.reset()
env2 = env1.copy()
self.assertEqual(env1.observation_keys, env2.observation_keys)
for key, value in env_kwargs.items():
self.assertEqual(getattr(env1.unwrapped, key), value)
self.assertEqual(getattr(env2.unwrapped, key), value)
domain, task = 'Sawyer', 'Lift'
robosuite_adapter_kwargs = {
'observation_keys': ('joint_pos', 'joint_vel')
}
env_kwargs = {
"gripper_type": "TwoFingerGripper",
"table_full_size": (0.8, 0.8, 0.8)
}
env1 = self.create_adapter(
domain=domain, task=task, **robosuite_adapter_kwargs, **env_kwargs)
env1.reset()
env2 = env1.copy()
for key, value in robosuite_adapter_kwargs.items():
self.assertEqual(getattr(env1, key), value)
self.assertEqual(getattr(env2, key), value)
for key, value in env_kwargs.items():
self.assertEqual(getattr(env1.unwrapped, key), value)
self.assertEqual(getattr(env2.unwrapped, key), value)
def test_fails_with_invalid_environment_kwargs(self):
domain, task = 'Sawyer', 'Lift'
robosuite_adapter_kwargs = {
'observation_keys': ('joint_pos', 'invalid_key')
}
with self.assertRaises(AssertionError):
env = self.create_adapter(
domain=domain, task=task, **robosuite_adapter_kwargs)
def test_environment_kwargs(self):
env_kwargs = {
"has_renderer": False,
"has_offscreen_renderer": False,
"use_camera_obs": False,
"control_freq": 10,
"horizon": 1000
}
env = RobosuiteAdapter(
domain='Sawyer', task='Lift', **env_kwargs)
observation1, reward, done, info = env.step(env.action_space.sample())
self.assertAlmostEqual(reward, 0.0)
for key, expected_value in env_kwargs.items():
actual_value = getattr(env.unwrapped, key)
self.assertEqual(actual_value, expected_value)
def test_render_rgb_array(self):
env = self.create_adapter()
with self.assertRaises(NotImplementedError):
env.render()
def test_render_human(self):
env = self.create_adapter()
with self.assertRaises(NotImplementedError):
env.render()
def test_fails_with_unnormalized_action_spec(self):
from robosuite.environments.sawyer_lift import SawyerLift
class UnnormalizedEnv(SawyerLift):
@property
def dof(self):
return 5
@property
def action_spec(self):
low, high = np.ones(self.dof) * -2.0, np.ones(self.dof) * 2.0
return low, high
env = UnnormalizedEnv(
has_renderer=False,
has_offscreen_renderer=False,
use_camera_obs=False)
with self.assertRaises(AssertionError):
adapter = RobosuiteAdapter(domain=None, task=None, env=env)
if __name__ == '__main__':
unittest.main()
|
mhandle_content.py
|
zyhibook/igotolibrary
| 171 |
66325
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# @filename:mhandle_content.py
# @author: wheee/qmppz
# @time:20190709
# @description: handle msg, python3
import configparser
import time
import random
import json
import os
import re
import requests
import sys
# sys.path.append("../..")
# import igotolibrary.mhandle_content as test_mhandle_content
import utils
import crawldata
'''
module-level configuration for this file, initialized (refreshed) on first load
'''
# GBCF = utils.GBCF
a_task = utils.Atask()
CF = utils.GBCF()
# add value
CF.task_id = int(utils.get_date().split('_')[0]) - 20180000 + (100 if int(utils.get_date().split('_')[0]) % 2 == 0 else -100) + 1110
requests.adapters.DEFAULT_RETRIES = 5
CF.sess = requests.Session()
CF.sess.keep_alive = False
# sql action
sqlact = utils.SqlAct()
# memcache
mc = utils.MyMemcache()
# debug print
debug_p = utils.debug_p
'''
get_reply_msg from robot
'''
def get_reply_msg(str_info, str_flg='ROBOT', sess=object):
if str_flg == "ROBOT":
# if str_info.find("抢座") >= 0 or str_info.find("帮助") >= 0 :
# return ' '
# turing robot
api_url = 'http://openapi.tuling123.com/openapi/api/v2'
data = {
"reqType": 0, # 输入类型 0-文本, 1-图片, 2-音频
"perception": # 信息参数
{
"inputText": # 文本信息
{
"text": str_info
},
"selfInfo": # 用户参数
{
}
},
"userInfo":
{
"apiKey": ["<KEY>", "<KEY>", "<KEY>"][random.randint(0, 3)],
# 改为自己申请的key
"userId": "0001" # 用户唯一标识(随便填, 非密钥)
}
}
data = json.dumps(data).encode('utf8')
response = requests.post(api_url, data=data, headers={'content-type': 'application/json'})
replies = json.loads(response.text)
return replies
elif str_flg == "RIGHT":
return str_info
elif str_flg == "ERROR":
return str_info
else:
return "#[E]: 致命错误!"
'''
class mapping command prefixes to their handler functions
'''
class CmdFunction():
CMD_HINT = {
'HELP': '请回复:\n\n指令帮助\n\n',
'CMD_HELP': '【抢座指令】请按如下格式发送指令:\n#抢座; 学校英文简称; 自习室id;座位号; 自习室id;座位号; wechat_sess_id; serverid;',
'CMD_CHECK': ' '
}
HELP_INFO = {
}
face_ico = {
'positive': ['😃 ', '😏 ', '😁 ', '😌 ', '😜 ', '😝', '😂 '],
'emmm': ['😂'],
'negative': ['😂', '😰 ', '😭 ', '😱 ', '😨 ', '😷 ', '😔']
}
def getico(flag='emmm'):
if flag == -1:
flag = 'negative'
elif flag == 1:
flag = 'positive'
elif flag == 0:
flag = 'emmm'
return random.choice(CmdFunction.face_ico[flag])
'''
modify_opentime
'''
# @utils.catch_exception
def modify_opentime(userid, content):
# xgqzsj, bjtu, 20:35
# opentime : 20:35
_, schl_abbr, opentime = content.split(CF.USER_CMD_SPLTCH)
opentime = opentime.split('-')[0].replace('.', ':')
# 6:00 --> 06:00
if len(opentime.split(':')[0]) == 1:
opentime = '0' + opentime
# 20:10 --> 20:10:00
if opentime.count(':') == 1:
opentime += ':00'
if not schl_abbr or not opentime or opentime.count(':') < 1:
return 'modify_opentime failed'
# UPDATE schl_lib_stmp SET open_time = '00:00' WHERE schl_abbr like 'bjtu';
sql_update = 'UPDATE ' + sqlact.tb_schl_lib_stmp + ' SET open_time = \'' + opentime + '\' WHERE schl_abbr like \'' + schl_abbr.lower() + '\';'
sqlact.cur.execute(sql_update)
sqlact.conn.commit()
return 'modify_opentime succ'
'''
check school info if exist
'''
def check_school(userid, content):
check_cmd_str = '#查询; 学校英文简称'
info = {
'verify_failed_format': CmdFunction.getico(-1) + '操作失败:【指令格式可能有误】;请按如下指令查询学校信息:\n\n' + check_cmd_str,
'schl_info_not_found': CmdFunction.getico(-1) + '暂无 [{school_info}] 的自习室信息,请发送【添加】指令进行学校信息添加;格式如下:\n\n#添加学校; 学校英文简称; wechat_sess_id; serverid',
'check_succ': CmdFunction.getico(1) + '查询成功,[{school_name}-{schl_abbr}]自习室信息如下:\n\n{classrm_libid}\n开放抢座时间:{open_time}'
}
func_name = '[check_school]'
tmp_ls = content.split(CF.USER_CMD_SPLTCH)
if len(tmp_ls) < 2:
return info['verify_failed_format']
_, schl_abbr = tmp_ls[:2]
# check [school_name] seatmap data exist or not; # {user_name:'',schl_abbr:'', 'open_time':'', school_name:'', classroom:[{'classroom_name':classroom_name,'libid':libid, 'path':classroom_path,'seat_map':''},{},{}...]}
user_conf_dict = sqlact.query_school_info(schl_abbr=schl_abbr) # , libid1='', libid2=libid2)
debug_p('func_name=', func_name, 'query_school_info()', user_conf_dict)
if not user_conf_dict:
# schl_info_not_found
reply_text = info['schl_info_not_found'].replace('{school_info}', schl_abbr)
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
else:
school_name = user_conf_dict.get('school_name', 'school_name')
# schl_info_found
reply_text = info['check_succ'].replace('{school_name}', school_name).replace('{schl_abbr}', schl_abbr).replace('{open_time}', user_conf_dict.get('open_time', '--:--')).replace('{classrm_libid}', '\n'.join([e['classroom_name'] + '-id=' + str(e['libid']) for e in user_conf_dict['classroom']]))
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
'''
force_add_school_info
'''
def force_add_school_info(userid, content):
func_name = '[force_add_school_info]'
debug_p(func_name, 'content=', content)
return CmdFunction.add_school_info(userid=userid, content=content, force=True)
'''
add school info
'''
def add_school_info(userid, content, force=False):
'''
Example command: #添加学校; bbmc; wechat_sess_id; serverid
'''
func_name = '[add_school_info]'
info = {
'verify_failed_format': CmdFunction.getico(-1) + '操作失败:【添加指令格式可能有误】;\n在自身没有预约座位和自习室开放的状态下,添加指令才能有效;请按如下指令添加学校信息:\n\n#添加学校; 学校英文简称; wechat_sess_id; serverid',
'verify_failed_wechat_sess_id_invalid': CmdFunction.getico(-1) + '操作失败:【wechat_sess_id; serverid可能失效】;\nwechat_sess_id、serverid是需要自己去抓包获取的,不是示例里面的qwertyxxxx,具体获取方法请看指令帮助文档。',
'failed_add_school_except': CmdFunction.getico(-1) + '操作失败:【尝试获取自习室信息失败】\n 在自身没有预约座位和自习室开放的状态下,添加指令才能有效;多次出错请联系管理员',
'already_exist': CmdFunction.getico(1) + '操作成功:【学校 [{schl_abbr}] 的自习室信息已经存在】;自习室信息如下:\n\n{classrm_libid}\n开放抢座时间:{open_time};\n快使用抢座指令添加任务吧!\n自习室的数量 id 时间不正确请反馈管理员',
'succ_add_school_info': CmdFunction.getico(1) + '操作成功:【成功添加学校 [{school_name}-{schl_abbr}] 的自习室信息】;信息如下:\n\n{classrm_libid}\n开放抢座时间:{open_time}\n自习室的数量 id 时间不正确请反馈管理员'
}
# #添加学校, schl_abbr, sess_id, - 平台=来选座
tmp_ls = content.split(CF.USER_CMD_SPLTCH)
# if len(tmp_ls) < 4:
if len(tmp_ls) < 3:
return info['verify_failed_format']
# _, schl_abbr, wechat_sess_id, serverid = tmp_ls[:4]
_, schl_abbr, wechat_sess_id = tmp_ls[:3]
cmd_dict = utils.parse_extra_cmd(extra_cmd=content)
# init a_task
# if cmd_dict.get('platform') == 'CTRS':
a_task = utils.Atask(platform=cmd_dict.get('platform', CF.PLATFORM['IGTL']))
# schl_abbr transfer to lower
schl_abbr = str(schl_abbr).replace('[', '').replace(']', '').lower()
# verify_key = '您好'
# url_homepage = 'https://wechat.v2.traceint.com/index.php/reserve/index.html?f=wechat'
# # fill cookies
# if serverid.split('|') != 3:
# serverid = serverid.split('|')[0] + '|' + '1234567890' + '|' + a_task.M_COOKIES['SERVERID'].split('|')[-1]
# a_task.M_COOKIES = utils.fill_cookies(cookies=a_task.M_COOKIES, serverid=serverid, wechat_sess_id=wechat_sess_id)
a_task.M_COOKIES = utils.fill_cookies(cookies=a_task.M_COOKIES, wechat_sess_id=wechat_sess_id, platform=a_task.platform)
# entry homepage
homepage_response = utils.get_response(url=a_task.CURRENT_URL['home_page'],
sess=CF.sess,
m_headers=a_task.M_HEADERS,
m_cookies=a_task.M_COOKIES,
verify_key=a_task.VERIFYKEY_OF_HOMEPAGE)
if not homepage_response:
# verify failed; cmd is invalid
return info['verify_failed_wechat_sess_id_invalid']
debug_p('homepage_response=', homepage_response[:200])
# parse homepage_response get user_name, school_name
user_name, school_name = crawldata.get_name(homepage_response)
# check [school_name] seatmap data exist or not; # {user_name:'',schl_abbr:'', school_name:'', 'open_time':'', classroom:[{'classroom_name':classroom_name,'libid':libid, 'path':classroom_path,'seat_map':''},{},{}...]}
user_conf_dict = sqlact.query_school_info(schl_abbr=schl_abbr, libid1='', libid2='')
# if query failed, refresh school info
if force == True or not user_conf_dict:
# school info not exist, refresh this school; # {user_name:'',schl_abbr:'', school_name:'', 'open_time':'', classroom:[{'classroom_name':classroom_name,'libid':libid, 'path':classroom_path,'seat_map':''},{},{}...]}
# user_conf_dict = crawldata.refresh_school_info(homepage_url='', homepage_response=homepage_response,
# sess=CF.sess, m_headers=a_task.M_HEADERS,
# m_cookies=a_task.M_COOKIES,
# verify_key='',
# schl_abbr=schl_abbr,
# platform=a_task.platform,
# sql_conn=sqlact.conn
# )
user_conf_dict = crawldata.refresh_school_info(homepage_response=homepage_response,
a_task=a_task,
schl_abbr=schl_abbr,
sess=CF.sess, m_headers=a_task.M_HEADERS,
m_cookies=a_task.M_COOKIES,
sql_conn=sqlact.conn
)
else:
# already exist
reply_text = info['already_exist'].replace('{schl_abbr}', schl_abbr).replace('{open_time}', user_conf_dict.get('open_time', '--:--')).replace('{classrm_libid}', '\n'.join([e['classroom_name'] + '-id=' + str(e['libid']) for e in user_conf_dict['classroom']]))
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
if not user_conf_dict.get('classroom', []):
return info['failed_add_school_except']
reply_text = info['succ_add_school_info'].replace('{school_name}', user_conf_dict.get('school_name', 'school_name')).replace('{schl_abbr}', schl_abbr).replace('{open_time}', user_conf_dict.get('open_time', '--:--')).replace('{classrm_libid}', '\n'.join([e['classroom_name'] + '-id=' + str(e['libid']) for e in user_conf_dict['classroom']]))
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
'''
parse a captured trace; extract and return the wechatSESS_ID (SERVERID and timestamp parsing is currently disabled)
'''
def parse_trace(userid, content):
# verify content format
info = {
'verify_failed': CmdFunction.getico(-1) + '您发送的 trace 校验格式不通过,请重新获取后再尝试!'
}
if len(content) < 100:
return info['verify_failed']
if content.find('wechatSESS_ID') < 0:
return info['verify_failed'] + '\n' + '没有找解析出 wechatSESS_ID 字段'
# elif content.find('SERVERID')<0:
# return info['verify_failed']+'\n'+'没有找解析出 SERVERID 字段'
try:
content += ' ;'
# pattern = re.compile(r'SERVERID\=\w+\|\d{10}\|\d{10}')
# SERVERID = pattern.search(content).group(0)
pattern = re.compile(r'wechatSESS_ID\=\w+(?=[\s;])')
wechatSESS_ID = pattern.search(content).group(0)
# pattern = re.compile(r'(?<=Hm_lvt_\w{32}\=)\d{10}(?=[\s;])')
# Hm_lvt_time = pattern.search(content).group(0)
#
# SERVERID_time_2 = re.compile(r'(?<=SERVERID\=\w{32}\|\d{10}\|)\d{10}(?=[\s;])')
# SERVERID_time_2 = pattern.search(content).group(0)
return '\n' + wechatSESS_ID + '\n' # +SERVERID
except Exception as e:
debug_p('[E]: action [%s] failed, exception is %s' % ('parse_trace', repr(e)))
return info['verify_failed'] + '[wechatSESS_ID 没有找到]'
'''
realtime
'''
def realtime(userid, content):
func_name = '#realtime'
debug_p('func_name=', func_name, 'userid, content', userid, content)
return CmdFunction.grab_seat(userid, content, task_kind=CF.TASK_KIND['realtime'])
'''
grab_seat
'''
def grab_seat(userid, content, task_kind=CF.TASK_KIND['reserve']):
'''
Command format (fields may be separated by , . ; whitespace or newlines):
实时预定 | 捡漏 | jl | #jl | 明日预约 | 抢座 | #qz | qz ;      <- command keyword (realtime booking / seat sniping / next-day reservation / grab seat)
学校英文简称 | 首拼 ;                                           <- school abbreviation in English / initials
自习室id1;座位号1;自习室id2,座位号2;                            <- classroom id 1; seat no. 1; classroom id 2; seat no. 2
serverid;wechat_sess_id
extra_info:
exetime       first execution time | grab start time;
pre_today     same-day instant booking | next-day reservation;
lgtl_or_ctrs  platform: 我去图书馆 (IGTL) | 来选座 (CTRS);
unknown_cmd   extension commands
'''
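# Example command strings this handler accepts (illustrative placeholders modelled on
# the test strings in the __main__ block at the bottom of this file, not real sessions):
#   '#抢座; bjtu; 323;81; 324;80; wechatSESS_ID=<sess_id>'
#   'jl; bjtu; 323;1; 324;0; wechatSESS_ID=<sess_id> -- 时间=12:00; 模式=今; 平台=我去图书馆'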
func_name = '#grab_seat'
debug_p('func_name=', func_name, 'userid, content', userid, content)
task_kind_str = '[准点抢座] ' if task_kind == CF.TASK_KIND['reserve'] else '[实时捡漏] '
info = {
'grab_cmd_help': 'help info',
'verify_failed_format': CmdFunction.getico(-1) + task_kind_str +'task提交失败:【抢座指令格式可能有误】\n请仔细检查并按如下顺序重新编辑发送:\n\n#抢座; 学校英文简称; 自习室id;座位号;自习室id;座位号; wechat_sess_id; serverid',
'verify_failed_wechat_sess_id_invalid': CmdFunction.getico(-1) + task_kind_str + 'task提交失败:【wechat_sess_id; serverid可能失效】\nwechat_sess_id、serverid是需要自己去抓包获取的,不是示例里面的qwertyxxxx,更不是wechat_sess_id,serverid这两个单词;具体获取方法请看指令帮助文档。',
'verify_failed_get_school_info': CmdFunction.getico(-1) + task_kind_str + 'task提交失败:【座位表信息不匹配】请确认自习室信息存在且自习室id正确\n如需帮助请联系管理员处理',
'verify_failed_seatnum_not_found': CmdFunction.getico(-1) + task_kind_str + 'task提交失败:【自习室id不匹配或不存在此座位号】请检查后再试\n支持的自习室的id信息:{classrm_libid}',
'unknown_error': CmdFunction.getico(-1) + task_kind_str + 'task提交失败;未知错误;\n请联系管理员并提供如下信息:\n\n{unknown_error}',
'verify_succ': CmdFunction.getico(1) + task_kind_str + 'task提交成功:task_id={task_id};\n您的任务信息如下:\n{task_info}',
}
if not content:
reply_text = info['help_info']
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
# cmd type = user
# verify format, cmd_dict : # {schl_abbr: '', libid1: '', seat_num1: '', libid2: '', seat_num2: '',serverid:'', wechat_sess_id:''}
cmd_dict = utils.parse_grab_seat_cmd(command=content)
debug_p('func_name=', func_name, 'parse_grab_seat_cmd()', cmd_dict)
if not cmd_dict:
reply_text = info['verify_failed_format']
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
# normal cmd
# schl_abbr, libid1, seat_num1, libid2, seat_num2, wechat_sess_id, serverid = cmd_dict['schl_abbr'], cmd_dict['libid1'], cmd_dict['seat_num1'], cmd_dict['libid2'], cmd_dict['seat_num2'], cmd_dict['wechat_sess_id'], cmd_dict['serverid']
schl_abbr, libid1, seat_num1, libid2, seat_num2, wechat_sess_id, = cmd_dict['schl_abbr'], cmd_dict['libid1'], cmd_dict['seat_num1'], cmd_dict['libid2'], cmd_dict['seat_num2'], cmd_dict['wechat_sess_id'] # , cmd_dict['serverid']
# cmd
exe_time = cmd_dict.get('exe_time', '') # open_time
# pattern = cmd_dict.get('pattern', CF.PATTERN['PRE']) # pre
# a task , a Atask, init
a_task = utils.Atask(platform=cmd_dict.get('platform', CF.PLATFORM['IGTL']),
pattern=cmd_dict.get('pattern', CF.PATTERN['TODAY']))
# verify serverid and wechat_sess_id
# fill cookies
# a_task.M_COOKIES = utils.fill_cookies(cookies=a_task.M_COOKIES, serverid=serverid, wechat_sess_id=wechat_sess_id, platform=a_task.platform)
a_task.M_COOKIES = utils.fill_cookies(cookies=a_task.M_COOKIES, wechat_sess_id=wechat_sess_id, platform=a_task.platform)
debug_p('func_name=', func_name, 'fill_cookies()', a_task.M_COOKIES)
# entry homepage
# test
homepage_response = utils.get_response(url=a_task.CURRENT_URL['home_page'], sess=CF.sess,
m_headers=a_task.M_HEADERS,
m_cookies=a_task.M_COOKIES,
verify_key=a_task.VERIFYKEY_OF_HOMEPAGE)
debug_p('func_name=', func_name, 'get_response()', homepage_response[:300])
if not homepage_response:
# verify failed; cmd is invalid
reply_text = info['verify_failed_wechat_sess_id_invalid']
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
# debug_p('homepage_response=', homepage_response)
# parse homepage_response get user_name, school_name
user_name, school_name = crawldata.get_name(homepage_response)
# check [school_name] seatmap data exist or not; # {user_name:'',schl_abbr:'', 'open_time':'', school_name:'', classroom:[{'classroom_name':classroom_name,'libid':libid, 'path':classroom_path,'seat_map':''},{},{}...]}
user_conf_dict = sqlact.query_school_info(schl_abbr=schl_abbr) # , libid1='', libid2=libid2)
debug_p('func_name=', func_name, 'query_school_info()', str(user_conf_dict)[:400])
# # if query failed, refresh school info
# if not user_conf_dict:
# # school info not exist, refresh this school; # {user_name:'',schl_abbr:'', 'open_time':'', school_name:'', classroom:[{'classroom_name':classroom_name,'libid':libid, 'path':classroom_path,'seat_map':''},{},{}...]}
# user_conf_dict = crawldata.refresh_school_info(homepage_url='', homepage_response=homepage_response,
# sess=CF.sess, m_headers=CF.M_HEADERS, m_cookies=CF.M_COOKIES,
# verify_key='',
# schl_abbr=schl_abbr,
# sql_conn=sqlact.conn
# )
# debug_p('func_name=', func_name, 'refresh_school_info()', user_conf_dict)
# action query and refresh both failed
if not user_conf_dict:
reply_text = info['verify_failed_get_school_info']
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
# get school info succ and then construct [re_reserve_cmd] data: task_id;userid; 323;21,31; 324;41,51; wechat_sess_id; serverid; comment_info
user_conf_dict['user_name'] = user_name
# get seat coordinate and classroom_name
# all_lib_clssrm dict{libid: clssrm}
all_lib_clssrm = dict([(classroom['libid'], classroom['classroom_name']) for classroom in user_conf_dict['classroom']])
lib_seat_ls = [(libid1, seat_num1), (libid2, seat_num2)]
clssrm_crdnt = CmdFunction.verify_seat(lib_seat_ls, user_conf_dict)
# if coordinate not match, exception
if not clssrm_crdnt:
reply_text = info['verify_failed_seatnum_not_found'].replace('{classrm_libid}', '\n'.join([e['classroom_name'] + '-id=' + str(e['libid']) for e in user_conf_dict['classroom']]))
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
classroom_name1, coordinate1 = clssrm_crdnt[0]
classroom_name2, coordinate2 = clssrm_crdnt[1]
debug_p('func_name=', func_name, 'get coordinate1 and coordinate2', 'classroom_name1=', classroom_name1,
'coordinate1=',
coordinate1, 'classroom_name2=', classroom_name2, 'coordinate2=', coordinate2)
# construct[re_reserve_cmd] task_id; userid; user_name; school_name; classroom_name1;323;seat_num; 21,31; classroom_name2; 324; seat_num2; 41,51; wechat_sess_id; serverid; comment_info
open_time = user_conf_dict.get('open_time', '00:00-00:00') if task_kind == CF.TASK_KIND['reserve'] else utils.get_date(format="%H:%M:%S")
submit_time = utils.get_date(format='%Y-%m-%d %H:%M:%S')
open_time = exe_time if exe_time else open_time
wechat_sess_id = wechat_sess_id
succ_failed, detail_info, others_result_info = '', '', ''
task_id = CF.TASK_ID
# others_info is json format
others_info = {}
others_info['all_lib_clssrm'] = all_lib_clssrm
comment_info = ''
serverid = CF.SERVERID if a_task.platform == CF.PLATFORM['IGTL'] else ''
# print('serverid', serverid)
param = (
userid, task_kind, wechat_sess_id, succ_failed, detail_info, others_result_info, task_id,
user_name, school_name, schl_abbr, open_time, classroom_name1, libid1, seat_num1, coordinate1,
classroom_name2, libid2, seat_num2, coordinate2, serverid, comment_info, submit_time,
a_task.pattern, a_task.platform, json.dumps(others_info)
)
#
tb_today_task = 'today_task'
# replace will delete the exist trace and insert a new trace, then the id will change
# insert into tb_today_task
# REPLACE into today_task (userid, task_kind, wechat_sess_id, succ_failed, detail_info, others_result_info , task_id, user_name, school_name, schl_abbr, open_time, classroom_name1, libid1, seat_num1, coordinate1, classroom_name2, libid2, seat_num2, coordinate2, serverid, comment_info, submit_time, pattern, platform, others_info )
sql_today_task = 'REPLACE INTO ' + tb_today_task + \
'(userid, task_kind, wechat_sess_id, succ_failed, detail_info, others_result_info, task_id,' \
'user_name, school_name, schl_abbr, open_time, classroom_name1, libid1, seat_num1, coordinate1,' \
'classroom_name2, libid2, seat_num2, coordinate2, serverid, comment_info, submit_time,' \
'pattern, platform, others_info) ' + \
' VALUES(' + '?,' * (len(param) - 1) + '?)'
sqlact.cur.execute(sql_today_task, param)
sqlact.conn.commit()
debug_p('func_name=', func_name, 'REPLACE and INSERT action; param=', param)
reply_text = info['verify_succ'].replace('{task_id}', str(CF.TASK_ID)).replace('{task_info}', '\n[' + school_name + '-' + schl_abbr + ']' +
'的\n[' + classroom_name1 + '-id=' + libid1 + ']的[' + str(seat_num1) + ']号座位\n' +
'[' + classroom_name2 + '-id=' + libid2 + ']的[' + str(seat_num2) + ']号座位\n执行时间:' + open_time + '') + \
'\n模式:' + ('预定当日💺' if a_task.pattern == CF.PATTERN['TODAY'] else '预约明天💺') + '\n平台:' + ('<我去图书馆>' if a_task.platform == CF.PLATFORM['IGTL'] else '<来选座>')
CF.TASK_ID += 1
debug_p('func_name=', func_name, 'TASK_ID=', CF.TASK_ID, 'grab_seat action over, reply_text=', reply_text)
return reply_text
'''
query_realtime_result
'''
def query_realtime_result(userid, content):
func_name = '[query_realtime_result]'
debug_p(func_name, 'userid, content', userid, content)
return CmdFunction.query_result(userid, content, task_kind=CF.TASK_KIND['realtime'])
'''
parse the dict from memcache
return reply str
'''
def parse_dct_from_mc(result_dct={}, char_limit=CF.CHAR_LIMIT):
# exe trace format
# TRACE_FORMAT = {
# 'head': '状态:{status}\n[{school_name}-{schl_abbr}_{task_id}]\n{submit_time} 提交\n',
# 'exe_trace': '{emoji}{try_cnt}. {exe_time} [{classroom_name}]-[{seat_num}]号座位:{feedback}\n',
# }
default_value = ''
flag = {
'SUCC': '✅',
'FAILED': '❌',
# 'Ongoing': '🔄',
'Ongoing': '🌀',
# 'exe_trace_failed': '⏬'
'exe_trace_failed': '🔸'
}
status = 'Ongoing'
reply_str = '...\n'
reply_str += CF.TRACE_FORMAT['head'].format(status=flag[status] + status, school_name=result_dct.get('school_name', default_value),
schl_abbr=result_dct.get('schl_abbr', default_value), task_id=result_dct.get('task_id', default_value),
submit_time=result_dct.get('submit_time', default_value))
if len(result_dct['exe_trace']) < 1:
return reply_str
code = result_dct['exe_trace'][-1].get('code', default_value)
completed_flag = result_dct['exe_trace'][-1].get('completed_flag', default_value)
if completed_flag == 'completed':
status = 'SUCC' if str(code) == '0' else 'FAILED'
for i, trace in enumerate(result_dct['exe_trace']):
reply_str += CF.TRACE_FORMAT['exe_trace'].format(
emoji=flag['exe_trace_failed'] if str(trace.get('code', default_value)) != '0' else flag['SUCC'],
try_cnt=i, exe_time=trace.get('exe_time', default_value),
classroom_name=trace.get('clssrm', default_value),
seat_num=trace.get('seat_num', default_value), feedback=trace.get('msg', default_value))
return reply_str[-1*char_limit:]
'''
query task result
'''
def query_result(userid, content, task_kind=CF.TASK_KIND['reserve']):
func_name = '[query_result]'
debug_p('func_name=', func_name, 'userid, content', userid, content)
info = {
'default': '没有查询到最近这段时间抢座任务执行状态信息',
}
reply_str = info['default']
result = mc.get_value(key=task_kind + '_' + userid, default='')
if result:
reply_str = CmdFunction.parse_dct_from_mc(result)
# parse the dict from memcache
debug_p(func_name, 'task result reply_str=', reply_str)
# return {'kind': 'no_prefix', 'reply_str': reply_str}
return reply_str
'''
FUNCTION_MAP
'''
FUNCTION_MAP = {
'#check_schl': check_school,
'#add_school_info': add_school_info,
'#force_add_school_info': force_add_school_info,
'#parse_trace': parse_trace,
'#grab_seat': grab_seat,
'#modify_opentime': modify_opentime,
# '#needhelp': needhelp,
'#query_result': query_result,
'#realtime': realtime,
'#query_realtime_result': query_realtime_result,
}
# verify_seat, return clssrm_crdnt=[(classroom_name, coordinate), () ... ]
def verify_seat(lib_seat_ls, user_conf_dict, num_0_value='任意'):
clssrm_crdnt = []
for libid, seatnum in lib_seat_ls:
if int(libid) <= 0:
seatnum = '0'
# user_conf_dict['classroom']:[{'classroom_name':classroom_name,'libid':libid, 'path':classroom_path,'seat_map':''}
# if libid == 0:
classroom_name, coordinate = num_0_value, '0'
for classroom in user_conf_dict['classroom']:
# if int(libid) == 0: classroom_name = "任意"; coordinate = '0'; break
if int(libid) != 0 and coordinate == '0' and classroom['libid'] == libid.replace('-', ''):
classroom_name = classroom['classroom_name']
if seatnum == '0':
coordinate = '0'
break
for pre_0 in ['', '0', '00', '000']:
coordinate = classroom['seat_map'].get(pre_0 + seatnum, coordinate)
if libid != '0' and classroom_name == num_0_value:
# error: libid not found
return []
clssrm_crdnt.append((classroom_name, coordinate))
return clssrm_crdnt
'''
extra help info
'''
class ExtraInfo(object):
prefix = '\n\nℹ️随机帮助信息ℹ️\n'
I = {
# 'help': '强调:wechat_sess_id和serverid是需要自己抓包获取的,不是示例里面的qwertyxxx,请仔细阅读说明\n为了避免id失效,抢座任务请尽量在开抢前的5-30分钟时间段内提交\ngithub:https://github.com/qmppz/igotolibrary',
# 'administrator_info': '如果出现指令无响应无反馈、添加学校失败、多次任务失败...等等摸不着头脑的问题请联系管理员处理。\nwx: turing_01110101',
}
others = ['查看<为了学习>抢座工程的更新进度和即时通知,请看管理员朋友圈。wx: turing_01110101',
'<为了学习>已经向<我去图书馆>官方反馈了抢座漏洞,官方答复:正在修复中。',
'wechat_sess_id、serverid是需要自己去抓包获取的,不是示例里面的qwertyxxxx,具体获取方法请看指令帮助文档',
'指令分隔符可以是逗号或句号或分号或空格或回车,。;,.; 且支持中文符号和英文符号。',
'<为了学习>工程抢座原理已经开源,且无收费的服务、不买卖程序!只为非计算机的同学提供近似公平的抢座。',
'服务器已经升级,抢座task实际测试速度提升明显。',
'服务器指令解析需要时间,请等待几秒钟。',
'有什么意见或者建议请向管理员反馈。',
'指令中的[学校简称]是英文简称,而不是学校名字的首拼。'
'为避免抓包获取的serverid失效以及抢座任务遗漏,请在开抢前5-30分钟时间段提交抢座任务。',
'如果出现指令无响应无反馈、添加学校失败、多次任务失败...等等摸不着头脑的问题请联系管理员。',
'注意不要把抓包获取到的trace发到<我去图书馆>...请认准<为了学习>',
'后台消息过多,反馈问题或者建议意见请发送到管理员的微信 turing_01110101',
'抓包的意思就是进行网络监听并将请求的数据记录显示出来,所以开启抓包软件的时候手机会有风险提示',
'使用[添加指令]需要满足:1, 在自身没有预定座位的状态下; 2, 自习室都开放的状态下',
'自习室数量、开抢时间等不正确请反馈管理员wx:turing_01110101',
'抢座任务在开抢前5-30分钟时间段内提交才能有效',
# '接下来尝试更新'
]
# cmd_help = '\n指令帮助文档:https://mp.weixin.qq.com/s/1FVTjlDunfngwMip3TFakA'
cmd_help = '\n<a href="https://mp.weixin.qq.com/s/8HmS4Ct02ZQIcBYRnhTl9Q"> ☞☞指令帮助文档 </a>'
# get_random_info
def get_random_info(whichone=-1):
info = list(ExtraInfo.I.values()) + ExtraInfo.others
return ExtraInfo.prefix + random.choice(info) + ExtraInfo.cmd_help
'''
parse a message from the WeChat handler; check whether it is a command and, if so, execute that command's function
return the response text
'''
@utils.catch_exception
def handle_msg(userid, content, my_id, LOCAL=False):
# transfer content from byte to str
m_content = content
if isinstance(content, bytes):
m_content = content.decode(encoding='utf-8')
func_name = '#handle_msg'
debug_p('func_name=', func_name, 'userid=', userid, 'content=', content)
'''
check if is test, discard test flag
'''
if str(m_content[:4].split()[0]).lower() in {'test', '内测', '测试'}:
m_content = m_content[:4].replace('test', '').replace('内测', '').replace('测试', '') +\
m_content[4:]
# else:
# # old version entrance function
# return old_version_entrance(userid, content, my_id)
# content is none
content = m_content
if not content:
# return get_reply_msg(str_info=content)
reply_text = CmdFunction.getico(1) + '\n'
return reply_text + ExtraInfo.get_random_info()
# parse, if command
cmd_pre_flag = {
# 'igotolibrary': {'我去图书馆', '来选座'},
# qiangzuo task
'#grab_seat': {'抢座', '明日预约', '预约座位', '抢座位', '抢坐', '#抢坐', '抢位置', 'grab_seat', '#抢座', 'qz', '#qz'},
# realtime grab seat
'#realtime': {'捡漏', '实时预定', '即时预订', '实时预订', '即时预定', 'jl', 'ssyd', 'jsyd', 'realtime'},
'#check_schl': {'查询', '#查询', 'cx', '#cx', 'chaxun', '#查询学校', '查询学校'},
# parse trace
'#parse_trace': {'jx', '#jx', '解析', '#解析', 'wechatsess_id=', 'get'},
# add school info
'#add_school_info': {'#添加学校', '添加学校', 'tj', '#tj', '#添加', '添加'},
# force add school
'#force_add_school_info': {'强制添加', '强制添加学校', '强制添加学校信息', 'qztj', 'qztjxxxx'},
# '#needhelp':{'帮助', 'help', 'bz', '帮助信息', '提示'},
# admin cmd
'#gengxin': {},
# modify opentime
'#modify_opentime': {'修改抢座时间', 'xgqzsj', '修改开抢时间', 'xgkqsj'},
# query reserve result
'#query_result': {'查询结果', '结果', 'jg', 'cxjg', '抢座结果', 'qzjg', '查询抢座结果', '查询抢座'},
# query realtime result
'#query_realtime_result': {'查询捡漏结果', '捡漏结果', 'jljg', 'cxjljg', 'jlqzjg', 'jl结果', '实时预定结果', '实时预订结果'}
}
# formatting split_ch to blank
frmt_content = re.sub(r'[(()),;。;,\.]', ' ', content.replace(u'#', '')
.replace(u'#', '')
.replace(u'-', '-').replace(u'➖', '-').replace('- -', '--')
.replace('=', '=')
.replace('\n', CF.USER_CMD_SPLTCH)
)
# del all \n \r and blank
frmt_content = re.sub(r'\s+', CF.USER_CMD_SPLTCH, frmt_content.strip())
content = frmt_content
# judge which kind cmd from index 0
cmd_ls = content.split(CF.USER_CMD_SPLTCH)
cmd_kind = ''
for pre_flag in cmd_pre_flag.keys():
if cmd_ls[0].lower().replace('#', '').strip() in cmd_pre_flag[pre_flag]:
cmd_kind = pre_flag
break
if not cmd_kind:
# specify parse trace
if len(content) > 100 and content.find('wechatSESS_ID') >= 0: # and content.find('SERVERID') >= 0:
# parse trace
cmd_kind = '#parse_trace'
else:
# content is not cmd
no_match_cmd_reply = ['没有匹配到指令...不知道该回应什么',
'没有匹配到指令...反馈问题请联系管理员']
reply_text = CmdFunction.getico(1) * 3 + random.choice(no_match_cmd_reply) + '\n'
return reply_text + ExtraInfo.get_random_info()
# swap wechatSESS_ID and SERVERID to ...;wechatSESS_ID; SERVERID
# if len(cmd_ls) > 2 and cmd_ls[-1].find('wechatSESS_ID') >= 0 and cmd_ls[-2].find('SERVERID') >= 0:
# cmd_ls[-1], cmd_ls[-2] = cmd_ls[-2], cmd_ls[-1]
# content = CF.USER_CMD_SPLTCH.join(cmd_ls)
# print('cmd_ls=', cmd_ls)
# content is cmd then save cmd log
a_cmd_log = utils.get_date() + '|from_user=' + userid + '|cmd_kind=' + cmd_kind + '|content=' + content + '\n'
debug_p('func_name=', func_name, 'cmd_kind=', cmd_kind, 'a_cmd_log=', a_cmd_log)
# content is cmd then exe cmd function
reply_text = CmdFunction.FUNCTION_MAP[cmd_kind](userid, content)
# return reply text
if reply_text.find('状态') < 0:
reply_text = reply_text + ExtraInfo.get_random_info() if cmd_kind != '#parse_trace' else reply_text
return reply_text
'''
test
'''
if __name__ == '__main__':
LOCAL = utils.LOCAL
# zl_ls = [
# # '#抢座; bjtu; 323;81; 324;80; d3936289adfff6c3874a2579058ac651|1563028695|1563028690; 12cb1a0ebdb4f4260e4d2527110a2959491c24eccf287d75',
# # '#抢座; bbmc; 323;81; 324;80; d3936289adfff6c3874a2579058ac651|1563028695|1563028690; 12cb1a0ebdb4f4260e4d2527110a2959491c24eccf287d75',
# # '#抢座; pku; 323;81; 324;80; d3936289adfff6c3874a2579058ac651|1563028695|1563028690; 12cb1a0ebdb4f4260e4d2527110a2959491c24eccf287d75',
# # '查询;bbmc',
#
# # '添加;hbucm; wechatSESS_ID=5c4b33b34a20e0e0fea9864a253bd3575dcf545689ce9c0e SERVERID=b9fc7bd86d2eed91b23d7347e0ee995e|1565443472|1565443470'
#
# # '#xgqzsj, bjtu,21:22'
# 'jl, bjtu, 323, 7, 324 77 ' + \
# # 'tj, bjtu ' + \
# 'wechatSESS_ID=26443f7ddc48027297ce0e4330308d17f4b7d624aff7b416 ' + \
# 'SERVERID=b9fc7bd86d2eed91b23d7347e0ee995e|1570237808|1570237801 ' + \
# '-- t=07:00. 平台=lxz; 今明=明'
#
# # 'cxqwejg,'
# ]
for i in range(1, 2):
# zl = random.choice(['捡漏', '实时预定', '即时预订', '实时预订', '即时预定', 'jl', 'ssyd', 'jsyd', 'realtime',
# '抢座', '抢座位', '抢坐', '#抢坐', '抢位置', 'grab_seat', '#抢座', 'qz', '#qz']) + \
# ' bjtu ' + \
# ' ' + random.choice(['323 ', '324 ']) + random.choice([str(_) for _ in range(1, 100)]) + \
# ' ' + random.choice(['323 ', '324 ']) + random.choice([str(_) for _ in range(1, 100)]) + \
# ' wechatSESS_ID=ssid'+random.choice([str(_) for _ in range(111, 999)]) + \
# ' SERVERID=serid|1231232321|1321234' + random.choice([str(_) for _ in range(111, 999)]) + \
# ' -- ' + \
# random.choice(['开抢时间', '时间', 't', 'T', 'time'])+'' \
# '='+str(random.randint(6,23))+':'+str(random.randint(0,59))+':'+str(random.randint(0,59))+' ' + \
# random.choice(['预约模式', '今明', '哪天', '模式'])+'='+random.choice(['pre', '明', '明天','today', '今', '今天']) + ' ' + \
# random.choice(['平台', '公众号'])+'='+random.choice(['我去图书馆', 'igtl', 'wqtsg','来选座', 'lxz']) + ' '
zl = 'jl;bjtu;323;1 323 0 ,,;;' \
'SERVERID=d3936289adfff6c3874a2579058ac651|1570612694|1570612692 ' \
'wechatSESS_ID=5ef6f21dde35722c92e4595b2100b6fef8f08f50adfe6cb3;' \
' -- 时间=12:00;模式=明;平台=我去图书馆'
zl = '抢座;ycgxy;1234;355;' \
'wechatSESS_ID=672c5459adb7c20f3a3f64e677dfdfebac2455b49c34e280;SERVERID=b9fc7bd86d2eed91b23d7347e0ee995e|1570632661|1570631371' \
';—-;时间=6:00;模式=明;平台=我去图书馆'
zl = '捡漏, bjtu,0,0 wechatSESS_ID=14a69992ca6af9a2e11b4c3ba270a752a6d28a49fc116272'
zl = '#抢座; bjtu; 0; 046; 0; 045; ' \
'wechatSESS_ID=d251fce0daa72515a1d71eefb5b55debc1cbae9d1a32d721; ' \
'SERVERID=d3936289adfff6c3874a2579058ac651|1570707938|1570707927 ' \
'-- t=17:20 模式=今'
zl = 'test 捡漏, tyut, 323, 0, 324,0, wechatSESS_ID=0db7db1b5250d65e4d1c2af0a707296c0f689afc5f901273 SERVERID=b9fc7bd86d2eed91b23d7347e0ee995e|1570926044|1570924907 -- 时间=08:40, 模式=今天'
#
# zl = '添加学校;sxau;wechatSESS_ID=65dece8f05041ee8c849e5ec5c622a14 -- pt=lxz'
# 'SERVERID=b9fc7bd86d2eed91b23d7347e0ee995e|1570237808|1570237801 ' + \
# ' SERVERID=d3936289adfff6c3874a2579058ac651|1570237808|1570237801 ' + \
# zl = '添加; ycgxy; wechat_sess_id=672c5459adb7c20f3a3f64e677dfdfebac2455b49c34e280;'
# zl = '抢座;bjtu;324;10;323;85;SERVERID=b9fc7bd86d2eed91b23d7347e0ee995e|1570448431|1570448430;wechatSESS_ID=65bf8d12c374bf3b1fc466a279bd5ba04f2d9fe375ee717f;'
# zl = '#jl; tyut; 311; 100; 313; 91;' + \
# ' wechatSESS_ID=ed024e28d954710784abf2f385eb9ee1d7de4c53bfdfd898; SERVERID=d3936289adfff6c3874a2579058ac651|1570400154|1570400153;' +\
# '-- t=07:00 平台=wqtsg; 今明=j'
# zl = 'jljg'
# zl = '''
# GET /index.php/url/auth.html?r=%2Findex.php%2Freserve%2Findex.html%3Ff%3Dwechat%26n%3D5d9bd23e7dc9a&code=081elvY90k3kSy1WSDW90ZsgY90elvY6&state=1 HTTP/1.1 Host: wechat.laixuanzuo.com Connection: keep-alive Upgrade-Insecure-Requests: 1 User-Agent: Mozilla/5.0 (Linux; Android 7.0; PRO 7 Plus Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/66.0.3359.126 MQQBrowser/6.2 TBS/044904 Mobile Safari/537.36 MMWEBID/4071 MicroMessenger/7.0.7.1521(0x27000736) Process/tools NetType/4G Language/zh_CN Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,image/wxpic,image/sharpp,image/apng,image/tpg,*/*;q=0.8 Accept-Encoding: gzip, deflate, br Accept-Language: zh-CN,en-US;q=0.9 Cookie: FROM_TYPE=weixin; Hm_lvt_7838cef374eb966ae9ff502c68d6f098=1570464181; Hm_lpvt_7838cef374eb966ae9ff502c68d6f098=1570489433; wechatSESS_ID=85807fb3863be66e8b868e4dfce18da0
# '''
# zl = 'test 捡漏 sxau; 10281, 0; 0,0; wechatSESS_ID=89040c2998084ed651a8a7991ce11264 -- 时间=21:40 模式=今天 平台=来选座'
# zl = 'test tj sxau; wechatSESS_ID=89040c2998084ed651a8a7991ce11264 -- 时间=21:40 模式=今天 平台=来选座'
# zl = 'test jl, bjtu 323, 0, 323, 1 wechatSESS_ID=de2e1d47c50c59709ebb5ee102ea6f738092499495a61e5e SERVERID=b9fc7bd86d2eed91b23d7347e0ee995e|1572577791|1572577787 -- 模式=今天'
zl = 'test tj, sxau wechatSESS_ID=0d9a58a026826c2f6aebb2d3926eb01d -- 平台=来选座'
# zl = 'test cx, wnsfxy'
# zl = 'test jl,wnsfxy, 10110, 0, 0 ,0, wechatSESS_ID=35ed243f92be7b748a21d53cce7179b9 -- 平台=来选座 模式=今天'
zl = 'test jl;sxau;10238;086;10238;004;wechatSESS_ID=0d9a58a026826c2f6aebb2d3926eb01d -- 平台=来选座'
res = handle_msg(userid='userid_test_' + str(i), content=zl, my_id='my_id_' + str(i), LOCAL=LOCAL)
mc.client_close()
debug_p('complete!\n', res)
|
twilio/rest/trusthub/v1/trust_products/trust_products_channel_endpoint_assignment.py
|
BrimmingDev/twilio-python
| 1,362 |
66336
|
<reponame>BrimmingDev/twilio-python
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class TrustProductsChannelEndpointAssignmentList(ListResource):
def __init__(self, version, trust_product_sid):
"""
Initialize the TrustProductsChannelEndpointAssignmentList
:param Version version: Version that contains the resource
:param trust_product_sid: The unique string that identifies the CustomerProfile resource.
:returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentList
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentList
"""
super(TrustProductsChannelEndpointAssignmentList, self).__init__(version)
# Path Solution
self._solution = {'trust_product_sid': trust_product_sid, }
self._uri = '/TrustProducts/{trust_product_sid}/ChannelEndpointAssignments'.format(**self._solution)
def create(self, channel_endpoint_type, channel_endpoint_sid):
"""
Create the TrustProductsChannelEndpointAssignmentInstance
:param unicode channel_endpoint_type: The type of channel endpoint
:param unicode channel_endpoint_sid: The sid of a channel endpoint
:returns: The created TrustProductsChannelEndpointAssignmentInstance
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance
"""
data = values.of({
'ChannelEndpointType': channel_endpoint_type,
'ChannelEndpointSid': channel_endpoint_sid,
})
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return TrustProductsChannelEndpointAssignmentInstance(
self._version,
payload,
trust_product_sid=self._solution['trust_product_sid'],
)
def stream(self, channel_endpoint_sid=values.unset,
channel_endpoint_sids=values.unset, limit=None, page_size=None):
"""
Streams TrustProductsChannelEndpointAssignmentInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode channel_endpoint_sid: The sid of a channel endpoint
:param unicode channel_endpoint_sids: comma separated list of channel endpoint sids
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
channel_endpoint_sid=channel_endpoint_sid,
channel_endpoint_sids=channel_endpoint_sids,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, channel_endpoint_sid=values.unset,
channel_endpoint_sids=values.unset, limit=None, page_size=None):
"""
Lists TrustProductsChannelEndpointAssignmentInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode channel_endpoint_sid: The sid of a channel endpoint
:param unicode channel_endpoint_sids: comma separated list of channel endpoint sids
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance]
"""
return list(self.stream(
channel_endpoint_sid=channel_endpoint_sid,
channel_endpoint_sids=channel_endpoint_sids,
limit=limit,
page_size=page_size,
))
def page(self, channel_endpoint_sid=values.unset,
channel_endpoint_sids=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of TrustProductsChannelEndpointAssignmentInstance records from the API.
Request is executed immediately
:param unicode channel_endpoint_sid: The sid of a channel endpoint
:param unicode channel_endpoint_sids: comma separated list of channel endpoint sids
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of TrustProductsChannelEndpointAssignmentInstance
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentPage
"""
data = values.of({
'ChannelEndpointSid': channel_endpoint_sid,
'ChannelEndpointSids': channel_endpoint_sids,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return TrustProductsChannelEndpointAssignmentPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of TrustProductsChannelEndpointAssignmentInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of TrustProductsChannelEndpointAssignmentInstance
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return TrustProductsChannelEndpointAssignmentPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a TrustProductsChannelEndpointAssignmentContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext
"""
return TrustProductsChannelEndpointAssignmentContext(
self._version,
trust_product_sid=self._solution['trust_product_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a TrustProductsChannelEndpointAssignmentContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext
"""
return TrustProductsChannelEndpointAssignmentContext(
self._version,
trust_product_sid=self._solution['trust_product_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Trusthub.V1.TrustProductsChannelEndpointAssignmentList>'
class TrustProductsChannelEndpointAssignmentPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the TrustProductsChannelEndpointAssignmentPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param trust_product_sid: The unique string that identifies the CustomerProfile resource.
:returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentPage
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentPage
"""
super(TrustProductsChannelEndpointAssignmentPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of TrustProductsChannelEndpointAssignmentInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance
"""
return TrustProductsChannelEndpointAssignmentInstance(
self._version,
payload,
trust_product_sid=self._solution['trust_product_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Trusthub.V1.TrustProductsChannelEndpointAssignmentPage>'
class TrustProductsChannelEndpointAssignmentContext(InstanceContext):
def __init__(self, version, trust_product_sid, sid):
"""
Initialize the TrustProductsChannelEndpointAssignmentContext
:param Version version: Version that contains the resource
:param trust_product_sid: The unique string that identifies the resource.
:param sid: The unique string that identifies the resource
:returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext
"""
super(TrustProductsChannelEndpointAssignmentContext, self).__init__(version)
# Path Solution
self._solution = {'trust_product_sid': trust_product_sid, 'sid': sid, }
self._uri = '/TrustProducts/{trust_product_sid}/ChannelEndpointAssignments/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch the TrustProductsChannelEndpointAssignmentInstance
:returns: The fetched TrustProductsChannelEndpointAssignmentInstance
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return TrustProductsChannelEndpointAssignmentInstance(
self._version,
payload,
trust_product_sid=self._solution['trust_product_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the TrustProductsChannelEndpointAssignmentInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Trusthub.V1.TrustProductsChannelEndpointAssignmentContext {}>'.format(context)
class TrustProductsChannelEndpointAssignmentInstance(InstanceResource):
def __init__(self, version, payload, trust_product_sid, sid=None):
"""
Initialize the TrustProductsChannelEndpointAssignmentInstance
:returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance
"""
super(TrustProductsChannelEndpointAssignmentInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'trust_product_sid': payload.get('trust_product_sid'),
'account_sid': payload.get('account_sid'),
'channel_endpoint_type': payload.get('channel_endpoint_type'),
'channel_endpoint_sid': payload.get('channel_endpoint_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'trust_product_sid': trust_product_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: TrustProductsChannelEndpointAssignmentContext for this TrustProductsChannelEndpointAssignmentInstance
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext
"""
if self._context is None:
self._context = TrustProductsChannelEndpointAssignmentContext(
self._version,
trust_product_sid=self._solution['trust_product_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def trust_product_sid(self):
"""
:returns: The unique string that identifies the CustomerProfile resource.
:rtype: unicode
"""
return self._properties['trust_product_sid']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def channel_endpoint_type(self):
"""
:returns: The type of channel endpoint
:rtype: unicode
"""
return self._properties['channel_endpoint_type']
@property
def channel_endpoint_sid(self):
"""
:returns: The SID of the channel endpoint
:rtype: unicode
"""
return self._properties['channel_endpoint_sid']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def url(self):
"""
:returns: The absolute URL of the ChannelEndpointAssignment resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch the TrustProductsChannelEndpointAssignmentInstance
:returns: The fetched TrustProductsChannelEndpointAssignmentInstance
:rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the TrustProductsChannelEndpointAssignmentInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Trusthub.V1.TrustProductsChannelEndpointAssignmentInstance {}>'.format(context)
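# Hedged usage sketch (illustrative only, not part of the generated SDK).
# It assumes a configured twilio.rest.Client and an existing trust product SID;
# the accessor name below follows the usual Twilio Python naming convention and
# should be checked against the installed library version.
#
#   from twilio.rest import Client
#   client = Client(account_sid, auth_token)
#   assignments = (client.trusthub.v1
#                  .trust_products('BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
#                  .trust_products_channel_endpoint_assignments
#                  .list(limit=20))
#   for record in assignments:
#       print(record.sid, record.channel_endpoint_type, record.channel_endpoint_sid)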
|
fastapi_sqlmodel_typer/routes/security.py
|
tzengwei/fastapi-sqlmodel-typer
| 123 |
66342
|
<filename>fastapi_sqlmodel_typer/routes/security.py
from datetime import timedelta
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from ..config import settings
from ..security import (
Token,
User,
authenticate_user,
create_access_token,
get_user,
)
ACCESS_TOKEN_EXPIRE_MINUTES = settings.security.access_token_expire_minutes
router = APIRouter()
@router.post("/token", response_model=Token)
async def login_for_access_token(
form_data: OAuth2PasswordRequestForm = Depends(),
):
user = authenticate_user(get_user, form_data.username, form_data.password)
if not user or not isinstance(user, User):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
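# Hedged client-side sketch (not part of this module): the /token route above
# implements the standard OAuth2 password flow, so a token can be requested with
# form-encoded credentials. The base URL and credentials are assumptions.
#
#   import httpx
#   resp = httpx.post(
#       "http://localhost:8000/token",
#       data={"username": "admin", "password": "secret"},
#   )
#   resp.raise_for_status()
#   token = resp.json()["access_token"]
#   headers = {"Authorization": f"Bearer {token}"}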
|
systrace/systrace/tracing_controller.py
|
tingshao/catapult
| 138 |
66347
|
<gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Tracing controller class. This class manages
multiple tracing agents and collects data from all of them. It also
manages the clock sync process.
'''
import ast
import json
import sys
import tempfile
import uuid
import py_utils
from systrace import trace_result
from systrace import tracing_agents
from py_trace_event import trace_event
TRACE_DATA_CONTROLLER_NAME = 'systraceController'
def ControllerAgentClockSync(issue_ts, name):
"""Record the clock sync marker for controller tracing agent.
Unlike with the other tracing agents, the tracing controller should not
call this directly. Rather, it is called via callback from the other
tracing agents when they write a trace.
"""
trace_event.clock_sync(name, issue_ts=issue_ts)
class TracingControllerAgent(tracing_agents.TracingAgent):
def __init__(self):
super(TracingControllerAgent, self).__init__()
self._log_path = None
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, config, timeout=None):
"""Start tracing for the controller tracing agent.
Start tracing for the controller tracing agent. Note that
the tracing controller records the "controller side"
of the clock sync records, and nothing else.
"""
del config
if not trace_event.trace_can_enable():
raise RuntimeError, ('Cannot enable trace_event;'
' ensure py_utils is in PYTHONPATH')
controller_log_file = tempfile.NamedTemporaryFile(delete=False)
self._log_path = controller_log_file.name
controller_log_file.close()
trace_event.trace_enable(self._log_path)
return True
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StopAgentTracing(self, timeout=None):
"""Stops tracing for the controller tracing agent.
"""
# pylint: disable=no-self-use
# This function doesn't use self, but making it a member function
# for consistency with the other TracingAgents
trace_event.trace_disable()
return True
@py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
def GetResults(self, timeout=None):
"""Gets the log output from the controller tracing agent.
This output only contains the "controller side" of the clock sync records.
"""
with open(self._log_path, 'r') as outfile:
data = ast.literal_eval(outfile.read() + ']')
# Explicitly set its own clock domain. This will stop the Systrace clock
# domain from incorrectly being collapsed into the on device clock domain.
formatted_data = {
'traceEvents': data,
'metadata': {
'clock-domain': 'SYSTRACE',
}
}
return trace_result.TraceResult(TRACE_DATA_CONTROLLER_NAME,
json.dumps(formatted_data))
def SupportsExplicitClockSync(self):
"""Returns whether this supports explicit clock sync.
Although the tracing controller conceptually supports explicit clock
sync, it is not an agent controlled by other controllers so it does not
define RecordClockSyncMarker (rather, the recording of the "controller
side" of the clock sync marker is done in _IssueClockSyncMarker). Thus,
SupportsExplicitClockSync must return false.
"""
return False
# pylint: disable=unused-argument
def RecordClockSyncMarker(self, sync_id, callback):
raise NotImplementedError
class TracingController(object):
def __init__(self, agents_with_config, controller_config):
"""Create tracing controller.
Create a tracing controller object. Note that the tracing
controller is also a tracing agent.
Args:
agents_with_config: List of tracing agents for this controller with the
corresponding tracing configuration objects.
controller_config: Configuration options for the tracing controller.
"""
self._child_agents = None
self._child_agents_with_config = agents_with_config
self._controller_agent = TracingControllerAgent()
self._controller_config = controller_config
self._trace_in_progress = False
self.all_results = None
@property
def get_child_agents(self):
return self._child_agents
def StartTracing(self):
"""Start tracing for all tracing agents.
This function starts tracing for both the controller tracing agent
and the child tracing agents.
Returns:
Boolean indicating whether or not the start tracing succeeded.
Start tracing is considered successful if at least the
controller tracing agent was started.
"""
assert not self._trace_in_progress, 'Trace already in progress.'
self._trace_in_progress = True
# Start the controller tracing agents. Controller tracing agent
# must be started successfully to proceed.
if not self._controller_agent.StartAgentTracing(
self._controller_config,
timeout=self._controller_config.timeout):
print 'Unable to start controller tracing agent.'
return False
# Start the child tracing agents.
succ_agents = []
for agent_and_config in self._child_agents_with_config:
agent = agent_and_config.agent
config = agent_and_config.config
if agent.StartAgentTracing(config,
timeout=self._controller_config.timeout):
succ_agents.append(agent)
else:
print 'Agent %s not started.' % str(agent)
# Print warning if all agents not started.
na = len(self._child_agents_with_config)
ns = len(succ_agents)
if ns < na:
print 'Warning: Only %d of %d tracing agents started.' % (ns, na)
self._child_agents = succ_agents
return True
def StopTracing(self):
"""Issue clock sync marker and stop tracing for all tracing agents.
This function stops both the controller tracing agent
and the child tracing agents. It issues a clock sync marker prior
to stopping tracing.
Returns:
Boolean indicating whether or not the stop tracing succeeded
for all agents.
"""
assert self._trace_in_progress, 'No trace in progress.'
self._trace_in_progress = False
# Issue the clock sync marker and stop the child tracing agents.
self._IssueClockSyncMarker()
succ_agents = []
for agent in self._child_agents:
if agent.StopAgentTracing(timeout=self._controller_config.timeout):
succ_agents.append(agent)
else:
print 'Agent %s not stopped.' % str(agent)
# Stop the controller tracing agent. Controller tracing agent
# must be stopped successfully to proceed.
if not self._controller_agent.StopAgentTracing(
timeout=self._controller_config.timeout):
print 'Unable to stop controller tracing agent.'
return False
# Print warning if all agents not stopped.
na = len(self._child_agents)
ns = len(succ_agents)
if ns < na:
print 'Warning: Only %d of %d tracing agents stopped.' % (ns, na)
self._child_agents = succ_agents
# Collect the results from all the stopped tracing agents.
all_results = []
for agent in self._child_agents + [self._controller_agent]:
try:
result = agent.GetResults(
timeout=self._controller_config.collection_timeout)
if not result:
print 'Warning: Timeout when getting results from %s.' % str(agent)
continue
if result.source_name in [r.source_name for r in all_results]:
print ('Warning: Duplicate tracing agents named %s.' %
result.source_name)
all_results.append(result)
# Check for exceptions. If any exceptions are seen, reraise and abort.
# Note that a timeout exception will be swallowed by the timeout
# mechanism and will not get to that point (it will return False instead
# of the trace result, which will be dealt with above)
except:
print 'Warning: Exception getting results from %s:' % str(agent)
print sys.exc_info()[0]
raise
self.all_results = all_results
return all_results
def GetTraceType(self):
"""Return a string representing the child agents that are being traced."""
sorted_agents = sorted(map(str, self._child_agents))
return ' + '.join(sorted_agents)
def _IssueClockSyncMarker(self):
"""Issue clock sync markers to all the child tracing agents."""
for agent in self._child_agents:
if agent.SupportsExplicitClockSync():
sync_id = GetUniqueSyncID()
agent.RecordClockSyncMarker(sync_id, ControllerAgentClockSync)
def GetUniqueSyncID():
"""Get a unique sync ID.
Gets a unique sync ID by generating a UUID and converting it to a string
(since UUIDs are not JSON serializable)
"""
return str(uuid.uuid4())
class AgentWithConfig(object):
def __init__(self, agent, config):
self.agent = agent
self.config = config
def CreateAgentsWithConfig(options, modules):
"""Create tracing agents.
This function will determine which tracing agents are valid given the
options and create those agents along with their corresponding configuration
object.
Args:
options: The command-line options.
modules: The modules for either Systrace or profile_chrome.
TODO(washingtonp): After all profile_chrome agents are in
Systrace, this parameter will no longer be valid.
Returns:
A list of AgentWithConfig options containing agents and their corresponding
configuration object.
"""
result = []
for module in modules:
config = module.get_config(options)
agent = module.try_create_agent(config)
if agent and config:
result.append(AgentWithConfig(agent, config))
return [x for x in result if x and x.agent]
class TracingControllerConfig(tracing_agents.TracingConfig):
def __init__(self, output_file, trace_time, write_json,
link_assets, asset_dir, timeout, collection_timeout,
device_serial_number, target, trace_buf_size):
tracing_agents.TracingConfig.__init__(self)
self.output_file = output_file
self.trace_time = trace_time
self.write_json = write_json
self.link_assets = link_assets
self.asset_dir = asset_dir
self.timeout = timeout
self.collection_timeout = collection_timeout
self.device_serial_number = device_serial_number
self.target = target
self.trace_buf_size = trace_buf_size
def GetControllerConfig(options):
return TracingControllerConfig(options.output_file, options.trace_time,
options.write_json,
options.link_assets, options.asset_dir,
options.timeout, options.collection_timeout,
options.device_serial_number, options.target,
options.trace_buf_size)
def GetChromeStartupControllerConfig(options):
return TracingControllerConfig(None, options.trace_time,
options.write_json, None, None, None, None,
None, None, options.trace_buf_size)
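# Hedged usage sketch (not from the Chromium sources): shows only the intended
# control flow of the classes above; `options` and `modules` are assumed to be
# supplied by the systrace command-line front end.
#
#   agents_with_config = CreateAgentsWithConfig(options, modules)
#   controller_config = GetControllerConfig(options)
#   controller = TracingController(agents_with_config, controller_config)
#   if controller.StartTracing():
#     # ... run the workload being traced ...
#     results = controller.StopTracing()  # list of trace_result.TraceResult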
|
edl/Library/gpt.py
|
koyuyesil/edl
| 514 |
66348
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# (c) B.Kerler 2018-2021
import argparse
import os
import sys
import logging
from enum import Enum
from struct import unpack, pack
from binascii import hexlify
try:
from edl.Library.utils import LogBase, structhelper
except:
from utils import LogBase, structhelper
class gpt(metaclass=LogBase):
class gpt_header:
def __init__(self, data):
sh = structhelper(data)
self.signature = sh.bytes(8)
self.revision = sh.dword()
self.header_size = sh.dword()
self.crc32 = sh.dword()
self.reserved = sh.dword()
self.current_lba = sh.qword()
self.backup_lba = sh.qword()
self.first_usable_lba = sh.qword()
self.last_usable_lba = sh.qword()
self.disk_guid = sh.bytes(16)
self.part_entry_start_lba = sh.qword()
self.num_part_entries = sh.dword()
self.part_entry_size = sh.dword()
class gpt_partition:
def __init__(self, data):
sh = structhelper(data)
self.type = sh.bytes(16)
self.unique = sh.bytes(16)
self.first_lba = sh.qword()
self.last_lba = sh.qword()
self.flags = sh.qword()
self.name = sh.string(72)
class efi_type(Enum):
EFI_UNUSED = 0x00000000
EFI_MBR = 0x024DEE41
EFI_SYSTEM = 0xC12A7328
EFI_BIOS_BOOT = 0x21686148
EFI_IFFS = 0xD3BFE2DE
EFI_SONY_BOOT = 0xF4019732
EFI_LENOVO_BOOT = 0xBFBFAFE7
EFI_MSR = 0xE3C9E316
EFI_BASIC_DATA = 0xEBD0A0A2
EFI_LDM_META = 0x5808C8AA
EFI_LDM = 0xAF9B60A0
EFI_RECOVERY = 0xDE94BBA4
EFI_GPFS = 0x37AFFC90
EFI_STORAGE_SPACES = 0xE75CAF8F
EFI_HPUX_DATA = 0x75894C1E
EFI_HPUX_SERVICE = 0xE2A1E728
EFI_LINUX_DAYA = 0x0FC63DAF
EFI_LINUX_RAID = 0xA19D880F
EFI_LINUX_ROOT32 = 0x44479540
EFI_LINUX_ROOT64 = 0x4F68BCE3
EFI_LINUX_ROOT_ARM32 = 0x69DAD710
EFI_LINUX_ROOT_ARM64 = 0xB921B045
EFI_LINUX_SWAP = 0x0657FD6D
EFI_LINUX_LVM = 0xE6D6D379
EFI_LINUX_HOME = 0x933AC7E1
EFI_LINUX_SRV = 0x3B8F8425
EFI_LINUX_DM_CRYPT = 0x7FFEC5C9
EFI_LINUX_LUKS = 0xCA7D7CCB
EFI_LINUX_RESERVED = 0x8DA63339
EFI_FREEBSD_BOOT = 0x83BD6B9D
EFI_FREEBSD_DATA = 0x516E7CB4
EFI_FREEBSD_SWAP = 0x516E7CB5
EFI_FREEBSD_UFS = 0x516E7CB6
EFI_FREEBSD_VINUM = 0x516E7CB8
EFI_FREEBSD_ZFS = 0x516E7CBA
EFI_OSX_HFS = 0x48465300
EFI_OSX_UFS = 0x55465300
EFI_OSX_ZFS = 0x6A898CC3
EFI_OSX_RAID = 0x52414944
EFI_OSX_RAID_OFFLINE = 0x52414944
EFI_OSX_RECOVERY = 0x426F6F74
EFI_OSX_LABEL = 0x4C616265
EFI_OSX_TV_RECOVERY = 0x5265636F
EFI_OSX_CORE_STORAGE = 0x53746F72
EFI_SOLARIS_BOOT = 0x6A82CB45
EFI_SOLARIS_ROOT = 0x6A85CF4D
EFI_SOLARIS_SWAP = 0x6A87C46F
EFI_SOLARIS_BACKUP = 0x6A8B642B
EFI_SOLARIS_USR = 0x6A898CC3
EFI_SOLARIS_VAR = 0x6A8EF2E9
EFI_SOLARIS_HOME = 0x6A90BA39
EFI_SOLARIS_ALTERNATE = 0x6A9283A5
EFI_SOLARIS_RESERVED1 = 0x6A945A3B
EFI_SOLARIS_RESERVED2 = 0x6A9630D1
EFI_SOLARIS_RESERVED3 = 0x6A980767
EFI_SOLARIS_RESERVED4 = 0x6A96237F
EFI_SOLARIS_RESERVED5 = 0x6A8D2AC7
EFI_NETBSD_SWAP = 0x49F48D32
EFI_NETBSD_FFS = 0x49F48D5A
EFI_NETBSD_LFS = 0x49F48D82
EFI_NETBSD_RAID = 0x49F48DAA
EFI_NETBSD_CONCAT = 0x2DB519C4
EFI_NETBSD_ENCRYPT = 0x2DB519EC
EFI_CHROMEOS_KERNEL = 0xFE3A2A5D
EFI_CHROMEOS_ROOTFS = 0x3CB8E202
EFI_CHROMEOS_FUTURE = 0x2E0A753D
EFI_HAIKU = 0x42465331
EFI_MIDNIGHTBSD_BOOT = 0x85D5E45E
EFI_MIDNIGHTBSD_DATA = 0x85D5E45A
EFI_MIDNIGHTBSD_SWAP = 0x85D5E45B
EFI_MIDNIGHTBSD_UFS = 0x0394EF8B
EFI_MIDNIGHTBSD_VINUM = 0x85D5E45C
EFI_MIDNIGHTBSD_ZFS = 0x85D5E45D
EFI_CEPH_JOURNAL = 0x45B0969E
EFI_CEPH_ENCRYPT = 0x45B0969E
EFI_CEPH_OSD = 0x4FBD7E29
EFI_CEPH_ENCRYPT_OSD = 0x4FBD7E29
EFI_CEPH_CREATE = 0x89C57F98
EFI_CEPH_ENCRYPT_CREATE = 0x89C57F98
EFI_OPENBSD = 0x824CC7A0
EFI_QNX = 0xCEF5A9AD
EFI_PLAN9 = 0xC91818F9
EFI_VMWARE_VMKCORE = 0x9D275380
EFI_VMWARE_VMFS = 0xAA31E02A
EFI_VMWARE_RESERVED = 0x9198EFFC
def __init__(self, num_part_entries=0, part_entry_size=0, part_entry_start_lba=0, loglevel=logging.INFO, *args,
**kwargs):
self.num_part_entries = num_part_entries
self.__logger = self.__logger
self.part_entry_size = part_entry_size
self.part_entry_start_lba = part_entry_start_lba
self.totalsectors = None
self.header = None
self.sectorsize = None
self.partentries = []
self.error = self.__logger.error
self.__logger.setLevel(loglevel)
if loglevel == logging.DEBUG:
logfilename = "log.txt"
fh = logging.FileHandler(logfilename)
self.__logger.addHandler(fh)
def parseheader(self, gptdata, sectorsize=512):
return self.gpt_header(gptdata[sectorsize:sectorsize + 0x5C])
def parse(self, gptdata, sectorsize=512):
self.header = self.gpt_header(gptdata[sectorsize:sectorsize + 0x5C])
self.sectorsize = sectorsize
if self.header.signature != b"EFI PART":
return False
if self.header.revision != 0x10000:
self.error("Unknown GPT revision.")
return False
if self.part_entry_start_lba != 0:
start = self.part_entry_start_lba
else:
start = self.header.part_entry_start_lba * sectorsize
entrysize = self.header.part_entry_size
self.partentries = []
class partf:
unique = b""
first_lba = 0
last_lba = 0
flags = 0
sector = 0
sectors = 0
type = b""
name = ""
num_part_entries = self.header.num_part_entries
for idx in range(0, num_part_entries):
data = gptdata[start + (idx * entrysize):start + (idx * entrysize) + entrysize]
if int(hexlify(data[16:32]), 16) == 0:
break
partentry = self.gpt_partition(data)
pa = partf()
guid1 = unpack("<I", partentry.unique[0:0x4])[0]
guid2 = unpack("<H", partentry.unique[0x4:0x6])[0]
guid3 = unpack("<H", partentry.unique[0x6:0x8])[0]
guid4 = unpack("<H", partentry.unique[0x8:0xA])[0]
guid5 = hexlify(partentry.unique[0xA:0x10]).decode('utf-8')
pa.unique = "{:08x}-{:04x}-{:04x}-{:04x}-{}".format(guid1, guid2, guid3, guid4, guid5)
pa.sector = partentry.first_lba
pa.sectors = partentry.last_lba - partentry.first_lba + 1
pa.flags = partentry.flags
type = int(unpack("<I", partentry.type[0:0x4])[0])
try:
pa.type = self.efi_type(type).name
except:
pa.type = hex(type)
pa.name = partentry.name.replace(b"\x00\x00", b"").decode('utf-16')
if pa.type == "EFI_UNUSED":
continue
self.partentries.append(pa)
self.totalsectors = self.header.last_usable_lba + 34
return True
def print(self):
print(self.tostring())
def tostring(self):
mstr = "\nGPT Table:\n-------------\n"
for partition in self.partentries:
mstr += ("{:20} Offset 0x{:016x}, Length 0x{:016x}, Flags 0x{:08x}, UUID {}, Type {}\n".format(
partition.name + ":", partition.sector * self.sectorsize, partition.sectors * self.sectorsize,
partition.flags, partition.unique, partition.type))
mstr += ("\nTotal disk size:0x{:016x}, sectors:0x{:016x}\n".format(self.totalsectors * self.sectorsize,
self.totalsectors))
return mstr
def generate_rawprogram(self, lun, sectorsize, directory):
fname = "rawprogram" + str(lun) + ".xml"
with open(os.path.join(directory, fname), "wb") as wf:
mstr = "<?xml version=\"1.0\" ?>\n<data>\n"
partofsingleimage = "false"
readbackverify = "false"
sparse = "false"
for partition in self.partentries:
filename = partition.name + ".bin"
mstr += f"\t<program SECTOR_SIZE_IN_BYTES=\"{sectorsize}\" " + \
f"file_sector_offset=\"0\" " \
f"filename=\"{filename}\" " + \
f"label=\"{partition.name}\" " \
f"num_partition_sectors=\"{partition.sectors}\" " + \
f"partofsingleimage=\"{partofsingleimage}\" " \
f"physical_partition_number=\"{str(lun)}\" " + \
f"readbackverify=\"{readbackverify}\" " \
f"size_in_KB=\"{(partition.sectors * sectorsize / 1024):.1f}\" " \
f"sparse=\"{sparse}\" " + \
f"start_byte_hex=\"{hex(partition.sector * sectorsize)}\" " \
f"start_sector=\"{partition.sector}\"/>\n"
partofsingleimage = "true"
sectors = self.header.first_usable_lba
mstr += f"\t<program SECTOR_SIZE_IN_BYTES=\"{sectorsize}\" " + \
f"file_sector_offset=\"0\" " + \
f"filename=\"gpt_main{str(lun)}.bin\" " + \
f"label=\"PrimaryGPT\" " + \
f"num_partition_sectors=\"{sectors}\" " + \
f"partofsingleimage=\"{partofsingleimage}\" " + \
f"physical_partition_number=\"{str(lun)}\" " + \
f"readbackverify=\"{readbackverify}\" " + \
f"size_in_KB=\"{(sectors * sectorsize / 1024):.1f}\" " + \
f"sparse=\"{sparse}\" " + \
f"start_byte_hex=\"0x0\" " + \
f"start_sector=\"0\"/>\n"
sectors = self.header.first_usable_lba - 1
mstr += f"\t<program SECTOR_SIZE_IN_BYTES=\"{sectorsize}\" " + \
f"file_sector_offset=\"0\" " + \
f"filename=\"gpt_backup{str(lun)}.bin\" " + \
f"label=\"BackupGPT\" " + \
f"num_partition_sectors=\"{sectors}\" " + \
f"partofsingleimage=\"{partofsingleimage}\" " + \
f"physical_partition_number=\"{str(lun)}\" " + \
f"readbackverify=\"{readbackverify}\" " + \
f"size_in_KB=\"{(sectors * sectorsize / 1024):.1f}\" " + \
f"sparse=\"{sparse}\" " + \
f"start_byte_hex=\"({sectorsize}*NUM_DISK_SECTORS)-{sectorsize * sectors}.\" " + \
f"start_sector=\"NUM_DISK_SECTORS-{sectors}.\"/>\n"
mstr += "</data>"
wf.write(bytes(mstr, 'utf-8'))
print(f"Wrote partition xml as {fname}")
def print_gptfile(self, filename):
try:
filesize = os.stat(filename).st_size
with open(filename, "rb") as rf:
size = min(32 * 4096, filesize)
data = rf.read(size)
for sectorsize in [512, 4096]:
result = self.parse(data, sectorsize)
if result:
break
if result:
print(self.tostring())
return result
except Exception as e:
self.error(str(e))
return ""
def test_gpt(self):
res = self.print_gptfile(os.path.join("TestFiles", "gpt_sm8180x.bin"))
assert res, "GPT Partition wasn't decoded properly"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="GPT utils")
subparsers = parser.add_subparsers(dest="command", help='sub-command help')
parser_print = subparsers.add_parser("print", help="Print the gpt table")
parser_print.add_argument("image", help="The path of the GPT disk image")
parser_test = subparsers.add_parser("test", help="Run self-test")
parser_extract = subparsers.add_parser("extract", help="Extract the partitions")
parser_extract.add_argument("image", help="The path of the GPT disk image")
parser_extract.add_argument("-out", "-o", help="The path to extract the partitions")
parser_extract.add_argument("-partition", "-p", help="Extract specific partitions (separated by comma)")
args = parser.parse_args()
if args.command not in ["print", "extract", "test"]:
parser.error("Command is mandatory")
gp = gpt()
if args.command == "print":
if not os.path.exists(args.image):
print(f"File {args.image} does not exist. Aborting.")
sys.exit(1)
gp.print_gptfile(args.image)
elif args.command == "test":
gp.test_gpt()
elif args.command == "extract":
if not os.path.exists(args.image):
print(f"File {args.image} does not exist. Aborting.")
sys.exit(1)
filesize = os.stat(args.image).st_size
with open(args.image, "rb", buffering=1024 * 1024) as rf:
data = rf.read(min(32 * 4096, filesize))
ssize = None
for sectorsize in [512, 4096]:
result = gp.parse(data, sectorsize)
if result:
ssize = sectorsize
break
if ssize is not None:
for partition in gp.partentries:
if args.partition is not None:
if partition.name not in args.partition.split(","):
continue
name = partition.name
start = partition.sector * ssize
length = partition.sectors * ssize
out = args.out
if out is None:
out = "."
if not os.path.exists(out):
os.makedirs(out)
filename = os.path.join(out, name)
rf.seek(start)
bytestoread = length
with open(filename, "wb", buffering=1024 * 1024) as wf:
while bytestoread > 0:
size = min(bytestoread, 0x200000)
data = rf.read(size)
wf.write(data)
bytestoread -= size
print(f"Extracting {name} to {filename} at {hex(start)}, length {hex(length)}")
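# Hedged library-usage sketch (illustrative, not part of the module): parse a
# raw GPT dump that was read from disk and print the decoded partition table.
# The file name and sector size are assumptions.
#
#   gp = gpt()
#   with open("gpt_main0.bin", "rb") as rf:
#       data = rf.read(32 * 4096)
#   if gp.parse(data, sectorsize=4096):
#       print(gp.tostring())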
|
lib/matplotlib/pylab.py
|
jbbrokaw/matplotlib
| 113 |
66367
|
<gh_stars>100-1000
"""
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
MATLAB |reg| [*]_ analogs and similar arguments.
.. |reg| unicode:: 0xAE
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
autoscale - turn axis autoscaling on or off, and apply it
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
violinplot - make a violin plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imsave - save array as an image file
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
locator_params - adjust parameters used in locating axis ticks
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
margins - set margins used in autoscaling
pause - pause for a specified interval
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make one subplot (numrows, numcols, axesnum)
subplots - make a figure with a set of (numrows, numcols) subplots
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
tick_params - control the appearance of ticks and tick labels
ticklabel_format - control the format of tick labels
title - add a title to the current axes
tricontour - make a contour plot on a triangular grid
tricontourf - make a filled contour plot on a triangular grid
tripcolor - make a pseudocolor plot on a triangular grid
triplot - plot a triangular grid
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigenvectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the columns of a matrix left/right
flipud - flip the rows of a matrix up/down
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
amax - the maximum along dimension m
amin - the minimum along dimension m
corrcoef - correlation coefficient
cov - covariance matrix
mean - the mean along dimension m
median - the median along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
ksdensity - the kernel density estimate
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - Deprecated--please use loadtxt.
loadtxt - load ASCII data into array.
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - Deprecated--please use savetxt.
savetxt - save an array to an ASCII file.
trapz - trapezoidal integration
__end
.. [*] MATLAB is a registered trademark of The MathWorks, Inc.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import sys, warnings
from matplotlib.cbook import flatten, is_string_like, exception_to_str, \
silent_list, iterable, dedent
import matplotlib as mpl
# make mpl.finance module available for backwards compatibility, in case folks
# using pylab interface depended on not having to import it
import matplotlib.finance
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates # Do we need this at all?
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
## We are still importing too many things from mlab; more cleanup is needed.
from matplotlib.mlab import griddata, stineman_interp, slopes, \
inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from matplotlib.mlab import window_hanning, window_none, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, \
find, longest_contiguous_ones, longest_ones, prepca, \
prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, \
get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, movavg, \
exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, identity, \
base_repr, binary_repr, log2, ispower2, \
rec_append_fields, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
# don't let numpy's datetime hide stdlib
import datetime
# This is needed, or bytes will be numpy.random.bytes from
# "from numpy.random import *" above
bytes = __builtins__['bytes']
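# Hedged usage sketch (not part of this module): because pylab re-exports the
# numpy and pyplot namespaces, a quick interactive plot can be written as:
#
#   from matplotlib.pylab import *   # noqa: F401,F403
#   x = linspace(0, 2 * pi, 200)
#   plot(x, sin(x))
#   xlabel('x'); ylabel('sin(x)'); title('pylab quick plot')
#   show()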
|
pymagnitude/third_party/allennlp/tests/models/esim_test.py
|
tpeng/magnitude
| 1,520 |
66397
|
# pylint: disable=no-self-use,invalid-name
from __future__ import division
from __future__ import absolute_import
import numpy
from numpy.testing import assert_almost_equal
from allennlp.common.testing import ModelTestCase
class TestESIM(ModelTestCase):
def setUp(self):
super(TestESIM, self).setUp()
self.set_up_model(self.FIXTURES_ROOT / u'esim' / u'experiment.json',
self.FIXTURES_ROOT / u'data' / u'snli.jsonl')
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
assert_almost_equal(numpy.sum(output_dict[u"label_probs"][0].data.numpy(), -1), 1, decimal=6)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
|
Exercise7/utils.py
|
camilodelvalle/machine-learning-coursera
| 118 |
66401
|
import sys
import numpy as np
from matplotlib import pyplot
from matplotlib.animation import FuncAnimation
import matplotlib as mpl
sys.path.append('..')
from submission import SubmissionBase
def displayData(X, example_width=None, figsize=(10, 10)):
"""
Displays 2D data in a nice grid.
Parameters
----------
X : array_like
The input data of size (m x n) where m is the number of examples and n is the number of
features.
example_width : int, optional
The width of each 2-D image in pixels. If not provided, the image is assumed to be square,
and the width is the floor of the square root of total number of pixels.
figsize : tuple, optional
A 2-element tuple indicating the width and height of figure in inches.
"""
# Compute rows, cols
if X.ndim == 2:
m, n = X.shape
elif X.ndim == 1:
n = X.size
m = 1
X = X[None] # Promote to a 2 dimensional array
else:
raise IndexError('Input X should be 1 or 2 dimensional.')
example_width = example_width or int(np.round(np.sqrt(n)))
example_height = int(n / example_width)
# Compute number of items to display
display_rows = int(np.floor(np.sqrt(m)))
display_cols = int(np.ceil(m / display_rows))
fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize)
fig.subplots_adjust(wspace=0.025, hspace=0.025)
ax_array = [ax_array] if m == 1 else ax_array.ravel()
for i, ax in enumerate(ax_array):
ax.imshow(X[i].reshape(example_height, example_width, order='F'), cmap='gray')
ax.axis('off')
def featureNormalize(X):
"""
Normalizes the features in X returns a normalized version of X where the mean value of each
feature is 0 and the standard deviation is 1. This is often a good preprocessing step to do when
working with learning algorithms.
Parameters
----------
X : array_like
A dataset which is an (m x n) matrix, where m is the number of examples,
and n is the number of dimensions for each example.
Returns
-------
X_norm : array_like
The normalized input dataset.
mu : array_like
A vector of size n corresponding to the mean for each dimension across all examples.
sigma : array_like
A vector of size n corresponding to the standard deviations for each dimension across
all examples.
"""
mu = np.mean(X, axis=0)
X_norm = X - mu
sigma = np.std(X_norm, axis=0, ddof=1)
X_norm /= sigma
return X_norm, mu, sigma
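# Hedged worked example for featureNormalize (illustration only; the input
# values are assumptions): with ddof=1 the per-column sample standard
# deviation is used.
#
#   >>> X = np.array([[1.0, 200.0], [2.0, 300.0], [3.0, 400.0]])
#   >>> X_norm, mu, sigma = featureNormalize(X)
#   >>> mu       # array([  2., 300.])
#   >>> sigma    # array([  1., 100.])
#   >>> X_norm   # each column now has mean 0 and sample std 1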
def plotProgresskMeans(i, X, centroid_history, idx_history):
"""
A helper function that displays the progress of k-Means as it is running. It is intended for use
only with 2D data. It plots data points with colors assigned to each centroid. With the
previous centroids, it also plots a line between the previous locations and current locations
of the centroids.
Parameters
----------
i : int
Current iteration number of k-means. Used for matplotlib animation function.
X : array_like
The dataset, which is a matrix (m x n). Note since the plot only supports 2D data, n should
be equal to 2.
centroid_history : list
A list of computed centroids for all iteration.
idx_history : list
A list of computed assigned indices for all iterations.
"""
K = centroid_history[0].shape[0]
pyplot.gcf().clf()
cmap = pyplot.cm.rainbow
norm = mpl.colors.Normalize(vmin=0, vmax=2)
for k in range(K):
current = np.stack([c[k, :] for c in centroid_history[:i+1]], axis=0)
pyplot.plot(current[:, 0], current[:, 1],
'-Xk',
mec='k',
lw=2,
ms=10,
mfc=cmap(norm(k)),
mew=2)
pyplot.scatter(X[:, 0], X[:, 1],
c=idx_history[i],
cmap=cmap,
marker='o',
s=8**2,
linewidths=1,)
pyplot.grid(False)
pyplot.title('Iteration number %d' % (i+1))
def runkMeans(X, centroids, findClosestCentroids, computeCentroids,
max_iters=10, plot_progress=False):
"""
Runs the K-means algorithm.
Parameters
----------
X : array_like
The data set of size (m, n). Each row of X is a single example of n dimensions. The
data set is a total of m examples.
centroids : array_like
Initial centroid location for each cluster. This is a matrix of size (K, n). K is the total
number of clusters and n is the dimensions of each data point.
findClosestCentroids : func
A function (implemented by student) reference which computes the cluster assignment for
each example.
computeCentroids : func
A function (implemented by student) reference which computes the centroid of each cluster.
max_iters : int, optional
Specifies the total number of iterations of K-Means to execute.
plot_progress : bool, optional
A flag that indicates if the function should also plot its progress as the learning happens.
This is set to false by default.
Returns
-------
centroids : array_like
A (K x n) matrix of the computed (updated) centroids.
idx : array_like
A vector of size (m,) for cluster assignment for each example in the dataset. Each entry
in idx is within the range [0 ... K-1].
anim : FuncAnimation, optional
A matplotlib animation object which can be used to embed a video within the jupyter
notebook. This is only returned if `plot_progress` is `True`.
"""
K = centroids.shape[0]
idx = None
idx_history = []
centroid_history = []
for i in range(max_iters):
idx = findClosestCentroids(X, centroids)
if plot_progress:
idx_history.append(idx)
centroid_history.append(centroids)
centroids = computeCentroids(X, idx, K)
if plot_progress:
fig = pyplot.figure()
anim = FuncAnimation(fig, plotProgresskMeans,
frames=max_iters,
interval=500,
repeat_delay=2,
fargs=(X, centroid_history, idx_history))
return centroids, idx, anim
return centroids, idx
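# Hedged usage sketch (illustrative, not part of the original exercise):
# `findClosestCentroids` and `computeCentroids` are the student-implemented
# functions expected by the course; the data X and the choice of K are
# assumptions.
#
#   initial_centroids = X[np.random.choice(X.shape[0], 3, replace=False)]
#   centroids, idx = runkMeans(X, initial_centroids,
#                              findClosestCentroids, computeCentroids,
#                              max_iters=10)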
class Grader(SubmissionBase):
# Random Test Cases
X = np.sin(np.arange(1, 166)).reshape(15, 11, order='F')
Z = np.cos(np.arange(1, 122)).reshape(11, 11, order='F')
C = Z[:5, :]
idx = np.arange(1, 16) % 3
def __init__(self):
part_names = ['Find Closest Centroids (k-Means)',
'Compute Centroid Means (k-Means)',
'PCA',
'Project Data (PCA)',
'Recover Data (PCA)']
super().__init__('k-means-clustering-and-pca', part_names)
def __iter__(self):
for part_id in range(1, 6):
try:
func = self.functions[part_id]
# Each part has different expected arguments/different function
if part_id == 1:
res = 1 + func(self.X, self.C)
elif part_id == 2:
res = func(self.X, self.idx, 3)
elif part_id == 3:
U, S = func(self.X)
res = np.hstack([U.ravel('F'), np.diag(S).ravel('F')]).tolist()
elif part_id == 4:
res = func(self.X, self.Z, 5)
elif part_id == 5:
res = func(self.X[:, :5], self.Z, 5)
else:
raise KeyError
yield part_id, res
except KeyError:
yield part_id, 0
|
kivy/uix/carousel.py
|
Sentient07/kivy
| 317 |
66404
|
<reponame>Sentient07/kivy<filename>kivy/uix/carousel.py
'''
Carousel
========
.. versionadded:: 1.4.0
The :class:`Carousel` widget provides the classic mobile-friendly carousel view
where you can swipe between slides.
You can add any content to the carousel and use it horizontally or vertically.
The carousel can display pages in a loop or not.
Example::
class Example1(App):
def build(self):
carousel = Carousel(direction='right')
for i in range(10):
src = "http://placehold.it/480x270.png&text=slide-%d&.png" % i
image = Factory.AsyncImage(source=src, allow_stretch=True)
carousel.add_widget(image)
return carousel
Example1().run()
.. versionchanged:: 1.5.0
The carousel now supports active children, like the
:class:`~kivy.uix.scrollview.ScrollView`. It will detect a swipe gesture
according to :attr:`Carousel.scroll_timeout` and
:attr:`Carousel.scroll_distance`.
In addition, the container used for adding a slide is now hidden in
the API. We made a mistake by exposing it to the user. The impacted
properties are:
:attr:`Carousel.slides`, :attr:`Carousel.current_slide`,
:attr:`Carousel.previous_slide` and :attr:`Carousel.next_slide`.
'''
__all__ = ('Carousel', )
from functools import partial
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.animation import Animation
from kivy.uix.stencilview import StencilView
from kivy.uix.relativelayout import RelativeLayout
from kivy.properties import BooleanProperty, OptionProperty, AliasProperty, \
NumericProperty, ListProperty, ObjectProperty, StringProperty
class Carousel(StencilView):
'''Carousel class. See module documentation for more information.
'''
slides = ListProperty([])
'''List of slides inside the Carousel. The slides are added when a
widget is added to Carousel using add_widget().
:attr:`slides` is a :class:`~kivy.properties.ListProperty` and is
read-only.
'''
def _get_slides_container(self):
return [x.parent for x in self.slides]
slides_container = AliasProperty(_get_slides_container, None,
bind=('slides', ))
direction = OptionProperty('right',
options=('right', 'left', 'top', 'bottom'))
'''Specifies the direction in which the slides are ordered i.e. the
direction from which the user swipes to go from one slide to the next.
Can be `right`, `left`, 'top', or `bottom`. For example, with
the default value of `right`, the second slide is to the right
of the first and the user would swipe from the right towards the
left to get to the second slide.
:attr:`direction` is a :class:`~kivy.properties.OptionProperty` and
defaults to 'right'.
'''
min_move = NumericProperty(0.2)
'''Defines the minimal distance from the edge where the movement is
considered a swipe gesture and the Carousel will change its content.
This is a percentage of the Carousel width.
If the movement doesn't reach this minimal value, then the movement is
cancelled and the content is restored to its original position.
:attr:`min_move` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.2.
'''
anim_move_duration = NumericProperty(0.5)
'''Defines the duration of the Carousel animation between pages.
:attr:`anim_move_duration` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.5.
'''
anim_cancel_duration = NumericProperty(0.3)
'''Defines the duration of the animation when a swipe movement is not
accepted. This is generally when the user doesn't swipe enough.
See :attr:`min_move`.
:attr:`anim_cancel_duration` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.3.
'''
loop = BooleanProperty(False)
'''Allow the Carousel to swipe infinitely. When the user reaches the last
page, they will return to the first page when trying to swipe to the next.
:attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
def _get_index(self):
if self.slides:
return self._index % len(self.slides)
return None
def _set_index(self, value):
if self.slides:
self._index = value % len(self.slides)
else:
self._index = None
index = AliasProperty(_get_index, _set_index, bind=('_index', 'slides'))
'''Get/Set the current visible slide based on the index.
:attr:`index` is a :class:`~kivy.properties.AliasProperty` and defaults
to 0 (the first item).
'''
def _prev_slide(self):
slides = self.slides
len_slides = len(slides)
index = self.index
if len_slides < 2: # None, or 1 slide
return None
if len_slides == 2:
if index == 0:
return None
if index == 1:
return slides[0]
if self.loop and index == 0:
return slides[-1]
if index > 0:
return slides[index - 1]
previous_slide = AliasProperty(_prev_slide, None, bind=('slides', 'index'))
'''The previous slide in the Carousel. It is None if the current slide is
the first slide in the Carousel. If :attr:`orientation` is 'horizontal',
the previous slide is to the left. If :attr:`orientation` is 'vertical',
the previous slide is towards the bottom.
:attr:`previous_slide` is a :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
This property doesn't expose the container used for storing the slide.
It returns the widget you have added.
'''
def _curr_slide(self):
if len(self.slides):
return self.slides[self.index]
current_slide = AliasProperty(_curr_slide, None, bind=('slides', 'index'))
'''The currently shown slide.
:attr:`current_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
The property doesn't expose the container used for storing the slide.
It returns the widget you have added.
'''
def _next_slide(self):
if len(self.slides) < 2: # None, or 1 slide
return None
if len(self.slides) == 2:
if self.index == 0:
return self.slides[1]
if self.index == 1:
return None
if self.loop and self.index == len(self.slides) - 1:
return self.slides[0]
if self.index < len(self.slides) - 1:
return self.slides[self.index + 1]
next_slide = AliasProperty(_next_slide, None, bind=('slides', 'index'))
'''The next slide in the Carousel. It is None if the current slide is
the last slide in the Carousel. If :attr:`orientation` is 'horizontal',
the next slide is to the right. If :attr:`orientation` is 'vertical',
the next slide is towards the bottom.
:attr:`next_slide` is a :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
The property doesn't expose the container used for storing the slide.
It returns the widget you have added.
'''
scroll_timeout = NumericProperty(200)
'''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.
If the user has not moved :attr:`scroll_distance` within the timeout,
the scrolling will be disabled and the touch event will go to the children.
:attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and
defaults to 200 (milliseconds)
.. versionadded:: 1.5.0
'''
scroll_distance = NumericProperty('20dp')
'''Distance to move before scrolling the :class:`Carousel` in pixels. As
soon as the distance has been traveled, the :class:`Carousel` will start
to scroll, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target device's
screen.
:attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and
defaults to 20dp.
.. versionadded:: 1.5.0
'''
anim_type = StringProperty('out_quad')
'''Type of animation to use while animating in the next/previous slide.
.. versionadded:: 1.8.0
'''
#### private properties, for internal use only ###
_index = NumericProperty(0, allownone=True)
_prev = ObjectProperty(None, allownone=True)
_current = ObjectProperty(None, allownone=True)
_next = ObjectProperty(None, allownone=True)
_offset = NumericProperty(0)
_touch = ObjectProperty(None, allownone=True)
def __init__(self, **kwargs):
self._trigger_position_visible_slides = Clock.create_trigger(
self._position_visible_slides, -1)
super(Carousel, self).__init__(**kwargs)
self._skip_slide = None
def load_slide(self, slide):
'''Animate to the slide that is passed as the argument.
.. versionchanged:: 1.8.0
'''
slides = self.slides
start, stop = slides.index(self.current_slide), slides.index(slide)
if start == stop:
return
self._skip_slide = stop
if stop > start:
self._insert_visible_slides(_next_slide=slide)
self.load_next()
else:
self._insert_visible_slides(_prev_slide=slide)
self.load_previous()
def load_previous(self):
'''Animate to the previous slide.
.. versionadded:: 1.7.0
'''
self.load_next(mode='prev')
def load_next(self, mode='next'):
'''Animate to next slide.
.. versionadded:: 1.7.0
'''
if self.index is not None:
w, h = self.size
_direction = {
'top': -h / 2,
'bottom': h / 2,
'left': w / 2,
'right': -w / 2}
_offset = _direction[self.direction]
if mode == 'prev':
_offset = -_offset
self._start_animation(min_move=0, offset=_offset)
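# Hedged usage sketch (illustrative, not part of the widget): the methods
# above allow programmatic navigation, e.g. from a Button callback;
# `carousel` is assumed to be an existing Carousel instance.
#
#   carousel.load_next()                      # animate to the next slide
#   carousel.load_previous()                  # animate back
#   carousel.load_slide(carousel.slides[3])   # jump to a specific slide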
def get_slide_container(self, slide):
return slide.parent
def _insert_visible_slides(self, _next_slide=None, _prev_slide=None):
get_slide_container = self.get_slide_container
previous_slide = _prev_slide if _prev_slide else self.previous_slide
if previous_slide:
self._prev = get_slide_container(previous_slide)
else:
self._prev = None
current_slide = self.current_slide
if current_slide:
self._current = get_slide_container(current_slide)
else:
self._current = None
next_slide = _next_slide if _next_slide else self.next_slide
if next_slide:
self._next = get_slide_container(next_slide)
else:
self._next = None
super_remove = super(Carousel, self).remove_widget
for container in self.slides_container:
super_remove(container)
if self._prev:
super(Carousel, self).add_widget(self._prev)
if self._next:
super(Carousel, self).add_widget(self._next)
if self._current:
super(Carousel, self).add_widget(self._current)
def _position_visible_slides(self, *args):
slides, index = self.slides, self.index
no_of_slides = len(slides) - 1
if not slides:
return
x, y, width, height = self.x, self.y, self.width, self.height
_offset, direction = self._offset, self.direction
_prev, _next, _current = self._prev, self._next, self._current
get_slide_container = self.get_slide_container
last_slide = get_slide_container(slides[-1])
first_slide = get_slide_container(slides[0])
skip_next = False
_loop = self.loop
if direction[0] in ['r', 'l']:
xoff = x + _offset
x_prev = {'l': xoff + width, 'r': xoff - width}
x_next = {'l': xoff - width, 'r': xoff + width}
if _prev:
_prev.pos = (x_prev[direction[0]], y)
elif _loop and _next and index == 0:
# if first slide is moving to right with direction set to right
# or toward left with direction set to left
if ((_offset > 0 and direction[0] == 'r') or
(_offset < 0 and direction[0] == 'l')):
# put last_slide before first slide
last_slide.pos = (x_prev[direction[0]], y)
skip_next = True
if _current:
_current.pos = (xoff, y)
if skip_next:
return
if _next:
_next.pos = (x_next[direction[0]], y)
elif _loop and _prev and index == no_of_slides:
if ((_offset < 0 and direction[0] == 'r') or
(_offset > 0 and direction[0] == 'l')):
first_slide.pos = (x_next[direction[0]], y)
if direction[0] in ['t', 'b']:
yoff = y + _offset
y_prev = {'t': yoff - height, 'b': yoff + height}
y_next = {'t': yoff + height, 'b': yoff - height}
if _prev:
_prev.pos = (x, y_prev[direction[0]])
elif _loop and _next and index == 0:
if ((_offset > 0 and direction[0] == 't') or
(_offset < 0 and direction[0] == 'b')):
last_slide.pos = (x, y_prev[direction[0]])
skip_next = True
if _current:
_current.pos = (x, yoff)
if skip_next:
return
if _next:
_next.pos = (x, y_next[direction[0]])
elif _loop and _prev and index == no_of_slides:
if ((_offset < 0 and direction[0] == 't') or
(_offset > 0 and direction[0] == 'b')):
first_slide.pos = (x, y_next[direction[0]])
def on_size(self, *args):
size = self.size
for slide in self.slides_container:
slide.size = size
self._trigger_position_visible_slides()
def on_pos(self, *args):
self._trigger_position_visible_slides()
def on_index(self, *args):
self._insert_visible_slides()
self._trigger_position_visible_slides()
self._offset = 0
def on_slides(self, *args):
if self.slides:
self.index = self.index % len(self.slides)
self._insert_visible_slides()
self._trigger_position_visible_slides()
def on__offset(self, *args):
self._trigger_position_visible_slides()
# if reached full offset, switch index to next or prev
direction = self.direction
_offset = self._offset
width = self.width
height = self.height
index = self.index
if self._skip_slide is not None or index is None:
return
if direction[0] == 'r':
if _offset <= -width:
index += 1
if _offset >= width:
index -= 1
if direction[0] == 'l':
if _offset <= -width:
index -= 1
if _offset >= width:
index += 1
if direction[0] == 't':
if _offset <= - height:
index += 1
if _offset >= height:
index -= 1
if direction[0] == 'b':
if _offset <= -height:
index -= 1
if _offset >= height:
index += 1
self.index = index
def _start_animation(self, *args, **kwargs):
# compute target offset for ease back, next or prev
new_offset = 0
direction = kwargs.get('direction', self.direction)
is_horizontal = direction[0] in ['r', 'l']
extent = self.width if is_horizontal else self.height
min_move = kwargs.get('min_move', self.min_move)
_offset = kwargs.get('offset', self._offset)
if _offset < min_move * -extent:
new_offset = -extent
elif _offset > min_move * extent:
new_offset = extent
        # if new_offset is 0, it wasn't enough to go next/prev
dur = self.anim_move_duration
if new_offset == 0:
dur = self.anim_cancel_duration
# detect edge cases if not looping
len_slides = len(self.slides)
index = self.index
if not self.loop or len_slides == 1:
is_first = (index == 0)
is_last = (index == len_slides - 1)
if direction[0] in ['r', 't']:
towards_prev = (new_offset > 0)
towards_next = (new_offset < 0)
else:
towards_prev = (new_offset < 0)
towards_next = (new_offset > 0)
if (is_first and towards_prev) or (is_last and towards_next):
new_offset = 0
anim = Animation(_offset=new_offset, d=dur, t=self.anim_type)
anim.cancel_all(self)
def _cmp(*l):
if self._skip_slide is not None:
self.index = self._skip_slide
self._skip_slide = None
anim.bind(on_complete=_cmp)
anim.start(self)
def _get_uid(self, prefix='sv'):
return '{0}.{1}'.format(prefix, self.uid)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
touch.ud[self._get_uid('cavoid')] = True
return
if self.disabled:
return True
if self._touch:
return super(Carousel, self).on_touch_down(touch)
Animation.cancel_all(self)
self._touch = touch
uid = self._get_uid()
touch.grab(self)
touch.ud[uid] = {
'mode': 'unknown',
'time': touch.time_start}
Clock.schedule_once(self._change_touch_mode,
self.scroll_timeout / 1000.)
return True
def on_touch_move(self, touch):
if self._get_uid('cavoid') in touch.ud:
return
if self._touch is not touch:
super(Carousel, self).on_touch_move(touch)
return self._get_uid() in touch.ud
if touch.grab_current is not self:
return True
ud = touch.ud[self._get_uid()]
direction = self.direction
if ud['mode'] == 'unknown':
if direction[0] in ('r', 'l'):
distance = abs(touch.ox - touch.x)
else:
distance = abs(touch.oy - touch.y)
if distance > self.scroll_distance:
Clock.unschedule(self._change_touch_mode)
ud['mode'] = 'scroll'
else:
if direction[0] in ('r', 'l'):
self._offset += touch.dx
if direction[0] in ('t', 'b'):
self._offset += touch.dy
return True
def on_touch_up(self, touch):
if self._get_uid('cavoid') in touch.ud:
return
if self in [x() for x in touch.grab_list]:
touch.ungrab(self)
self._touch = None
ud = touch.ud[self._get_uid()]
if ud['mode'] == 'unknown':
Clock.unschedule(self._change_touch_mode)
super(Carousel, self).on_touch_down(touch)
Clock.schedule_once(partial(self._do_touch_up, touch), .1)
else:
self._start_animation()
else:
if self._touch is not touch and self.uid not in touch.ud:
super(Carousel, self).on_touch_up(touch)
return self._get_uid() in touch.ud
def _do_touch_up(self, touch, *largs):
super(Carousel, self).on_touch_up(touch)
# don't forget about grab event!
for x in touch.grab_list[:]:
touch.grab_list.remove(x)
x = x()
if not x:
continue
touch.grab_current = x
super(Carousel, self).on_touch_up(touch)
touch.grab_current = None
def _change_touch_mode(self, *largs):
if not self._touch:
return
self._start_animation()
uid = self._get_uid()
touch = self._touch
ud = touch.ud[uid]
if ud['mode'] == 'unknown':
touch.ungrab(self)
self._touch = None
super(Carousel, self).on_touch_down(touch)
return
def add_widget(self, widget, index=0):
slide = RelativeLayout(size=self.size, x=self.x - self.width, y=self.y)
slide.add_widget(widget)
super(Carousel, self).add_widget(slide, index)
if index != 0:
self.slides.insert(index, widget)
else:
self.slides.append(widget)
def remove_widget(self, widget, *args, **kwargs):
        # XXX be careful: widget.parent refers to the RelativeLayout
        # added in add_widget(), so this will break if the RelativeLayout
        # implementation changes.
# if we passed the real widget
if widget in self.slides:
slide = widget.parent
self.slides.remove(widget)
return slide.remove_widget(widget, *args, **kwargs)
return super(Carousel, self).remove_widget(widget, *args, **kwargs)
def clear_widgets(self):
for slide in self.slides[:]:
self.remove_widget(slide)
super(Carousel, self).clear_widgets()
if __name__ == '__main__':
from kivy.app import App
class Example1(App):
def build(self):
carousel = Carousel(direction='left',
loop=True)
for i in range(4):
src = "http://placehold.it/480x270.png&text=slide-%d&.png" % i
image = Factory.AsyncImage(source=src, allow_stretch=True)
carousel.add_widget(image)
return carousel
Example1().run()
|
tests/modules/net_ilb/test_plan.py
|
IuryAlves/cloud-foundation-fabric
| 203 |
66406
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
_BACKENDS = '[{balancing_mode="CONNECTION", group="foo", failover=false}]'
def test_defaults(plan_runner):
"Test variable defaults."
_, resources = plan_runner(FIXTURES_DIR, backends=_BACKENDS)
assert len(resources) == 3
resources = dict((r['type'], r['values']) for r in resources)
fwd_rule = resources['google_compute_forwarding_rule']
assert fwd_rule['load_balancing_scheme'] == 'INTERNAL'
assert fwd_rule['all_ports']
assert fwd_rule['allow_global_access'] is None
backend = resources['google_compute_region_backend_service']
assert len(backend['backend']) == 1
assert backend['backend'][0]['group'] == 'foo'
health_check = resources['google_compute_health_check']
for k, v in health_check.items():
if k == 'http_health_check':
assert len(v) == 1
assert v[0]['port_specification'] == 'USE_SERVING_PORT'
elif k.endswith('_health_check'):
assert len(v) == 0
def test_forwarding_rule(plan_runner):
"Test forwarding rule variables."
_, resources = plan_runner(
FIXTURES_DIR, backends=_BACKENDS, global_access='true', ports="[80]")
assert len(resources) == 3
values = [r['values'] for r in resources if r['type']
== 'google_compute_forwarding_rule'][0]
assert not values['all_ports']
assert values['ports'] == ['80']
assert values['allow_global_access']
|
astroquery/casda/tests/test_casda.py
|
gbrammer/astroquery
| 577 |
66412
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import requests
import os
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.table import Table, Column
from astropy.io.votable import parse
from astroquery import log
from astroquery.casda import Casda
try:
from unittest.mock import Mock, patch, PropertyMock, MagicMock
except ImportError:
pytest.skip("Install mock for the casda tests.", allow_module_level=True)
DATA_FILES = {'CIRCLE': 'cone.xml', 'RANGE': 'box.xml', 'DATALINK': 'datalink.xml', 'RUN_JOB': 'run_job.xml',
'COMPLETED_JOB': 'completed_job.xml', 'DATALINK_NOACCESS': 'datalink_noaccess.xml'}
class MockResponse:
def __init__(self, content):
self.content = content
self.text = content.decode()
def raise_for_status(self):
return
first_job_pass = True
def get_mockreturn(self, method, url, data=None, timeout=10,
files=None, params=None, headers=None, **kwargs):
log.debug("get_mockreturn url:{} params:{} kwargs:{}".format(url, params, kwargs))
if kwargs and 'auth' in kwargs:
auth = kwargs['auth']
if auth and (auth[0] != 'user' or auth[1] != 'password'):
log.debug("Rejecting credentials")
return create_auth_failure_response()
if 'data/async' in str(url):
# Responses for an asynchronous SODA job
if str(url).endswith('data/async'):
self.first_job_pass = True
return create_soda_create_response('111-000-111-000')
elif str(url).endswith('/phase') and method == 'POST':
key = "RUN_JOB"
elif str(url).endswith('111-000-111-000') and method == 'GET':
key = "RUN_JOB" if self.first_job_pass else "COMPLETED_JOB"
self.first_job_pass = False
else:
raise ValueError("Unexpected SODA async {} call to url {}".format(method, url))
elif 'datalink' in str(url):
if 'cube-244' in str(url):
key = 'DATALINK'
else:
key = 'DATALINK_NOACCESS'
else:
key = params['POS'].split()[0] if params['POS'] else None
filename = data_path(DATA_FILES[key])
log.debug('providing ' + filename)
content = open(filename, 'rb').read()
return MockResponse(content)
def create_soda_create_response(jobid):
job_url = 'https://casda.csiro.au/casda_data_access/data/async/' + jobid
create_response_headers = [
['location', job_url]
]
create_response = Mock(spec=requests.Response)
create_response.configure_mock(status_code=303, message='OK', headers=create_response_headers, url=job_url)
return create_response
def create_auth_failure_response():
unauthenticated_headers = [
['WWW-Authenticate', 'Basic realm="ATNF OPAL Login"']
]
create_response = MagicMock(spec=requests.Response)
attrs = {'raise_for_status.side_effect': requests.exceptions.HTTPError()}
create_response.configure_mock(status_code=401, message='OK', headers=unauthenticated_headers, **attrs)
return create_response
@pytest.fixture
def patch_get(request):
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(requests.Session, 'request', get_mockreturn)
return mp
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def isclose(value1, value2, abs_tol=1e-09):
return abs(value1 - value2) < abs_tol
def test_query_region_text_radius(patch_get):
ra = 333.9092
dec = -45.8418
radius = 0.5
query_payload = Casda.query_region('22h15m38.2s -45d50m30.5s', radius=radius * u.deg, cache=False,
get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('CIRCLE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), ra, abs_tol=1e-4)
assert isclose(float(pos_parts[2]), dec, abs_tol=1e-4)
assert isclose(float(pos_parts[3]), radius)
assert len(pos_parts) == 4
responses = Casda.query_region('22h15m38.2s -45d50m30.5s', radius=0.5 * u.deg, cache=False)
assert isinstance(responses, Table)
assert len(responses) == 3
def test_query_region_radius(patch_get):
ra = 333.9092
dec = -45.8418
radius = 0.5
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region(centre, radius=radius * u.deg, cache=False, get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('CIRCLE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), ra, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), dec, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), radius)
assert len(pos_parts) == 4
responses = Casda.query_region(centre, radius=0.5 * u.deg, cache=False)
assert isinstance(responses, Table)
assert len(responses) == 3
def test_query_region_async_radius(patch_get):
ra = 333.9092
dec = -45.8418
radius = 0.5
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region_async(centre, radius=radius * u.deg, cache=False, get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('CIRCLE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), ra, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), dec, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), radius)
assert len(pos_parts) == 4
responses = Casda.query_region_async(centre, radius=0.5 * u.deg, cache=False)
assert isinstance(responses, MockResponse)
def test_query_region_box(patch_get):
ra = 333.9092
dec = -45.8418
width = 0.5
height = 0.2
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region(centre, width=width * u.deg, height=height * u.deg, cache=False,
get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('RANGE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'RANGE'
assert isclose(float(pos_parts[1]), ra - width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), ra + width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), dec - height / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[4]), dec + height / 2, abs_tol=1e-5)
assert len(pos_parts) == 5
responses = Casda.query_region(centre, width=width * u.deg, height=height * u.deg, cache=False)
assert isinstance(responses, Table)
assert len(responses) == 2
def test_query_region_async_box(patch_get):
ra = 333.9092
dec = -45.8418
width = 0.5
height = 0.2
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region_async(centre, width=width * u.deg, height=height * u.deg, cache=False,
get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('RANGE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'RANGE'
assert isclose(float(pos_parts[1]), ra - width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), ra + width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), dec - height / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[4]), dec + height / 2, abs_tol=1e-5)
assert len(pos_parts) == 5
responses = Casda.query_region_async(centre, width=width * u.deg, height=height * u.deg, cache=False)
assert isinstance(responses, MockResponse)
def test_filter_out_unreleased():
all_records = parse(data_path('partial_unreleased.xml'), verify='warn').get_first_table().to_table()
assert all_records[0]['obs_release_date'] == '2017-08-02T03:51:19.728Z'
assert all_records[1]['obs_release_date'] == '2218-01-02T16:51:00.728Z'
assert all_records[2]['obs_release_date'] == ''
assert len(all_records) == 3
# This should filter out the rows with either a future obs_release_date or no obs_release_date
filtered = Casda.filter_out_unreleased(all_records)
assert filtered[0]['obs_release_date'] == '2017-08-02T03:51:19.728Z'
assert filtered[0]['obs_publisher_did'] == 'cube-502'
assert len(filtered) == 1
def test_stage_data_unauthorised(patch_get):
table = Table()
with pytest.raises(ValueError) as excinfo:
Casda.stage_data(table)
assert "Credentials must be supplied" in str(excinfo.value)
def test_stage_data_empty(patch_get):
table = Table()
casda = Casda('user', 'password')
urls = casda.stage_data(table)
assert urls == []
def test_stage_data_invalid_credentials(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-220']
table = Table([Column(data=access_urls, name='access_url')])
casda = Casda('user', '<PASSWORD>')
with pytest.raises(requests.exceptions.HTTPError) as excinfo:
casda.stage_data(table)
def test_stage_data_no_link(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-240']
table = Table([Column(data=access_urls, name='access_url')])
casda = Casda('user', 'password')
casda.POLL_INTERVAL = 1
with pytest.raises(ValueError) as excinfo:
casda.stage_data(table)
assert "You do not have access to any of the requested data files." in str(excinfo.value)
def test_stage_data(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-244']
table = Table([Column(data=access_urls, name='access_url')])
casda = Casda('user', 'password')
casda.POLL_INTERVAL = 1
urls = casda.stage_data(table, verbose=True)
assert urls == ['http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits.checksum',
'http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits']
|
homeassistant/components/notify_events/const.py
|
tbarbette/core
| 30,023 |
66417
|
<filename>homeassistant/components/notify_events/const.py
"""Const for notify_events."""
DOMAIN = "notify_events"
|
utils/mean_values.py
|
SivagamiNambi/Pytorch3d
| 162 |
66442
|
# Source: https://github.com/kenshohara/3D-ResNets-PyTorch/blob/master/mean.py
def get_mean(norm_value=255, dataset='activitynet'):
# Below values are in RGB order
assert dataset in ['activitynet', 'kinetics', 'ucf101']
if dataset == 'activitynet':
return [114.7748/norm_value, 107.7354/norm_value, 99.4750/norm_value]
elif dataset == 'kinetics':
# Kinetics (10 videos for each class)
return [110.63666788/norm_value, 103.16065604/norm_value, 96.29023126/norm_value]
elif dataset == 'ucf101':
return [101.00131/norm_value, 97.3644226/norm_value, 89.42114168/norm_value]
def get_std(norm_value=255):
# Kinetics (10 videos for each class)
return [38.7568578/norm_value, 37.88248729/norm_value, 40.02898126/norm_value]
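# A minimal usage sketch (illustrative only, assuming a torchvision-style
# Normalize transform is available in the surrounding project):
#
#     from torchvision.transforms import Normalize
#     normalize = Normalize(mean=get_mean(norm_value=255, dataset='kinetics'),
#                           std=get_std(norm_value=255))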
|
docs/introduction/codeexamples/googleProxy.py
|
tristanlatr/klein
| 643 |
66447
|
<reponame>tristanlatr/klein<gh_stars>100-1000
import treq
from klein import Klein
app = Klein()
@app.route("/", branch=True)
def google(request):
d = treq.get("https://www.google.com" + request.uri)
d.addCallback(treq.content)
return d
app.run("localhost", 8080)
|
pywick/datasets/tnt/transformdataset.py
|
achaiah/pywick
| 408 |
66450
|
<reponame>achaiah/pywick
from .dataset import Dataset
class TransformDataset(Dataset):
"""
Dataset which transforms a given dataset with a given function.
Given a function `transform`, and a `dataset`, `TransformDataset` applies
    the function on the fly when querying a sample with
    `__getitem__(idx)`, therefore returning `transform(dataset[idx])`.
    `transform` can also be a dict with functions as values. In this case, it
    is assumed that `dataset[idx]` is a dict which has all the keys in
    `transform`. Then, `transform[key]` is applied to `dataset[idx][key]` for
    each key in `transform`.
The size of the new dataset is equal to the size of the underlying
`dataset`.
Purpose: when performing pre-processing operations, it is convenient to be
able to perform on-the-fly transformations to a dataset.
Args:
dataset (Dataset): Dataset which has to be transformed.
transforms (function/dict): Function or dict with function as values.
These functions will be applied to data.
"""
def __init__(self, dataset, transforms):
super(TransformDataset, self).__init__()
if not (isinstance(transforms, dict) or callable(transforms)):
raise AssertionError('expected a dict of transforms or a function')
if isinstance(transforms, dict):
for k, v in transforms.items():
if not callable(v):
raise AssertionError(str(k) + ' is not a function')
self.dataset = dataset
self.transforms = transforms
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
super(TransformDataset, self).__getitem__(idx)
z = self.dataset[idx]
if isinstance(self.transforms, dict):
for k, transform in self.transforms.items():
z[k] = transform(z[k])
else:
z = self.transforms(z)
return z
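# A minimal usage sketch (illustrative only; `base_dataset` is a placeholder
# for any Dataset whose samples are dicts with 'input' and 'target' keys):
#
#     transforms = {'input': lambda x: x / 255.0,
#                   'target': lambda y: int(y)}
#     dataset = TransformDataset(base_dataset, transforms)
#     sample = dataset[0]   # transforms are applied on the fly, per key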
|
mantraml/models/__init__.py
|
cclauss/mantra
| 330 |
66451
|
from .MantraModel import MantraModel
|
tst/regression/scripts/utils/log_pipe.py
|
lucyundead/athena--fork
| 174 |
66463
|
"""Provides LogPipe class to pipe output from subprocess to a log.
Adapted from https://codereview.stackexchange.com/questions/6567"""
import logging
import threading
import os
class LogPipe(threading.Thread):
def __init__(self, logger, level):
"""Setup the object with a logger and a loglevel and start the thread"""
super(LogPipe, self).__init__()
# threading.Thread.__init__(self)
self.logger = logging.getLogger(logger)
self.daemon = False
self.level = level
self.fdRead, self.fdWrite = os.pipe()
self.pipeReader = os.fdopen(self.fdRead)
self.start()
def fileno(self):
"""Return the write file descriptor of the pipe"""
return self.fdWrite
def run(self):
"""Run the thread, logging everything."""
for line in iter(self.pipeReader.readline, ''):
self.logger.log(self.level, line.strip('\n'))
self.pipeReader.close()
def close(self):
"""Close the write end of the pipe."""
os.close(self.fdWrite)
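# A minimal usage sketch (illustrative only): a LogPipe is handed to
# subprocess as a file-like stdout/stderr target via its fileno().
#
#     import subprocess
#     pipe = LogPipe('my.logger', logging.INFO)
#     with subprocess.Popen(['ls', '-l'], stdout=pipe, stderr=pipe) as proc:
#         proc.wait()
#     pipe.close()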
|
modelci/types/models/pattern.py
|
Lionjump0723/ML-Model-CI
| 170 |
66482
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: USER
Email: <EMAIL>
Date: 2/24/2021
Basic patterns for creation of required components.
"""
import ast
import inspect
from enum import Enum
from typing import Type, Any, Union
from fastapi import Form
from fastapi.exceptions import RequestValidationError
from pydantic import BaseModel, ValidationError
from pydantic.fields import ModelField, FieldInfo
from modelci.utils.misc import isgeneric
def _make_form_parameter(field_info: FieldInfo) -> Any:
"""
Converts a field from a `Pydantic` model to the appropriate `FastAPI`
parameter type.
Args:
field_info (FieldInfo): The field information to convert.
Returns:
A form.
"""
return Form(
default=field_info.default,
alias=field_info.alias,
title=field_info.title,
description=field_info.description,
gt=field_info.gt,
lt=field_info.lt,
le=field_info.le,
min_length=field_info.min_length,
max_length=field_info.max_length,
regex=field_info.regex,
**field_info.extra,
)
def _make_form_enum(enum_cls: Type[Enum]):
"""
    Modify an :class:`Enum` class that uses int values so that it also accepts string value members.
Args:
enum_cls (Type[Enum]): An enum class.
Returns:
Type[Enum]: the modified enum class.
"""
def _missing_(cls, value):
for member in cls:
if str(member.value) == value:
# save to value -> member mapper
cls._value2member_map_[value] = member
return member
return missing_old(value)
if hasattr(enum_cls, '__form__') or all(isinstance(e.value, str) for e in enum_cls):
return
missing_old = getattr(enum_cls, '_missing_')
setattr(enum_cls, '_missing_', classmethod(_missing_))
setattr(enum_cls, '__form__', True)
return enum_cls
def make_annotation(field: ModelField):
"""
    Convert a field annotation type to a type accepted as form data.
    The method converts structural fields such as `BaseModel` and `Dict` to `str`, so that the
    model's value can be supplied as a serialized JSON string. Such a string is converted back
    to a dictionary and used to initialize the original field.
"""
field_outer_type = field.outer_type_
is_literal = False
# check outer type
if isgeneric(field_outer_type):
# outer type is a generic class
if field_outer_type.__origin__ is Union:
# only Union is valid generic class
inner_types = field_outer_type.__args__
else:
return str, True
else:
inner_types = (field_outer_type,)
field_outer_type = None
# check inner types
inner_types_new = list()
for inner_type in inner_types:
if inner_type in (str, int, float, ..., Any):
# inner type of `str`, `int` and `float` will be natively used as form data value
inner_types_new.append(inner_type)
elif issubclass(inner_type, Enum):
inner_types_new.append(_make_form_enum(inner_type))
else:
# other types will be converted to string literal
is_literal = True
inner_types_new.append(str)
if field_outer_type is None:
field_outer_type = inner_types_new[0]
else:
# set new generic type args
field_outer_type = field_outer_type.__origin__[tuple(inner_types_new)]
return field_outer_type, is_literal
def as_form(cls: Type[BaseModel]) -> Type[BaseModel]:
"""
Adds an `as_form` class method to decorated models. The `as_form` class
method can be used with `FastAPI` endpoints.
TODO: auto generate OpenAPI example
Args:
cls: The model class to decorate.
Returns:
The decorated class.
References:
* https://github.com/tiangolo/fastapi/issues/2387#issuecomment-731662551
"""
literal_fields = set()
new_params = list()
for field in cls.__fields__.values():
annotation, is_literal = make_annotation(field)
if is_literal:
literal_fields.add(field.alias)
new_params.append(
inspect.Parameter(
field.alias,
inspect.Parameter.POSITIONAL_ONLY,
default=_make_form_parameter(field.field_info),
annotation=annotation,
)
)
async def _as_form(**data):
"""
Create the model as a form data.
"""
# parse literal back to dictionary
for field_alias in literal_fields:
value = data.pop(field_alias, None)
data[field_alias] = ast.literal_eval(str(value))
try:
cls.parse_obj(data)
return cls(**data)
except ValidationError as exc:
raise RequestValidationError(exc.raw_errors)
sig = inspect.signature(_as_form)
sig = sig.replace(parameters=new_params)
_as_form.__signature__ = sig
setattr(cls, "as_form", _as_form)
return cls
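# A minimal usage sketch (illustrative only; `ItemModel` and the endpoint are
# placeholders, not part of this module):
#
#     from fastapi import Depends, FastAPI
#
#     @as_form
#     class ItemModel(BaseModel):
#         name: str
#         price: float
#
#     app = FastAPI()
#
#     @app.post('/items')
#     async def create_item(item: ItemModel = Depends(ItemModel.as_form)):
#         return item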
|
chapter-6/holdings/holdings/clients.py
|
wallacei/microservices-in-action-copy
| 115 |
66568
|
import logging
import requests
from tenacity import before_log, retry, stop_after_attempt
class MarketDataClient(object):
logger = logging.getLogger(__name__)
base_url = 'http://market-data:8000'
def _make_request(self, url):
response = requests.get(
f"{self.base_url}/{url}", headers={'content-type': 'application/json'})
return response.json()
@retry(stop=stop_after_attempt(3),
before=before_log(logger, logging.DEBUG))
def all_prices(self):
return self._make_request("prices")
def price(self, code):
return self._make_request(f"prices/{code}")
|
holoviews/tests/plotting/bokeh/test_geomplot.py
|
TheoMathurin/holoviews
| 864 |
66576
|
from unittest import SkipTest
from holoviews.core import NdOverlay
from holoviews.core.util import pd
from holoviews.element import Segments
from .test_plot import TestBokehPlot, bokeh_renderer
try:
from bokeh.models import FactorRange
except:
pass
class TestSegmentPlot(TestBokehPlot):
def test_segments_color_selection_nonselection(self):
opts = dict(color='green', selection_color='red', nonselection_color='blue')
segments = Segments([(i, i*2, i*3, i*4, i*5, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(**opts)
plot = bokeh_renderer.get_plot(segments)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.line_color, 'green')
self.assertEqual(glyph_renderer.selection_glyph.line_color, 'red')
self.assertEqual(glyph_renderer.nonselection_glyph.line_color, 'blue')
def test_segments_alpha_selection_nonselection(self):
opts = dict(alpha=0.8, selection_alpha=1.0, nonselection_alpha=0.2)
segments = Segments([(i, i*2, i*3, i*4, i*5, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(**opts)
plot = bokeh_renderer.get_plot(segments)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.line_alpha, 0.8)
self.assertEqual(glyph_renderer.selection_glyph.line_alpha, 1)
self.assertEqual(glyph_renderer.nonselection_glyph.line_alpha, 0.2)
def test_segments_overlay_hover(self):
obj = NdOverlay({
i: Segments((range(31), range(31),range(1, 32), range(31)))
for i in range(5)
}, kdims=['Test']).opts({'Segments': {'tools': ['hover']}})
tooltips = [
('Test', '@{Test}'),
('x0', '@{x0}'),
('y0', '@{y0}'),
('x1', '@{x1}'),
('y1', '@{y1}')
]
self._test_hover_info(obj, tooltips)
def test_segments_overlay_datetime_hover(self):
if pd is None:
raise SkipTest("Test requires pandas")
obj = NdOverlay({
i: Segments((
list(pd.date_range('2016-01-01', '2016-01-31')),
range(31),
pd.date_range('2016-01-02', '2016-02-01'),
range(31)
))
for i in range(5)
}, kdims=['Test']).opts({'Segments': {'tools': ['hover']}})
tooltips = [
('Test', '@{Test}'),
('x0', '@{x0}{%F %T}'),
('y0', '@{y0}'),
('x1', '@{x1}{%F %T}'),
('y1', '@{y1}')
]
formatters = {'@{x0}': "datetime", '@{x1}': "datetime"}
self._test_hover_info(obj, tooltips, formatters=formatters)
def test_segments_categorical_xaxis(self):
segments = Segments((['A', 'B', 'C'], [1, 2, 3], ['A', 'B', 'C'], [4, 5, 6]))
plot = bokeh_renderer.get_plot(segments)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C'])
def test_segments_categorical_yaxis(self):
segments = Segments(([1, 2, 3], ['A', 'B', 'C'], [4, 5, 6], ['A', 'B', 'C']))
plot = bokeh_renderer.get_plot(segments)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C'])
def test_segments_categorical_yaxis_invert_axes(self):
        segments = Segments(([1, 2, 3], ['A', 'B', 'C'], [4, 5, 6], ['A', 'B', 'C'])).opts(invert_axes=True)
        plot = bokeh_renderer.get_plot(segments)
        x_range = plot.handles['x_range']
        self.assertIsInstance(x_range, FactorRange)
        self.assertEqual(x_range.factors, ['A', 'B', 'C'])
def test_segments_overlay_categorical_yaxis(self):
segments = Segments(([1, 2, 3], ['A', 'B', 'C'], [4, 5, 6], ['A', 'B', 'C']))
segments2 = Segments(([1, 2, 3], ['B', 'C', 'D'], [4, 5, 6], ['B', 'C', 'D']))
plot = bokeh_renderer.get_plot(segments*segments2)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C', 'D'])
def test_segments_overlay_categorical_yaxis_invert_axis(self):
segments = Segments(([1, 2, 3], ['A', 'B', 'C'], [4, 5, 6], ['A', 'B', 'C'])).opts(invert_yaxis=True)
segments2 = Segments(([1, 2, 3], ['B', 'C', 'D'], [4, 5, 6], ['B', 'C', 'D']))
plot = bokeh_renderer.get_plot(segments*segments2)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C', 'D'][::-1])
def test_segments_overlay_categorical_yaxis_invert_axes(self):
segments = Segments(([1, 2, 3], ['A', 'B', 'C'], [4, 5, 6], ['A', 'B', 'C'])).opts(invert_axes=True)
segments2 = Segments(([1, 2, 3], ['B', 'C', 'D'], [4, 5, 6], ['B', 'C', 'D']))
plot = bokeh_renderer.get_plot(segments*segments2)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C', 'D'])
|