max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 7
115
| max_stars_count
int64 101
368k
| id
stringlengths 2
8
| content
stringlengths 6
1.03M
|
---|---|---|---|---|
LAMA/megatron_11b/models.py
|
leeyy2020/P-tuning
| 411 |
55971
|
from fairseq.models.transformer_lm import *
from torch.nn import CrossEntropyLoss
from typing import Any, Dict, List, Optional, Tuple
from torch import Tensor
class TransformerLanguageModelWrapper(TransformerLanguageModel):
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(
task.source_dictionary,
eval(args.character_filters),
args.character_embedding_dim,
args.decoder_embed_dim,
args.char_embedder_highway_layers,
)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(
len(task.source_dictionary),
task.source_dictionary.pad(),
args.decoder_input_dim,
args.adaptive_input_factor,
args.decoder_embed_dim,
options.eval_str_list(args.adaptive_input_cutoff, type=int),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_input_dim
)
if args.tie_adaptive_weights:
assert args.adaptive_input
assert args.adaptive_input_factor == args.adaptive_softmax_factor
assert (
args.adaptive_softmax_cutoff == args.adaptive_input_cutoff
), "{} != {}".format(
args.adaptive_softmax_cutoff, args.adaptive_input_cutoff
)
assert args.decoder_input_dim == args.decoder_output_dim
decoder = TransformerDecoderWrapper(
args, task.target_dictionary, embed_tokens, no_encoder_attn=True
)
return cls(decoder)
class TransformerDecoderWrapper(TransformerDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super(TransformerDecoderWrapper, self).__init__(args, dictionary, embed_tokens, no_encoder_attn)
self.use_parallel = False
def predict(self, prev_output_tokens, inputs_embeds, attention_mask, labels,
encoder_out=None, incremental_state=None, full_context_alignment=False,
alignment_layer=None, alignment_heads=None):
prev_output_tokens = prev_output_tokens.to("cuda:0")
inputs_embeds = inputs_embeds.to("cuda:0")
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=None
)
if self.embed_positions is not None
else None
)
prev_output_tokens = prev_output_tokens.to("cuda:0")
x = self.embed_scale * inputs_embeds
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions.to("cuda:0")
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
x = x.transpose(0, 1)
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
#inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
encoder_out.encoder_out if encoder_out is not None else None,
encoder_out.encoder_padding_mask if encoder_out is not None else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
#inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
x = x.to("cuda:0")
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
lm_logits = self.output_layer(x)
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
if labels is not None:
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
# return loss, lm_logits
return lm_logits, loss
else:
return lm_logits, None
|
tests/offline/test_logix_driver.py
|
amrhady2/ABB_Pycomm
| 185 |
55975
|
"""Tests for the logix_driver.py file.
The Logix Driver is beholden to the CIPDriver interface. Only tests
which bind it to that interface should be allowed here. Tests binding
to another interface such as Socket are an anti-pattern.
There are quite a few methods in the LogixDriver which are difficult to
read or test due to both code clarity issues and it being inconvenient.
Also the vast majority of methods are private, I think that private
methods should not be tested directly, but rather, their effects on
public methods should be tested.
pytest --cov=pycomm3 --cov-branch tests/offline/
----------- coverage: platform linux, python 3.8.1-final-0 -----------
Name Stmts Miss Branch BrPart Cover
----------------------------------------------------------------
pycomm3/logix_driver.py 798 718 346 0 7%
We're currently at 7% test coverage, I would like to increase that to >=50%
and then continue to do so for the rest of the modules.
"""
from unittest import mock
import pytest
from pycomm3.cip_driver import CIPDriver
from pycomm3.const import MICRO800_PREFIX, SUCCESS
from pycomm3.exceptions import CommError, PycommError, RequestError
from pycomm3.logix_driver import LogixDriver, encode_value
from pycomm3.packets import RequestPacket, ResponsePacket
from pycomm3.socket_ import Socket
from pycomm3.tag import Tag
from pycomm3.custom_types import ModuleIdentityObject
CONNECT_PATH = '192.168.1.100/1'
IDENTITY_CLX_V20 = {'vendor': 'Rockwell Automation/Allen-Bradley',
'product_type': 'Programmable Logic Controller', 'product_code': 0,
'revision': {'major': 20, 'minor': 0},
'status': b'00', 'serial': '00000000',
'product_name': '1756-L55'}
IDENTITY_CLX_V21 = {'vendor': 'Rockwell Automation/Allen-Bradley',
'product_type': 'Programmable Logic Controller', 'product_code': 0,
'revision': {'major': 21, 'minor': 0},
'status': b'00', 'serial': '00000000',
'product_name': '1756-L62'}
IDENTITY_CLX_V32 = {'vendor': 'Rockwell Automation/Allen-Bradley',
'product_type': 'Programmable Logic Controller', 'product_code': 0,
'revision': {'major': 32, 'minor': 0},
'status': b'00', 'serial': '00000000',
'product_name': '1756-L85'}
IDENTITY_M8000 = {'encap_protocol_version': 1,
'ip_address': '192.168.1.124',
'product_code': 259,
'product_name': '2080-LC50-48QWBS',
'product_type': 'Programmable Logic Controller',
'revision': {'major': 12, 'minor': 11},
'serial': '12345678',
'state': 2,
'status': b'4\x00',
'vendor': 'Rockwell Automation/Allen-Bradley'}
def test_open_call_init_driver_open():
"""
This test is to make sure that the initialize driver method is called during
the `open()` method of the driver.
"""
with mock.patch.object(CIPDriver, 'open') as mock_open, \
mock.patch.object(LogixDriver, '_initialize_driver') as mock_init:
driver = LogixDriver(CONNECT_PATH)
driver.open()
assert mock_open.called
assert mock_init.called
def test_open_call_init_driver_with():
"""
This test is to make sure that the initialize driver method is called during
the `open()` method of the driver.
"""
with mock.patch.object(CIPDriver, 'open') as mock_open, \
mock.patch.object(LogixDriver, '_initialize_driver') as mock_init:
with LogixDriver(CONNECT_PATH):
...
assert mock_open.called
assert mock_init.called
@pytest.mark.parametrize('identity', [IDENTITY_CLX_V20, IDENTITY_CLX_V21, IDENTITY_CLX_V32])
def test_logix_init_for_version_support_instance_ids_large_connection(identity):
with mock.patch.object(LogixDriver, '_list_identity') as mock_identity, \
mock.patch.object(LogixDriver, 'get_plc_info') as mock_get_info, \
mock.patch.object(LogixDriver, 'get_plc_name') as mock_get_name:
mock_identity.return_value = identity
mock_get_info.return_value = identity # this is the ListIdentity response
# not the same as module idenity, but
# has all the fields needed for the test
plc = LogixDriver(CONNECT_PATH)
plc._initialize_driver(False, False)
assert plc._micro800 is False
assert plc._cfg['use_instance_ids'] == (identity['revision']['major'] >= 21)
assert mock_get_info.called
assert mock_get_name.called
@pytest.mark.parametrize('identity', [IDENTITY_M8000, ])
def test_logix_init_micro800(identity):
with mock.patch.object(LogixDriver, '_list_identity') as mock_identity, \
mock.patch.object(LogixDriver, 'get_plc_info') as mock_get_info, \
mock.patch.object(LogixDriver, 'get_plc_name') as mock_get_name:
mock_identity.return_value = identity
mock_get_info.return_value = identity
plc = LogixDriver(CONNECT_PATH)
plc._initialize_driver(False, False)
assert plc._micro800 is True
assert plc._cfg['use_instance_ids'] is False
assert mock_get_info.called
assert not mock_get_name.called
assert not plc._cfg['cip_path']
@pytest.mark.parametrize('identity', [IDENTITY_CLX_V20, IDENTITY_CLX_V21, IDENTITY_CLX_V32, IDENTITY_M8000])
def test_logix_init_calls_get_tag_list_if_init_tags(identity):
with mock.patch.object(LogixDriver, '_list_identity') as mock_identity, \
mock.patch.object(LogixDriver, 'get_plc_info') as mock_get_info, \
mock.patch.object(LogixDriver, 'get_plc_name'), \
mock.patch.object(CIPDriver, 'open'), \
mock.patch.object(LogixDriver, 'get_tag_list') as mock_tag:
mock_identity.return_value = identity
mock_get_info.return_value = identity
driver = LogixDriver(CONNECT_PATH, init_info=False, init_tags=True)
driver._target_is_connected = True
driver.open()
assert mock_tag.called
def test_logix_context_manager_calls_open_and_close():
with mock.patch.object(LogixDriver, 'open') as mock_open, \
mock.patch.object(LogixDriver, 'close') as mock_close:
with LogixDriver(CONNECT_PATH, init_info=False, init_tags=False):
pass
assert mock_open.called
assert mock_close.called
def test__exit__returns_false_on_commerror():
ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
assert ld.__exit__(None, None, None) is True # Exit with no exception
def test__exit__returns_true_on_no_error_and_no_exc_type():
with mock.patch.object(LogixDriver, 'close'):
ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
assert ld.__exit__(None, None, None) is True
def test__exit__returns_false_on_no_error_and_exc_type():
with mock.patch.object(LogixDriver, 'close'):
ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
assert ld.__exit__('Some Exc Type', None, None) is False
def test__repr___ret_str():
ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
_repr = repr(ld)
assert repr
assert isinstance(_repr, str)
def test_default_logix_tags_are_empty_dict():
"""Show that LogixDriver tags are an empty dict on init."""
ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
assert ld.tags == dict()
def test_logix_connected_false_on_init_with_false_init_params():
ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
assert ld.connected is False
def test_clx_get_plc_time_sends_packet():
with mock.patch.object(LogixDriver, 'send') as mock_send, \
mock.patch('pycomm3.cip_driver.with_forward_open'):
ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
ld.get_plc_time()
assert mock_send.called
def test_clx_set_plc_time_sends_packet():
with mock.patch.object(LogixDriver, 'send') as mock_send, \
mock.patch('pycomm3.cip_driver.with_forward_open'):
ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
ld.set_plc_time()
assert mock_send.called
# TODO: all of the tag list associated tests
@pytest.mark.skip(reason="""tag parsing is extremely complex, and it's \
nearly impossible to test this without also reverse-engineering it""")
def test__get_tag_list_returns_expected_user_tags():
EXPECTED_USER_TAGS = [{
'tag_type': 'struct', # bit 15 is a 1
'instance_id': 1,
'tag_name': b"\x00\x01",
'symbol_type': "",
'symbol_address': "",
'symbol_object_address': "",
'software_control': "",
'external_access': "",
'dimensions': ["", "", ""]
}]
TEST_RESPONSE = ResponsePacket()
# 0 -> 4 are the 'instance', dint
# 4 -> 6 is the 'tag_length', uint, used internally
# 8 -> 'tag_length' is 'tag_name'
# 8+tag_length -> 10+tag_length is 'symbol_type' uint
# 10+tag_length -> 14+tag_length is 'symbol_address' udint
# 14+tag_length -> 18+tag_length is 'symbol_object_address' udint
# 18+tag_length -> 22+tag_length is 'software_control' udint
# 'dim1', 'dim2' and 'dim3' are the next 12 bytes, udint
TEST_RESPONSE.data = \
b"\x00\x00\x00\x01" + \
b"\x00\x01" + \
b"\x00\x01" + \
b"\x00\x00\x00\x00\x00\x10"
TEST_RESPONSE.command = "Something"
TEST_RESPONSE.command_status = SUCCESS
ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
with mock.patch.object(RequestPacket, 'send') as mock_send, \
mock.patch.object(CIPDriver, '_forward_open'), \
mock.patch.object(LogixDriver, '_parse_instance_attribute_list'):
mock_send.return_value = TEST_RESPONSE
actual_tags = ld.get_tag_list()
assert EXPECTED_USER_TAGS == actual_tags
|
dynaconf/vendor/ruamel/yaml/scalarint.py
|
RonnyPfannschmidt/dynaconf
| 2,293 |
55984
|
from __future__ import print_function,absolute_import,division,unicode_literals
_B=False
_A=None
from .compat import no_limit_int
from .anchor import Anchor
if _B:from typing import Text,Any,Dict,List
__all__=['ScalarInt','BinaryInt','OctalInt','HexInt','HexCapsInt','DecimalInt']
class ScalarInt(no_limit_int):
def __new__(D,*E,**A):
F=A.pop('width',_A);G=A.pop('underscore',_A);C=A.pop('anchor',_A);B=no_limit_int.__new__(D,*E,**A);B._width=F;B._underscore=G
if C is not _A:B.yaml_set_anchor(C,always_dump=True)
return B
def __iadd__(A,a):B=type(A)(A+a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __ifloordiv__(A,a):B=type(A)(A//a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __imul__(A,a):B=type(A)(A*a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __ipow__(A,a):B=type(A)(A**a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __isub__(A,a):B=type(A)(A-a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
@property
def anchor(self):
A=self
if not hasattr(A,Anchor.attrib):setattr(A,Anchor.attrib,Anchor())
return getattr(A,Anchor.attrib)
def yaml_anchor(A,any=_B):
if not hasattr(A,Anchor.attrib):return _A
if any or A.anchor.always_dump:return A.anchor
return _A
def yaml_set_anchor(A,value,always_dump=_B):A.anchor.value=value;A.anchor.always_dump=always_dump
class BinaryInt(ScalarInt):
def __new__(A,value,width=_A,underscore=_A,anchor=_A):return ScalarInt.__new__(A,value,width=width,underscore=underscore,anchor=anchor)
class OctalInt(ScalarInt):
def __new__(A,value,width=_A,underscore=_A,anchor=_A):return ScalarInt.__new__(A,value,width=width,underscore=underscore,anchor=anchor)
class HexInt(ScalarInt):
def __new__(A,value,width=_A,underscore=_A,anchor=_A):return ScalarInt.__new__(A,value,width=width,underscore=underscore,anchor=anchor)
class HexCapsInt(ScalarInt):
def __new__(A,value,width=_A,underscore=_A,anchor=_A):return ScalarInt.__new__(A,value,width=width,underscore=underscore,anchor=anchor)
class DecimalInt(ScalarInt):
def __new__(A,value,width=_A,underscore=_A,anchor=_A):return ScalarInt.__new__(A,value,width=width,underscore=underscore,anchor=anchor)
|
green/suite.py
|
jwaschkau/green
| 686 |
55991
|
from __future__ import unicode_literals
from __future__ import print_function
from fnmatch import fnmatch
import sys
from unittest.suite import _call_if_exists, _DebugResult, _isnotsuite, TestSuite
from unittest import util
import unittest
from io import StringIO
from green.config import default_args
from green.output import GreenStream
from green.result import ProtoTest
class GreenTestSuite(TestSuite):
"""
This version of a test suite has two important functions:
1) It brings Python 3.x-like features to Python 2.7
2) It adds Green-specific features (see customize())
"""
args = None
def __init__(self, tests=(), args=None):
# You should either set GreenTestSuite.args before instantiation, or
# pass args into __init__
self._removed_tests = 0
self.allow_stdout = default_args.allow_stdout
self.full_test_pattern = "test" + default_args.test_pattern
self.customize(args)
super(GreenTestSuite, self).__init__(tests)
def addTest(self, test):
"""
Override default behavior with some green-specific behavior.
"""
if (
self.full_test_pattern
# test can actually be suites and things. Only tests have
# _testMethodName
and getattr(test, "_testMethodName", False)
# Fake test cases (generated for module import failures, for example)
# do not start with 'test'. We still want to see those fake cases.
and test._testMethodName.startswith("test")
):
if not fnmatch(test._testMethodName, self.full_test_pattern):
return
super(GreenTestSuite, self).addTest(test)
def customize(self, args):
"""
Green-specific behavior customization via an args dictionary from
the green.config module. If you don't pass in an args dictionary,
then this class acts like TestSuite from Python 3.x.
"""
# Set a new args on the CLASS
if args:
self.args = args
# Use the class args
if self.args and getattr(self.args, "allow_stdout", None):
self.allow_stdout = self.args.allow_stdout
if self.args and getattr(self.args, "test_pattern", None):
self.full_test_pattern = "test" + self.args.test_pattern
def _removeTestAtIndex(self, index):
"""
Python 3.x-like version of this function for Python 2.7's sake.
"""
test = self._tests[index]
if hasattr(test, "countTestCases"):
self._removed_tests += test.countTestCases()
self._tests[index] = None
def countTestCases(self):
"""
Python 3.x-like version of this function for Python 2.7's sake.
"""
cases = self._removed_tests
for test in self:
if test:
cases += test.countTestCases()
return cases
def _handleClassSetUpPre38(self, test, result): # pragma: nocover
previousClass = getattr(result, "_previousTestClass", None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False): # pragma: no cover
return
try:
currentClass._classSetupFailed = False
except TypeError: # pragma: no cover
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, "setUpClass", None)
if setUpClass is not None:
_call_if_exists(result, "_setupStdout")
try:
setUpClass()
# Upstream Python forgets to take SkipTest into account
except unittest.case.SkipTest as e:
currentClass.__unittest_skip__ = True
currentClass.__unittest_skip_why__ = str(e)
# -- END of fix
except Exception as e: # pragma: no cover
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = "setUpClass (%s)" % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, "_restoreStdout")
def _handleClassSetUpPost38(
self, test, result
): # pragma: no cover -- because it's just like *Pre38
previousClass = getattr(result, "_previousTestClass", None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, "setUpClass", None)
if setUpClass is not None:
_call_if_exists(result, "_setupStdout")
try:
setUpClass()
# Upstream Python forgets to take SkipTest into account
except unittest.case.SkipTest as e:
currentClass.__unittest_skip__ = True
currentClass.__unittest_skip_why__ = str(e)
# -- END of fix
except Exception as e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
self._createClassOrModuleLevelException(
result, e, "setUpClass", className
)
finally:
_call_if_exists(result, "_restoreStdout")
if currentClass._classSetupFailed is True:
currentClass.doClassCleanups()
if len(currentClass.tearDown_exceptions) > 0:
for exc in currentClass.tearDown_exceptions:
self._createClassOrModuleLevelException(
result, exc[1], "setUpClass", className, info=exc
)
if sys.version_info < (3, 8): # pragma: no cover
_handleClassSetUp = _handleClassSetUpPre38
else:
_handleClassSetUp = _handleClassSetUpPost38
def run(self, result):
"""
Emulate unittest's behavior, with Green-specific changes.
"""
topLevel = False
if getattr(result, "_testRunEntered", False) is False:
result._testRunEntered = topLevel = True
for index, test in enumerate(self):
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if getattr(test.__class__, "_classSetupFailed", False) or getattr(
result, "_moduleSetUpFailed", False
):
continue
if not self.allow_stdout:
captured_stdout = StringIO()
captured_stderr = StringIO()
saved_stdout = sys.stdout
saved_stderr = sys.stderr
sys.stdout = GreenStream(captured_stdout)
sys.stderr = GreenStream(captured_stderr)
test(result)
if _isnotsuite(test):
if not self.allow_stdout:
sys.stdout = saved_stdout
sys.stderr = saved_stderr
result.recordStdout(test, captured_stdout.getvalue())
result.recordStderr(test, captured_stderr.getvalue())
# Since we're intercepting the stdout/stderr out here at the
# suite level, we need to poke the test result and let it know
# when we're ready to transmit results back up to the parent
# process. I would rather just do it automatically at test
# stop time, but we don't have the captured stuff at that
# point. Messy...but the only other alternative I can think of
# is monkey-patching loaded TestCases -- which could be from
# unittest or twisted or some other custom subclass.
result.finalize()
self._removeTestAtIndex(index)
# Green's subprocesses have handled all actual tests and sent up the
# result, but unittest expects to be able to add teardown errors to
# the result still, so we'll need to watch for that ourself.
errors_before = len(result.errors)
if topLevel:
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
result._testRunEntered = False
# Special handling for class/module tear-down errors. startTest() and
# finalize() both trigger communication between the subprocess and
# the runner process. addError()
if errors_before != len(result.errors):
difference = len(result.errors) - errors_before
result.errors, new_errors = (
result.errors[:-difference],
result.errors[-difference:],
)
for (test, err) in new_errors:
# test = ProtoTest()
test.module = result._previousTestClass.__module__
test.class_name = result._previousTestClass.__name__
# test.method_name = 'some method name'
test.is_class_or_module_teardown_error = True
test.name = "Error in class or module teardown"
# test.docstr_part = 'docstr part' # error_holder.description
result.startTest(test)
result.addError(test, err)
result.stopTest(test)
result.finalize()
return result
|
lnbits/extensions/tipjar/views_api.py
|
fusion44/lnbits
| 258 |
56044
|
<gh_stars>100-1000
from quart import g, jsonify
from http import HTTPStatus
from lnbits.decorators import api_validate_post_request, api_check_wallet_key
from lnbits.core.crud import get_user
from . import tipjar_ext
from .helpers import get_charge_details
from .crud import (
create_tipjar,
get_tipjar,
create_tip,
get_tipjars,
get_tip,
get_tips,
update_tip,
update_tipjar,
delete_tip,
delete_tipjar,
)
from ..satspay.crud import create_charge
@tipjar_ext.route("/api/v1/tipjars", methods=["POST"])
@api_check_wallet_key("invoice")
@api_validate_post_request(
schema={
"name": {"type": "string", "required": True},
"wallet": {"type": "string", "required": True},
"webhook": {"type": "string"},
"onchain": {"type": "string"},
}
)
async def api_create_tipjar():
"""Create a tipjar, which holds data about how/where to post tips"""
try:
tipjar = await create_tipjar(**g.data)
except Exception as e:
return jsonify({"message": str(e)}), HTTPStatus.INTERNAL_SERVER_ERROR
return jsonify(tipjar._asdict()), HTTPStatus.CREATED
@tipjar_ext.route("/api/v1/tips", methods=["POST"])
@api_validate_post_request(
schema={
"name": {"type": "string"},
"sats": {"type": "integer", "required": True},
"tipjar": {"type": "integer", "required": True},
"message": {"type": "string"},
}
)
async def api_create_tip():
"""Take data from tip form and return satspay charge"""
sats = g.data["sats"]
message = g.data.get("message", "")[:144]
if not message:
message = "No message"
tipjar_id = g.data["tipjar"]
tipjar = await get_tipjar(tipjar_id)
webhook = tipjar.webhook
charge_details = await get_charge_details(tipjar.id)
name = g.data.get("name", "")[:25]
# Ensure that description string can be split reliably
name = name.replace('"', "''")
if not name:
name = "Anonymous"
description = f'"{name}": {message}'
charge = await create_charge(
amount=sats,
webhook=webhook,
description=description,
**charge_details,
)
await create_tip(
id=charge.id,
wallet=tipjar.wallet,
message=message,
name=name,
sats=g.data["sats"],
tipjar=g.data["tipjar"],
)
return (jsonify({"redirect_url": f"/satspay/{charge.id}"}), HTTPStatus.OK)
@tipjar_ext.route("/api/v1/tipjars", methods=["GET"])
@api_check_wallet_key("invoice")
async def api_get_tipjars():
"""Return list of all tipjars assigned to wallet with given invoice key"""
wallet_ids = (await get_user(g.wallet.user)).wallet_ids
tipjars = []
for wallet_id in wallet_ids:
new_tipjars = await get_tipjars(wallet_id)
tipjars += new_tipjars if new_tipjars else []
return (
jsonify([tipjar._asdict() for tipjar in tipjars] if tipjars else []),
HTTPStatus.OK,
)
@tipjar_ext.route("/api/v1/tips", methods=["GET"])
@api_check_wallet_key("invoice")
async def api_get_tips():
"""Return list of all tips assigned to wallet with given invoice key"""
wallet_ids = (await get_user(g.wallet.user)).wallet_ids
tips = []
for wallet_id in wallet_ids:
new_tips = await get_tips(wallet_id)
tips += new_tips if new_tips else []
return (
jsonify([tip._asdict() for tip in tips] if tips else []),
HTTPStatus.OK,
)
@tipjar_ext.route("/api/v1/tips/<tip_id>", methods=["PUT"])
@api_check_wallet_key("invoice")
async def api_update_tip(tip_id=None):
"""Update a tip with the data given in the request"""
if tip_id:
tip = await get_tip(tip_id)
if not tip:
return (
jsonify({"message": "Tip does not exist."}),
HTTPStatus.NOT_FOUND,
)
if tip.wallet != g.wallet.id:
return (jsonify({"message": "Not your tip."}), HTTPStatus.FORBIDDEN)
tip = await update_tip(tip_id, **g.data)
else:
return (
jsonify({"message": "No tip ID specified"}),
HTTPStatus.BAD_REQUEST,
)
return jsonify(tip._asdict()), HTTPStatus.CREATED
@tipjar_ext.route("/api/v1/tipjars/<tipjar_id>", methods=["PUT"])
@api_check_wallet_key("invoice")
async def api_update_tipjar(tipjar_id=None):
"""Update a tipjar with the data given in the request"""
if tipjar_id:
tipjar = await get_tipjar(tipjar_id)
if not tipjar:
return (
jsonify({"message": "TipJar does not exist."}),
HTTPStatus.NOT_FOUND,
)
if tipjar.wallet != g.wallet.id:
return (jsonify({"message": "Not your tipjar."}), HTTPStatus.FORBIDDEN)
tipjar = await update_tipjar(tipjar_id, **g.data)
else:
return (jsonify({"message": "No tipjar ID specified"}), HTTPStatus.BAD_REQUEST)
return jsonify(tipjar._asdict()), HTTPStatus.CREATED
@tipjar_ext.route("/api/v1/tips/<tip_id>", methods=["DELETE"])
@api_check_wallet_key("invoice")
async def api_delete_tip(tip_id):
"""Delete the tip with the given tip_id"""
tip = await get_tip(tip_id)
if not tip:
return (jsonify({"message": "No tip with this ID!"}), HTTPStatus.NOT_FOUND)
if tip.wallet != g.wallet.id:
return (
jsonify({"message": "Not authorized to delete this tip!"}),
HTTPStatus.FORBIDDEN,
)
await delete_tip(tip_id)
return "", HTTPStatus.NO_CONTENT
@tipjar_ext.route("/api/v1/tipjars/<tipjar_id>", methods=["DELETE"])
@api_check_wallet_key("invoice")
async def api_delete_tipjar(tipjar_id):
"""Delete the tipjar with the given tipjar_id"""
tipjar = await get_tipjar(tipjar_id)
if not tipjar:
return (jsonify({"message": "No tipjar with this ID!"}), HTTPStatus.NOT_FOUND)
if tipjar.wallet != g.wallet.id:
return (
jsonify({"message": "Not authorized to delete this tipjar!"}),
HTTPStatus.FORBIDDEN,
)
await delete_tipjar(tipjar_id)
return "", HTTPStatus.NO_CONTENT
|
sonarqube/community/project_branches.py
|
ckho-wkcda/python-sonarqube-api
| 113 |
56067
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: <NAME>
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_PROJECT_BRANCHES_LIST_ENDPOINT,
API_PROJECT_BRANCHES_DELETE_ENDPOINT,
API_PROJECT_BRANCHES_RENAME_ENDPOINT,
API_PROJECT_BRANCHES_SET_PROTECTION_ENDPOINT,
)
from sonarqube.utils.common import GET, POST
class SonarQubeProjectBranches(RestClient):
"""
SonarQube project branches Operations
"""
def __init__(self, **kwargs):
"""
:param kwargs:
"""
super(SonarQubeProjectBranches, self).__init__(**kwargs)
@GET(API_PROJECT_BRANCHES_LIST_ENDPOINT)
def search_project_branches(self, project):
"""
SINCE 6.6
List the branches of a project.
:param project: Project key
:return:
"""
@POST(API_PROJECT_BRANCHES_DELETE_ENDPOINT)
def delete_project_branch(self, project, branch):
"""
SINCE 6.6
Delete a non-main branch of a project.
:param project: Project key
:param branch: Name of the branch
:return:
"""
@POST(API_PROJECT_BRANCHES_RENAME_ENDPOINT)
def rename_project_branch(self, project, name):
"""
SINCE 6.6
Rename the main branch of a project
:param project: Project key
:param name: New name of the main branch
:return:
"""
@POST(API_PROJECT_BRANCHES_SET_PROTECTION_ENDPOINT)
def set_automatic_deletion_protection_for_project_branch(self, project, branch, value):
"""
SINCE 8.1
Protect a specific branch from automatic deletion. Protection can't be disabled for the main branch.
:param project: Project key
:param branch: Branch key
:param value: Sets whether the branch should be protected from automatic deletion
when it hasn't been analyzed for a set period of time. Possible values are for: true or false, yes or no.
:return:
"""
|
community/dm-scaffolder/providers/pubsub.py
|
shan2202/deploymentmanager-samples
| 930 |
56072
|
<reponame>shan2202/deploymentmanager-samples
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PubSubTopic provider implements GCP PubSub Topic specific transaltions
Supports V1, CFT versions
"""
import providers.baseprovider as base
### PubSub Subscription start ###
class PubSubSubscriptionBase(base.BaseProvider):
"""
Common implementation shared accross all PubSub Subscription versions.
This class should not be used outside of it's child classes.
"""
def __init__(self, dm_api, gcloud_stage, gcloud_flags=''):
base.BaseProvider.__init__(
self, "pubsub", "subscriptions", dm_api, gcloud_stage, gcloud_flags)
def get_new(self):
return None # not supposed to run
class PubSubSubscriptionV1(PubSubSubscriptionBase):
""" PubSub-topic V1 API provider"""
def __init__(self, gcloud_flags=''):
PubSubSubscriptionBase.__init__(
self, "gcp-types/pubsub-v1:projects.subscriptions", "", gcloud_flags)
def get_new(self):
return PubSubSubscriptionV1()
class PubSubSubscriptionCFT(PubSubSubscriptionBase):
""" PubSub-Subscription CFT API provider - DO NOT USE DIRECTLY"""
def __init__(self, gcloud_flags=''):
PubSubSubscriptionBase.__init__(
self, "../templates/pubsub/pubsub.py", " ", gcloud_flags)
def get_new(self):
return PubSubSubscriptionCFT()
def fill_properties(self):
self.base_yaml['properties']['topic'] = self.properties['topic']
self.base_yaml['properties']['subscriptions'] = [ self.properties]
### PubSub Subscription end ###
### PubSub Topics start ###
class PubSubTopicBase(base.BaseProvider):
"""
Common implementation shared accross all PubSub Topic versions.
This class should not be used outside of it's child classes.
"""
def __init__(self, dm_api, gcloud_stage, gcloud_flags=''):
base.BaseProvider.__init__(
self, "pubsub", "topics", dm_api, gcloud_stage, gcloud_flags)
def get_new(self):
return None # not supposed to run
class PubSubTopicV1(PubSubTopicBase):
""" PubSub-topic V1 API provider"""
def __init__(self, gcloud_flags=''):
PubSubTopicBase.__init__(
self, "gcp-types/pubsub-v1:projects.topics", "", gcloud_flags)
def get_new(self):
return PubSubTopicV1()
class PubSubTopicCFT(PubSubTopicBase):
""" PubSub-topic CFT API provider """
def __init__(self, gcloud_flags=''):
PubSubTopicBase.__init__(
self, "../templates/pubsub/pubsub.py", " ", gcloud_flags)
def get_new(self):
return PubSubTopicCFT()
def fill_properties(self):
self.base_yaml['properties']['topic'] = self.properties
self.base_yaml['properties']['subscriptions'] = []
self.get_subscriptions()
def get_subscriptions(self):
""" Sub-optimal implementation """
__subscriptions = PubSubSubscriptionCFT().get_list()
__subs_yaml = []
for sub in __subscriptions:
if sub.base_yaml['properties']['topic'] == self.base_yaml['properties']['topic']['name']:
__subs_yaml.append(sub.base_yaml['properties']['subscriptions'][0])
self.base_yaml['properties']['subscriptions'] = __subs_yaml
|
enteletaor_lib/modules/brute/cmd_brute_main.py
|
Seabreg/enteletaor
| 159 |
56091
|
# -*- coding: utf-8 -*-
#
# Enteletaor - https://github.com/cr0hn/enteletaor
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import six
import logging
from .utils import get_server_type
if six.PY2:
from .cracker import cracking
else:
# from .cracker3 import cracking
from .cracker import cracking
# Reconfigure AMQP LOGGER
logging.getLogger('amqp').setLevel(100)
log = logging.getLogger()
# ----------------------------------------------------------------------
def cmd_brute_main(config):
# --------------------------------------------------------------------------
# Check requisites
# --------------------------------------------------------------------------
if not config.target:
logging.error(" <!> target option, '-t', is required")
return
if not config.wordlist:
logging.error(" <!> wordlist option, '-w', is required")
return
# Fix wordlist path
if not os.path.exists(config.wordlist):
wordlist_base = os.path.join(os.path.dirname(__file__),
"..",
"..",
"resources",
"wordlist")
# Try to find into internal wordlists
internal_wordlists = [x for x in os.listdir(os.path.abspath(wordlist_base)) if "readme" not in x.lower()]
wordlist_choice = "%s.txt" % config.wordlist if ".txt" not in config.wordlist else config.wordlist
# Is wordlist available?
if wordlist_choice not in internal_wordlists:
log.error(" <!> Wordlist '%s' not found." % wordlist_choice)
return
# Fix wordlist path
config.wordlist = os.path.abspath(os.path.join(wordlist_base, wordlist_choice))
# --------------------------------------------------------------------------
# Preparing scan
# --------------------------------------------------------------------------
server_type, status, port = get_server_type(config)
if status != "closed":
log.error(" - Detected '%s' server with '%s'." % ('unknown' if server_type is None else server_type, status))
if server_type.lower() == "rabbitmq":
log.error(" - Set user to '%s'" % config.user)
# --------------------------------------------------------------------------
# Do brute
# --------------------------------------------------------------------------
if status == "auth":
log.error(" - Starting bruteforcer using wordlist: '%s'" % config.wordlist)
cracking(server_type, port, config)
elif status == "open":
log.error(" - '%s' '%s' server is open. No password cracking need" % (server_type, config.target))
else:
log.error(" - Not detected brokers in '%s'." % config.target)
|
sdk/python/pulumi_gcp/projects/default_service_accounts.py
|
sisisin/pulumi-gcp
| 121 |
56115
|
<reponame>sisisin/pulumi-gcp
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DefaultServiceAccountsArgs', 'DefaultServiceAccounts']
@pulumi.input_type
class DefaultServiceAccountsArgs:
def __init__(__self__, *,
action: pulumi.Input[str],
project: pulumi.Input[str],
restore_policy: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DefaultServiceAccounts resource.
:param pulumi.Input[str] action: The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`. Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
:param pulumi.Input[str] project: The project ID where service accounts are created.
:param pulumi.Input[str] restore_policy: The action to be performed in the default service accounts on the resource destroy.
Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
"""
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "project", project)
if restore_policy is not None:
pulumi.set(__self__, "restore_policy", restore_policy)
@property
@pulumi.getter
def action(self) -> pulumi.Input[str]:
"""
The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`. Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input[str]):
pulumi.set(self, "action", value)
@property
@pulumi.getter
def project(self) -> pulumi.Input[str]:
"""
The project ID where service accounts are created.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: pulumi.Input[str]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="restorePolicy")
def restore_policy(self) -> Optional[pulumi.Input[str]]:
"""
The action to be performed in the default service accounts on the resource destroy.
Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
"""
return pulumi.get(self, "restore_policy")
@restore_policy.setter
def restore_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restore_policy", value)
@pulumi.input_type
class _DefaultServiceAccountsState:
def __init__(__self__, *,
action: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
restore_policy: Optional[pulumi.Input[str]] = None,
service_accounts: Optional[pulumi.Input[Mapping[str, Any]]] = None):
"""
Input properties used for looking up and filtering DefaultServiceAccounts resources.
:param pulumi.Input[str] action: The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`. Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
:param pulumi.Input[str] project: The project ID where service accounts are created.
:param pulumi.Input[str] restore_policy: The action to be performed in the default service accounts on the resource destroy.
Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
:param pulumi.Input[Mapping[str, Any]] service_accounts: The Service Accounts changed by this resource. It is used for `REVERT` the `action` on the destroy.
"""
if action is not None:
pulumi.set(__self__, "action", action)
if project is not None:
pulumi.set(__self__, "project", project)
if restore_policy is not None:
pulumi.set(__self__, "restore_policy", restore_policy)
if service_accounts is not None:
pulumi.set(__self__, "service_accounts", service_accounts)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input[str]]:
"""
The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`. Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "action", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The project ID where service accounts are created.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="restorePolicy")
def restore_policy(self) -> Optional[pulumi.Input[str]]:
"""
The action to be performed in the default service accounts on the resource destroy.
Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
"""
return pulumi.get(self, "restore_policy")
@restore_policy.setter
def restore_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restore_policy", value)
@property
@pulumi.getter(name="serviceAccounts")
def service_accounts(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
The Service Accounts changed by this resource. It is used for `REVERT` the `action` on the destroy.
"""
return pulumi.get(self, "service_accounts")
@service_accounts.setter
def service_accounts(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "service_accounts", value)
class DefaultServiceAccounts(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
restore_policy: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Allows management of Google Cloud Platform project default service accounts.
When certain service APIs are enabled, Google Cloud Platform automatically creates service accounts to help get started, but
this is not recommended for production environments as per [Google's documentation](https://cloud.google.com/iam/docs/service-accounts#default).
See the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.
> **WARNING** Some Google Cloud products do not work if the default service accounts are deleted so it is better to `DEPRIVILEGE` as
Google **CAN NOT** recover service accounts that have been deleted for more than 30 days.
Also Google recommends using the `constraints/iam.automaticIamGrantsForDefaultServiceAccounts` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html)
to disable automatic IAM Grants to default service accounts.
> This resource works on a best-effort basis, as no API formally describes the default service accounts
and it is for users who are unable to use constraints. If the default service accounts change their name
or additional service accounts are added, this resource will need to be updated.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_project = gcp.projects.DefaultServiceAccounts("myProject",
action="DELETE",
project="my-project-id")
```
To enable the default service accounts on the resource destroy:
```python
import pulumi
import pulumi_gcp as gcp
my_project = gcp.projects.DefaultServiceAccounts("myProject",
action="DISABLE",
project="my-project-id",
restore_policy="REVERT")
```
## Import
This resource does not support import
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action: The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`. Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
:param pulumi.Input[str] project: The project ID where service accounts are created.
:param pulumi.Input[str] restore_policy: The action to be performed in the default service accounts on the resource destroy.
Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DefaultServiceAccountsArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Allows management of Google Cloud Platform project default service accounts.
When certain service APIs are enabled, Google Cloud Platform automatically creates service accounts to help get started, but
this is not recommended for production environments as per [Google's documentation](https://cloud.google.com/iam/docs/service-accounts#default).
See the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.
> **WARNING** Some Google Cloud products do not work if the default service accounts are deleted so it is better to `DEPRIVILEGE` as
Google **CAN NOT** recover service accounts that have been deleted for more than 30 days.
Also Google recommends using the `constraints/iam.automaticIamGrantsForDefaultServiceAccounts` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html)
to disable automatic IAM Grants to default service accounts.
> This resource works on a best-effort basis, as no API formally describes the default service accounts
and it is for users who are unable to use constraints. If the default service accounts change their name
or additional service accounts are added, this resource will need to be updated.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_project = gcp.projects.DefaultServiceAccounts("myProject",
action="DELETE",
project="my-project-id")
```
To enable the default service accounts on the resource destroy:
```python
import pulumi
import pulumi_gcp as gcp
my_project = gcp.projects.DefaultServiceAccounts("myProject",
action="DISABLE",
project="my-project-id",
restore_policy="REVERT")
```
## Import
This resource does not support import
:param str resource_name: The name of the resource.
:param DefaultServiceAccountsArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DefaultServiceAccountsArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
restore_policy: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DefaultServiceAccountsArgs.__new__(DefaultServiceAccountsArgs)
if action is None and not opts.urn:
raise TypeError("Missing required property 'action'")
__props__.__dict__["action"] = action
if project is None and not opts.urn:
raise TypeError("Missing required property 'project'")
__props__.__dict__["project"] = project
__props__.__dict__["restore_policy"] = restore_policy
__props__.__dict__["service_accounts"] = None
super(DefaultServiceAccounts, __self__).__init__(
'gcp:projects/defaultServiceAccounts:DefaultServiceAccounts',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
restore_policy: Optional[pulumi.Input[str]] = None,
service_accounts: Optional[pulumi.Input[Mapping[str, Any]]] = None) -> 'DefaultServiceAccounts':
"""
Get an existing DefaultServiceAccounts resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action: The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`. Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
:param pulumi.Input[str] project: The project ID where service accounts are created.
:param pulumi.Input[str] restore_policy: The action to be performed in the default service accounts on the resource destroy.
Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
:param pulumi.Input[Mapping[str, Any]] service_accounts: The Service Accounts changed by this resource. It is used for `REVERT` the `action` on the destroy.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DefaultServiceAccountsState.__new__(_DefaultServiceAccountsState)
__props__.__dict__["action"] = action
__props__.__dict__["project"] = project
__props__.__dict__["restore_policy"] = restore_policy
__props__.__dict__["service_accounts"] = service_accounts
return DefaultServiceAccounts(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def action(self) -> pulumi.Output[str]:
"""
The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`. Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
"""
return pulumi.get(self, "action")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The project ID where service accounts are created.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="restorePolicy")
def restore_policy(self) -> pulumi.Output[Optional[str]]:
"""
The action to be performed in the default service accounts on the resource destroy.
Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
"""
return pulumi.get(self, "restore_policy")
@property
@pulumi.getter(name="serviceAccounts")
def service_accounts(self) -> pulumi.Output[Mapping[str, Any]]:
"""
The Service Accounts changed by this resource. It is used for `REVERT` the `action` on the destroy.
"""
return pulumi.get(self, "service_accounts")
|
poco/__init__.py
|
HBoPRC/Poco
| 1,444 |
56119
|
# coding=utf-8
from .pocofw import Poco
|
paperboy/resources/job.py
|
chris-aeviator/paperboy
| 233 |
56143
|
<gh_stars>100-1000
import json
from .base import BaseResource
class JobResource(BaseResource):
def __init__(self, *args, **kwargs):
super(JobResource, self).__init__(*args, **kwargs)
def on_get(self, req, resp):
'''List all job instances'''
resp.content_type = 'application/json'
resp.body = json.dumps(self.db.jobs.list(req.context['user'], req.params, self.session))
def on_post(self, req, resp):
'''Create new or delete job instance'''
resp.content_type = 'application/json'
action = req.params.get('action')
if action == 'delete':
resp.body = json.dumps(self.db.jobs.delete(req.context['user'], req.params, self.session, self.scheduler))
else:
resp.body = json.dumps(self.db.jobs.store(req.context['user'], req.params, self.session, self.scheduler))
class JobDetailResource(BaseResource):
def __init__(self, *args, **kwargs):
super(JobDetailResource, self).__init__(*args, **kwargs)
def on_get(self, req, resp):
'''Get details of specific job instance'''
resp.content_type = 'application/json'
resp.body = json.dumps(self.db.jobs.detail(req.context['user'], req.params, self.session))
|
evalml/pipelines/components/transformers/samplers/__init__.py
|
Mahesh1822/evalml
| 454 |
56150
|
<filename>evalml/pipelines/components/transformers/samplers/__init__.py<gh_stars>100-1000
"""Sampler components."""
from .undersampler import Undersampler
from .oversampler import Oversampler
|
pylearn2/scripts/tutorials/tests/test_dbm.py
|
ikervazquezlopez/Pylearn2
| 2,045 |
56246
|
<gh_stars>1000+
"""
This module tests dbm_demo/rbm.yaml
"""
import os
from pylearn2.testing import skip
from pylearn2.testing import no_debug_mode
from pylearn2.config import yaml_parse
@no_debug_mode
def train_yaml(yaml_file):
train = yaml_parse.load(yaml_file)
train.main_loop()
def train(yaml_file_path, save_path):
yaml = open("{0}/rbm.yaml".format(yaml_file_path), 'r').read()
hyper_params = {'detector_layer_dim': 5,
'monitoring_batches': 2,
'train_stop': 500,
'max_epochs': 7,
'save_path': save_path}
yaml = yaml % (hyper_params)
train_yaml(yaml)
def test_dbm():
skip.skip_if_no_data()
yaml_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../dbm_demo'))
save_path = os.path.dirname(os.path.realpath(__file__))
train(yaml_file_path, save_path)
try:
os.remove("{}/dbm.pkl".format(save_path))
except:
pass
if __name__ == '__main__':
test_dbm()
|
utils/lit/tests/Inputs/shtest-timeout/infinite_loop.py
|
kpdev/llvm-tnt
| 1,073 |
56256
|
# RUN: %{python} %s
from __future__ import print_function
import time
import sys
print("Running infinite loop")
sys.stdout.flush() # Make sure the print gets flushed so it appears in lit output.
while True:
pass
|
python/_args_parser.py
|
gglin001/poptorch
| 128 |
56309
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import copy
import inspect
import torch
# Do not import any poptorch.* here: it will break the poptorch module
from . import _impl
from ._logging import logger
class ArgsParser:
class Args:
def __init__(self):
self._args = []
self.first_none = None
def clone(self):
clone = ArgsParser.Args()
clone._args = copy.copy(self._args) # pylint: disable=protected-access
clone.first_none = self.first_none
return clone
def _forEach(self, data, fn):
if isinstance(data, (tuple, list)):
return type(data)(self._forEach(d, fn) for d in data)
if isinstance(data, dict):
return {
key: self._forEach(value, fn)
for key, value in data.items()
}
return fn(data)
def _forEachMatched(self, data, condition, doOnTrue, conditionMatches):
if isinstance(data, (tuple, list)):
return type(data)(self._forEachMatched(
d, condition, doOnTrue, conditionMatches) for d in data)
if isinstance(data, dict):
return {
key: self._forEachMatched(value, condition, doOnTrue,
conditionMatches)
for key, value in data.items()
}
if condition(data):
conditionMatches.setTrue()
return doOnTrue(data)
return data
def forEachMatchedAtLeastOnce(self, condition, doOnTrue=None):
class ConditionMatches:
def __init__(self):
self._matches = False
def __bool__(self):
return self._matches
def setTrue(self):
self._matches = True
matches = ConditionMatches()
self._args = self._forEachMatched(self._args, condition, doOnTrue,
matches)
return bool(matches)
def forEach(self, fn):
self._args = self._forEach(self._args, fn)
def asTuple(self):
return tuple(self._args)
def __init__(self, model):
# Combine args and kwargs:
if isinstance(model, _impl.OptimizerWrapper):
sig = inspect.signature(model.model.forward)
else:
sig = inspect.signature(model.forward)
self._has_variadic_arguments = any([
p.kind in [p.VAR_POSITIONAL, p.VAR_KEYWORD]
for p in sig.parameters.values()
])
self._varnames = list(sig.parameters.keys())
self._defaults = [p.default for p in sig.parameters.values()]
self._warned_not_contiguous_input = False
def __call__(self, args, kwargs, fast_path=False):
"""Checks the inputs are of a supported type. Inputs must be
tensors or tuples/lists of tensors. Will convert list to tuples
as we can't natively support lists in the JIT.
"""
in_tensors = ArgsParser.Args()
assert self._has_variadic_arguments or len(args) + len(kwargs) <= len(
self._varnames), ("Too many arguments provided: expected %s (%d) "
"but got %d") % (self._varnames,
len(self._varnames),
len(args) + len(kwargs))
first_optional = len(self._varnames) - len(self._defaults)
none_passed = []
# Make sure all the arguments provided are allowed.
for k in kwargs.keys():
assert k in self._varnames, (
f"{k} is not a valid parameter."
f"Allowed values are {self._varnames}")
for i, name in enumerate(self._varnames):
if i < len(args):
has_list = self._errorOnDictReturnTrueIfList(args[i], name, [])
# Non fast path for compilation, fast path for executing.
if not fast_path:
if has_list:
logger.warning(
"Lists as inputs only have partial support, they "
"can be accessed but full Python functionality is "
"not enabled. Consider changing input to tuple.")
data = self._convertLists(args[i])
in_tensors._args.append(data)
else:
in_tensors._args.append(args[i])
assert name not in kwargs, ("Parameter %s was passed more "
"than once") % name
elif name in kwargs:
assert not none_passed, (
"Torch doesn't support passing tensors (%s)"
" after the following parameters have defaulted to None."
" %s") % (name, ", ".join(none_passed))
has_list = self._errorOnDictReturnTrueIfList(
kwargs[name], name, [])
# Non fast path for compilation, fast path for executing.
if not fast_path:
if has_list:
logger.warning(
"Lists as inputs only have partial support, they "
"can be accessed but full Python functionality is "
"not enabled. Consider changing input to tuple.")
kwargs[name] = self._convertLists(kwargs[name])
in_tensors._args.append(kwargs[name])
else:
assert i >= first_optional, ("Mandatory parameter %s "
"missing") % name
value = self._defaults[i - first_optional]
if value is None:
if in_tensors.first_none is None:
in_tensors.first_none = i
none_passed.append("%s (%d)" % (name, i))
if not none_passed:
in_tensors._args.append(value)
if in_tensors.first_none is None:
in_tensors.first_none = len(self._varnames)
# filter-out trailing None arguments when they default to None
# Extending this to any argument set to its default value has
# proven problematic - the trace may be computed with fewer
# inputs than intended.
for i in reversed(range(len(in_tensors._args))):
if in_tensors._args[i] is not None:
break
if self._defaults[i] is not None:
break
in_tensors._args.pop()
if in_tensors.first_none == i:
in_tensors.first_none = None
# assert we are not passing None parameters to avoid a cryptic error
assert None not in in_tensors._args, \
"'None' may not be passed as explicit model argument. It may " + \
"only be used as default initialiser"
if in_tensors.forEachMatchedAtLeastOnce(
condition=lambda t: isinstance(t, torch.Tensor
) and not t.is_contiguous(),
doOnTrue=lambda t: t.contiguous()):
if not self._warned_not_contiguous_input:
logger.warning("At least one input tensor is not contiguous: "
"non-contiguous tensors will be converted.")
self._warned_not_contiguous_input = True
return in_tensors
def _convertLists(self, input):
if isinstance(input, (tuple, list)):
new_tuple = []
for _, data in enumerate(input):
new_tuple.append(self._convertLists(data))
return tuple(new_tuple)
return input
def _errorOnDictReturnTrueIfList(self, data, arg_name, stack_list):
has_list = False
if isinstance(data, (tuple, list)):
for idx, d in enumerate(data):
stack_list.append(idx)
has_list &= self._errorOnDictReturnTrueIfList(
d, arg_name, stack_list)
stack_list.pop()
if isinstance(data, list):
has_list = True
if isinstance(data, (dict)):
stack_list = [str(s) for s in stack_list]
end_msg = arg_name
if stack_list:
end_msg += "[" + "][".join(stack_list) + "]"
end_msg += " = " + str(data)
if isinstance(data, dict):
raise TypeError(
"Dictionaries are not supported as input arguments,"
" including when nested in tuples.\nReceived dict " + end_msg)
return has_list
|
scripts/graph.py
|
maurizioabba/rose
| 488 |
56323
|
#!/usr/bin/env python
#
###############################################################################
#
# Author: <NAME>
# Date: 8/24/2006
# File: graph.py
# Purpose: Plots ROSE performance data
#
###############################################################################
import sys
import os
import string
import getopt
import csv
import math
from gclass import *
from optparse import *
###############################################################################
def getHash(rawData):
"""
"""
hash = dict([ (rawData[i],rawData[i+1]) for i in range(0,len(rawData)-1,2) ])
return hash
##############################################################################
def generateGraph(reader,fout,xKey,yExclude,yInclude,separator):
"""
"""
if yInclude != []:
keys = yInclude
for row in reader:
hash = getHash(row)
data = ""
if yInclude == [] and yExclude != []:
keys = hash.keys()
keys.remove(xKey)
for y in yExclude:
keys.remove(y)
for key in keys:
data = data + separator + hash[key]
fout.write(hash[xKey] + data + '\n')
return keys
###############################################################################
def csv2gnuplot(inputs,output,xKey,yExclude,yInclude,xlabel,ylabel,
Format,height,width,pointsize,with,
yMin,yMax,xMin,xMax):
"""
"""
fout = open(output + ".dat",'a')
for i in inputs:
reader = csv.reader(open(i,'r'),doublequote=True,skipinitialspace=True)
keys = generateGraph(reader,fout,xKey,yExclude,yInclude,"\t")
# gnuplot formatting
index = 0
fscriptout = open(output + ".plt",'a')
while index < len(keys):
if index > 0:
fscriptout.write("re")
fscriptout.write("plot \"" + output + ".dat" +"\" using 1:" +
str(index + 2) + ' ')
if with != "":
fscriptout.write("smooth csplines ")
fscriptout.write("title \"" + keys[index] + "\"" + ' ')
fscriptout.write("with " + with + '\n')
else:
fscriptout.write("title \"" + keys[index] + "\"" + '\n')
index += 1
# while
if Format != "":
fscriptout.write("set terminal " + Format + '\n')
fscriptout.write("set output \"" + output + '.' + Format + "\"\n")
if xMin != "" or xMax != "":
fscriptout.write("set xrange [" + xMin + ':' + xMax + "]\n")
if yMin != "" or yMax != "":
fscriptout.write("set yrange [" + yMin + ':' + yMax + "]\n")
if xlabel != "":
fscriptout.write("set xlabel \"" + xlabel + "\"\n")
else:
fscriptout.write("set xlabel \"" + xKey + "\"\n")
if ylabel != "":
fscriptout.write("set ylabel \"" + ylabel + "\"\n")
# if
fscriptout.write("set key below\nset key box\n")
fscriptout.write("set size " + width + ',' + height + '\n')
fscriptout.write("set pointsize " + pointsize + '\n')
fscriptout.write("replot\n")
# end gnuplot formatting
return
# cvs2gnuplot()
###############################################################################
def csv2excel(inputs,output,xKey,yExclude):
"""
"""
fout = open(output + ".csv",'a')
for i in inputs:
reader = csv.reader(open(i,'r'),doublequote=True,skipinitialspace=True)
generateGraph(reader,fout,xKey,yExclude,',')
return
###############################################################################
def csv2matlab(inputs,output,xKey,yExclude,xlabel,ylabel,height,width,Format):
"""
"""
fout = open(output + ".dat",'a')
# Matlab data
for i in inputs:
reader = csv.reader(open(i,'r'),doublequote=True,skipinitialspace=True)
keys = generateGraph(reader,fout,xKey,yExclude," ")
# Matlab script
fscriptout = open(output + ".m",'a')
index = 2
ceilSqrt = int(math.ceil(math.sqrt(len(keys))))
if xlabel == "":
xlabel = xKey
fscriptout.write("load " + output + ".dat" + '\n')
fscriptout.write("set(gcf,'position',[0 0 " + str(width) + ' ' +
str(height) + "])\n")
fscriptout.write("x = " + output + "(:,1)\n")
while index < len(keys) + 2:
fscriptout.write("y" + str(index) + " = " + output + "(:,"
+ str(index) + ")\n")
fscriptout.write("xlabel('" + xlabel + "')\n")
fscriptout.write("ylabel('" + ylabel + "')\n")
#fscriptout.write("ylabel('" + keys[index - 2] + "')\n")
fscriptout.write("subplot(" + str(ceilSqrt) + ',' + str(ceilSqrt) +
',' + str(index - 1) + ") ; ")
fscriptout.write("plot(x,y" + str(index) + ",'o')\n")
fscriptout.write("legend('" + keys[index - 2] + "')\n")
index += 1
if Format != "":
fscriptout.write("set(gcf,'PaperPositionMode','auto')\n")
fscriptout.write("print(gcf,'-d" + Format + "'," + '\'' +
output + '.' + Format + "')\n")
fscriptout.write("quit\n")
# Matlab script
return
###############################################################################
def cmdOptionParse(parser):
"""
cmdOptionParse():
Parses command-line arguments and redirects to appropriate functions.
arguments:
parser -- a optparse object that stores command-line arguments
"""
# parse out options and input file arguments
(options,inputs) = parser.parse_args()
if inputs == []:
sys.stderr.write("Error: No input file(s) specified\n")
sys.exit(1)
if options.output != "":
output = options.output.split('.')[0]
else:
sys.stderr.write("Error: No output file name specified\n")
sys.exit(1)
if options.list:
print "Supported formats:"
print "1. Gnuplot (.dat .plt) -fgnuplot"
print "2. MS Excel (.csv) -fexcel"
print "3. Matlab (.dat) -fmatlab"
sys.exit(0)
if options.x == "":
sys.stderr.write("Error: X-Axis data not specified, please specify with -x\n")
sys.exit(1)
# if, error checking
if options.format == "gnuplot":
if options.e != [] and options.y != []:
sys.stderr.write("Error: Options -e and -y may not be used concurrently\n")
sys.exit(1)
csv2gnuplot(inputs,output,options.x,options.e,options.y,
options.xlabel,options.ylabel,options.Format,
options.Height,options.Width,options.pointsize,
options.with,options.ymin,options.ymax,
options.xmin,options.xmax)
if options.rehash != "" and options.granularity != "":
rehash(output + ".dat",string.atoi(options.granularity),'\t')
elif options.rehash != "" and options.granularity == "":
sys.stderr.write("Error: You must specifiy -g or --granularity with --rehash\n")
sys.exit(1)
if options.run:
args = []
args.append("")
args.append(output + ".plt")
os.execvp("gnuplot",args)
# if
# if
elif options.format == "excel":
csv2excel(inputs,options.output,options.x,options.e)
elif options.format == "matlab":
csv2matlab(inputs,options.output,options.x,options.e,
options.xlabel,options.ylabel,
options.Height,options.Width,
options.Format)
if options.run:
args = []
args.append("")
args.append("-nodesktop")
args.append("-r")
args.append(output)
os.execvp("matlab",args)
else:
sys.stderr.write("Error: Unrecognized output format\n")
return
###############################################################################
def cmdOptionInit(arguments):
"""
cmdOptionInit():
Initializes command-line parser optparse object. Specifies which option
flags behave in what way according to optparse.
arguments:
arguments -- sys.argv list of command-line arguments
variables:
parser -- optparse, OptionParser()
"""
parser = OptionParser()
parser.set_usage("graph.py <input file> [options]")
parser.add_option("-f","--format",help="Output file format",
metavar="%FORMAT%")
parser.add_option("-F","--Format",help="Secondard output format",
metavar="%FORMAT%",default="")
parser.add_option("-l","--list", help="List supported output formats",
action="store_true")
parser.add_option("-o","--output",help="Output file name",metavar="%FILE%",
default="")
parser.add_option("-r","--run",help="Run plotting tool",action="store_true")
parser.add_option("-x",help="X Axis Key Data",metavar="<XKEY>",default="")
parser.add_option("-y",help="Include Y Axis Data",metavar="<KEY>",
action="append",default=[])
parser.add_option("-e",help="Exclude Y Axis Data",metavar="<KEY>",
action="append",default=[])
parser.add_option("-g","--granularity",
help="granularity range for data manipulation",
metavar="<#>",default="")
parser.add_option("-w","--with",help="With lines,points,etc.",
metavar="%WITH%",default="")
parser.add_option("-H","--Height",help="Output Height default=1",
metavar="<H#>",default="1")
parser.add_option("-W","--Width",help="Output Width default=1",
metavar="<W#>",default="1")
parser.add_option("-P","--pointsize",help="Set pointsize default=1",
metavar="<P#>",default="1")
parser.add_option("--rehash",help="Rehash Data",metavar="%MODE%",
default="")
parser.add_option("--xlabel",help="X-Axis Label",metavar="%LABEL%",
default="")
parser.add_option("--xmin",help="Minimum X range value",metavar="<#>",
default="")
parser.add_option("--xmax",help="Maximum X range value",metavar="<#>",
default="")
parser.add_option("--ylabel",help="Y-Axis Label",metavar="%LABEL%",
default="")
parser.add_option("--ymin",help="Minimum Y range value",metavar="<#>",
default="")
parser.add_option("--ymax",help="Maximum Y range value",metavar="<#>",
default="")
return parser
###############################################################################
###############################################################################
parser = cmdOptionInit(sys.argv)
cmdOptionParse(parser)
# control flow:
# main->cmdOptionInit->main->cmdOptionParse->csv2{}->generateGraph<->getHash()
|
PhysicsTools/PatAlgos/python/recoLayer0/duplicatedElectrons_cfi.py
|
ckamtsikis/cmssw
| 852 |
56356
|
import FWCore.ParameterSet.Config as cms
# Remove duplicates from the electron list
electronsNoDuplicates = cms.EDFilter("DuplicatedElectronCleaner",
## reco electron input source
electronSource = cms.InputTag("gsfElectrons"),
)
|
Algo and DSA/LeetCode-Solutions-master/Python/single-number.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269 |
56412
|
<reponame>Sourav692/FAANG-Interview-Preparation<gh_stars>1000+
# Time: O(n)
# Space: O(1)
import operator
from functools import reduce
class Solution(object):
"""
:type nums: List[int]
:rtype: int
"""
def singleNumber(self, A):
return reduce(operator.xor, A)
|
openverse_api/catalog/api/migrations/0025_auto_20200429_1401.py
|
ritesh-pandey/openverse-api
| 122 |
56421
|
# Generated by Django 2.2.10 on 2020-04-29 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0024_auto_20200423_1601'),
]
operations = [
migrations.AlterField(
model_name='imagereport',
name='status',
field=models.CharField(choices=[('pending_review', 'pending_review'), ('mature_filtered', 'mature_filtered'), ('deindexed', 'deindexed'), ('no_action', 'no_action')], default='pending_review', max_length=20),
),
migrations.DeleteModel(
name='ImageTags',
),
]
|
tests/test_validators.py
|
jonwhittlestone/streaming-form-data
| 107 |
56422
|
import pytest
from streaming_form_data.validators import MaxSizeValidator, ValidationError
def test_max_size_validator_empty_input():
validator = MaxSizeValidator(0)
with pytest.raises(ValidationError):
validator('x')
def test_max_size_validator_normal():
validator = MaxSizeValidator(5)
for char in 'hello':
validator(char)
with pytest.raises(ValidationError):
validator('x')
|
setup.py
|
ceys/django-tracking
| 112 |
56427
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import sys, os
import tracking
setup(
name='django-tracking',
version=tracking.get_version(),
description="Basic visitor tracking and blacklisting for Django",
long_description=open('README.rst', 'r').read(),
keywords='django, tracking, visitors',
author='<NAME>',
author_email='<EMAIL>',
url='http://bitbucket.org/codekoala/django-tracking',
license='MIT',
package_dir={'tracking': 'tracking'},
include_package_data=True,
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: Log Analysis",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Page Counters",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware",
"Topic :: Security",
"Topic :: System :: Monitoring",
"Topic :: Utilities",
]
)
|
src/amuse/test/suite/ext_tests/test_boss_bodenheimer.py
|
rknop/amuse
| 131 |
56435
|
import sys
import os
import numpy.random
from amuse.test import amusetest
from amuse.units import units, nbody_system
from amuse.ext.boss_bodenheimer import bb79_cloud
numpy.random.seed(1234567)
class BossBodenheimerTests(amusetest.TestCase):
def test1(self):
numpy.random.seed(1234)
mc=bb79_cloud(targetN=1000).result
self.assertEqual(len(mc),1000)
ek=mc.kinetic_energy()
ep=mc.potential_energy(G=nbody_system.G)
eth=mc.thermal_energy()
self.assertAlmostEqual(eth/ep, -0.25, 2)
self.assertAlmostEqual(ek/ep, -0.2, 2)
def test2(self):
numpy.random.seed(1234)
convert=nbody_system.nbody_to_si(1. | units.MSun,3.2e16| units.cm)
mc=bb79_cloud(targetN=1000,convert_nbody=convert).result
self.assertEqual(len(mc),1000)
ek=mc.kinetic_energy()
ep=mc.potential_energy()
eth=mc.thermal_energy()
self.assertAlmostEqual(eth/ep, -0.25, 2)
self.assertAlmostEqual(ek/ep, -0.2, 2)
|
devel/toolkitEditor/createRopDialog/createRopDialog.py
|
t3kt/raytk
| 108 |
56455
|
from raytkTools import RaytkTools
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
from ..ropEditor.ropEditor import ROPEditor
iop.ropEditor = ROPEditor(COMP())
class CreateRopDialog:
def __init__(self, ownerComp: 'COMP'):
self.ownerComp = ownerComp
def _setMessageText(self, message):
dat = self.ownerComp.op('set_messageText')
dat.clear()
dat.write(message or '')
def Open(self, _=None):
self.ownerComp.op('window').par.winopen.pulse()
self.ownerComp.op('typeName_field').par.Value0 = ''
self._setMessageText('')
def Close(self, _=None):
self.ownerComp.op('window').par.winclose.pulse()
self._setMessageText('')
def Create(self):
self._setMessageText('')
category = self.ownerComp.op('category_dropmenu').par.Value0.eval()
name = self.ownerComp.op('typeName_field').par.Value0.eval()
try:
rop = RaytkTools().createNewRopType(typeName=name, category=category)
except Exception as err:
self._setMessageText(str(err))
return
iop.ropEditor.LoadROP(rop)
self.Close()
|
test/tests/tutorial_notebook_tests.py
|
jameszhan/swift-jupyter
| 624 |
56528
|
<reponame>jameszhan/swift-jupyter
# TODO(TF-747): Reenable.
# """Checks that tutorial notebooks behave as expected.
# """
#
# import unittest
# import os
# import shutil
# import tempfile
#
# from flaky import flaky
#
# from notebook_tester import NotebookTestRunner
#
#
# class TutorialNotebookTests(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# cls.tmp_dir = tempfile.mkdtemp()
# git_url = 'https://github.com/tensorflow/swift.git'
# os.system('git clone %s %s -b jupyter-test-branch' % (git_url, cls.tmp_dir))
#
# @classmethod
# def tearDownClass(cls):
# shutil.rmtree(cls.tmp_dir)
#
# @flaky(max_runs=5, min_passes=1)
# def test_iris(self):
# notebook = os.path.join(self.tmp_dir, 'docs', 'site', 'tutorials',
# 'model_training_walkthrough.ipynb')
# runner = NotebookTestRunner(notebook, verbose=False)
# runner.run()
# self.assertEqual([], runner.unexpected_errors)
# all_stdout = '\n\n'.join(runner.stdout)
# self.assertIn('Epoch 100:', all_stdout)
# self.assertIn('Example 2 prediction:', all_stdout)
|
code/parse-commonsenseQA.py
|
salesforce/cos-e
| 138 |
56530
|
<filename>code/parse-commonsenseQA.py
import jsonlines
import sys
import csv
expl = {}
with open(sys.argv[2], 'rb') as f:
for item in jsonlines.Reader(f):
expl[item['id']] = item['explanation']['open-ended']
with open(sys.argv[1], 'rb') as f:
with open(sys.argv[3],'w') as wf:
wfw = csv.writer(wf,delimiter=',',quotechar='"')
wfw.writerow(['id','question','choice_0','choice_1','choice_2','choice_3','choice_4','label','human_expl_open-ended'])
for item in jsonlines.Reader(f):
label = -1
if(item['answerKey'] == 'A'):
label = 0
elif(item['answerKey'] == 'B'):
label = 1
elif(item['answerKey'] == 'C'):
label = 2
elif(item['answerKey'] == 'D'):
label = 3
else:
label = 4
wfw.writerow([item['id'],item['question']['stem'],item['question']['choices'][0]['text'],item['question']['choices'][1]['text'],item['question']['choices'][2]['text'],item['question']['choices'][3]['text'],item['question']['choices'][4]['text'],label,expl[item['id']]])
|
test/test-tool/openmldb-case-gen/auto_gen_case/gen_case_yaml_main.py
|
jasleon/OpenMLDB
| 2,659 |
56549
|
<reponame>jasleon/OpenMLDB<filename>test/test-tool/openmldb-case-gen/auto_gen_case/gen_case_yaml_main.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import uuid
import yaml
from hybridsql_case import gen_single_window_test
from hybridsql_function import UDFPool
from hybridsql_param import parse_args
from hybridsql_case import gen_window_union_test
from hybridsql_param import sample_integer_config
from hybridsql_case import gen_window_lastjoin_test, gen_window_subselect_test
gen_sql = {
0: gen_single_window_test,
1: gen_window_union_test,
2: gen_window_lastjoin_test,
3: gen_window_subselect_test,
}
def gen_case_yaml(case_dir=None):
args = parse_args()
udf_pool = UDFPool(args.udf_path, args)
begin = time.time()
case_num = args.yaml_count
if case_dir == None:
case_dir = args.log_dir
if not os.path.exists(case_dir):
os.makedirs(case_dir)
for i in range(case_num):
sql_type = sample_integer_config(args.sql_type)
test_name = str(uuid.uuid1())
case = gen_sql[sql_type](test_name, udf_pool, args)
yamlName = "auto_gen_case_"+str(i)+".yaml"
with open(os.path.join(case_dir, yamlName), "w") as yaml_file:
yaml_file.write(yaml.dump(case))
end = time.time()
print("use time:"+str(end-begin))
if __name__ == "__main__":
'''
生成yaml的入口
'''
currentPath = os.getcwd()
index = currentPath.rfind('HybridSQL-test')
if index == -1:
prePath = currentPath+"/"
else:
prePath = currentPath[0:index]
print("prePath:"+prePath)
casePath = prePath+"HybridSQL-test/OpenMLDB/cases/hybridsql_gen_cases/"
print("casePath:"+casePath)
gen_case_yaml(casePath)
|
parser/reader/__init__.py
|
cheery/better_web_language
| 136 |
56597
|
from stream import CStream
from tokenizer import L2
from data import Expr, Literal, Position
#import space
#table = {
# u'(': u'lp', u')': u'rp',
# u'[': u'lb', u']': u'rb',
# u'{': u'lc', u'}': u'rc',
# u'and': u'and', u'or': u'or', u'not': u'not',
# u'=': u'let', u':=': u'set',
# u'<': u'chain',
# u'>': u'chain',
# u'<=': u'chain',
# u'>=': u'chain',
# u'==': u'chain',
# u'!=': u'chain',
# u'^': u'op', u'&': u'op', u'<<': u'op',
# u'>>': u'op', u'!': u'op', u'*': u'op',
# u'/': u'op', u'%': u'op', u'+': u'op',
# u'-': u'op', u'|': u'op', u'++': u'op',
# u':': u'symbol',
# u'.': u'dot'}
#binops = {
# u'|': 10,
# u'^': 10,
# u'&': 20,
# u'<<': 30, u'>>': 40,
# u'++': 40, u'+': 40, u'-': 40,
# u'*': 50, u'/': 50, u'%': 50,
#}
#right_binding = []
#prefixes = {
# u'~': 90,
# u'-': 90,
# u'+': 90,
#}
#postfixes = {
# u'!': 100,
#}
#
#def read(source):
# exps = []
# ts = L2(CStream(source), table)
# while ts.filled:
# if ts.position.col != 0:
# raise space.Error(u"%s: layout error" % ts.first.start.repr())
# exps.append(toplevel(ts, 0))
# return exps
#
#def toplevel(ts, col):
# head = expression(ts)
# if head.dcf is not None and ts.filled:
# if head.stop.lno == ts.position.lno:
# head.dcf.capture = [toplevel(ts, col)]
# elif ts.position.col > col:
# head.dcf.capture = exps = []
# scol = ts.position.col
# while ts.filled and ts.position.col == scol:
# exp = toplevel(ts, scol)
# exps.append(exp)
# while ts.filled and ts.position.lno == exp.stop.lno and ts.position.col > scol:
# exps.append(toplevel(ts, scol))
# return head
#
#def expressions(ts):
# exps = []
# while ts.filled:
# if match_some(ts.first, [u'rp', u'rb', u'rc']):
# break
# exps.append(expression(ts))
# return exps
#
#def expression(ts):
# left = expression_and(ts)
# if match(ts.first, u'or'):
# op = ts.advance()
# op.name = u'symbol'
# right = expression(ts)
# return Expr(left.start, right.stop, u'form', [op, left, right])
# return left
#
#def expression_and(ts):
# left = expression_chain(ts)
# if match(ts.first, u'and'):
# op = ts.advance()
# op.name = u'symbol'
# right = expression_and(ts)
# return Expr(left.start, right.stop, u'form', [op, left, right])
# return left
#
#def expression_chain(ts):
# left = expression_bare(ts, 0)
# if match(ts.first, u'chain'):
# exps = [left]
# while match(ts.first, u'chain'):
# op = ts.advance()
# op.name = u'symbol'
# exps.append(op)
# exps.append(expression_bare(ts, 0))
# left = Expr(exps[0].start, exps[len(exps)-1].stop, u'chain', exps)
# return left
#
#def expression_bare(ts, rbp):
# if on_prefix(ts):
# op = ts.advance()
# exp = expression_bare(ts, prefixes[op.value])
# op.name = u'symbol'
# op.value = op.value+u'expr'
# left = Expr(op.start, exp.stop, u'form', [op, exp])
# else:
# left = terminal(ts)
# while ts.filled:
# if match(ts.first, u'dot'):
# dot = ts.advance()
# symbol = ts.advance()
# if not match(symbol, u'symbol'):
# raise space.Error(u"%s: expected symbol" % symbol.start.repr())
# left = Expr(left.start, symbol.stop, u'attr', [left, symbol])
# elif match(ts.first, u'lb') and left.stop.eq(ts.first.start):
# lb = ts.advance()
# exps = expressions(ts)
# if not match(ts.first, u'rb'):
# raise space.Error(u"%s: [] truncates at %s" % (lb.start.repr(), ts.position.repr()))
# rb = ts.advance()
# left = Expr(left.start, rb.stop, u'index', [left] + exps)
# elif match_some(ts.first, [u'let', u'set']):
# let = ts.advance()
# exp = expression(ts)
# left = Expr(left.start, exp.stop, let.name, [left, exp])
# elif match(ts.first, u'op') and match(ts.second, u'let') and ts.first.value in binops:
# aug = ts.advance()
# aug.name = u'symbol'
# let = ts.advance()
# exp = expression(ts)
# left = Expr(left.start, exp.stop, u'aug', [aug, left, exp])
# else:
# break
# while ts.filled:
# if on_binop(left, ts) and rbp < binops.get(ts.first.value, 0):
# op = ts.advance()
# op.name = u'symbol'
# lbp = binops.get(op.value, 0)
# right = expression_bare(ts, lbp - (ts.first.value in right_binding))
# left = Expr(left.start, right.stop, u'form', [op, left, right])
# elif on_postfix(left, ts) and rbp < postfixes.get(ts.first.value, 0):
# op = ts.advance()
# op.name = u'symbol'
# lbp = postfixes.get(op.value, 0)
# op.value = u'expr'+op.value
# left = Expr(left.start, op.stop, u'form', [op, left])
# else:
# break
# return left
#
#def terminal(ts):
# if match_some(ts.first, [u'symbol', u'string', u'int', u'hex', u'float']):
# return ts.advance()
# elif match(ts.first, u'lp'):
# lp = ts.advance()
# exps = expressions(ts)
# if not match(ts.first, u'rp'):
# raise space.Error(u"%s: form truncates at %s" % (lp.start.repr(), ts.position.repr()))
# rp = ts.advance()
# exp = Expr(lp.start, rp.stop, u'form', exps)
# exp.dcf = exp
# return exp
# elif match(ts.first, u'lb'):
# lb = ts.advance()
# exps = expressions(ts)
# if not match(ts.first, u'rb'):
# raise space.Error(u"%s: list truncates at %s" % (lb.start.repr(), ts.position.repr()))
# rb = ts.advance()
# exp = Expr(lb.start, rb.stop, u'list', exps)
# exp.dcf = exp
# return exp
# elif match(ts.first, u'lc'):
# lc = ts.advance()
# if match(ts.second, u'rc'):
# exp = ts.advance()
# exp.name = u'symbol'
# else:
# exp = expression(ts)
# rc = ts.advance()
# return exp
# elif match(ts.first, u'not'):
# op = ts.advance()
# op.name = u'symbol'
# exp = expression_chain(ts)
# return Expr(op.start, exp.stop, u'form', [op, exp])
# if ts.filled:
# raise space.Error(u"%s: expected term, got %s" % (ts.position.repr(), ts.first.value))
# raise space.Error(u"%s: expected term, got eof" % ts.position.repr())
#
#def match_some(t, names):
# return t is not None and t.name in names
#
#def match(t, name):
# return t is not None and t.name == name
#
#def on_prefix(ts):
# if match(ts.first, u'op') and ts.second is not None:
# return ts.first.stop.eq(ts.second.start)
# return False
#
#def on_binop(left, ts):
# if match(ts.first, u'op') and ts.second is not None:
# l = left.stop.eq(ts.first.start)
# r = ts.first.stop.eq(ts.second.start)
# return l == r
# return False
#
#def on_postfix(left, ts):
# if match(ts.first, u'op'):
# l = left.stop.eq(ts.first.start)
# r = ts.second is not None and ts.first.stop.eq(ts.second.start)
# return l and not r
# return False
|
dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/pyxb/bundles/common/__init__.py
|
jeikabu/lumberyard
| 123 |
56618
|
<filename>dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/pyxb/bundles/common/__init__.py
"""In this module are stored generated bindings for standard schema
like WSDL or SOAP."""
|
scale.app/scripts/msvc2org.py
|
f4rsh/SCALe
| 239 |
56621
|
#!/usr/bin/env python
# Scrubs the output of msvc and prints out the dianostics.
#
# The only argument indicates the file containing the input.
#
# This script can produce lots of messages per diagnostic
#
# Copyright (c) 2007-2018 Carnegie Mellon University. All Rights Reserved.
# See COPYRIGHT file for details.
import sys
import re
import os
if len(sys.argv) != 2:
raise TypeError("Usage: " + sys.argv[0] + " <raw-input> > <org-output>")
input = sys.argv[1]
uniqueErrors = {}
regexes = []
regexes.append(re.compile("(.*?)\((\d*)\).*?error (.*?): (.*)"))
regexes.append(re.compile("(.*?)\((\d*),\d*\).*?error (.*?): (.*)"))
regexes.append(re.compile("(.*?)\((\d*)\).*?warning (.*?): (.*)"))
regexes.append(re.compile("(.*?)\((\d*),\d*\).*?warning (.*?): (.*)"))
for line in open(input):
# match regular expressions
for regex in regexes:
parse = re.match(regex, line)
if parse != None:
break
else:
continue
fileLocation = parse.group(1).strip()
lineNumber = parse.group(2).strip()
errorNumber = parse.group(3).strip()
diagonostic = parse.group(4).strip().replace("|", " ")
# print table
tableEntry = " | ".join(
["", errorNumber, fileLocation, lineNumber, diagonostic, ""])
print tableEntry
|
tests/test_test1.py
|
sharmavaruns/descriptastorus
| 118 |
56673
|
import unittest
from descriptastorus import MolFileIndex
import os, shutil
import logging
import datahook
TEST_DIR = "test1"
class TestCase(unittest.TestCase):
def setUp(self):
if os.path.exists(TEST_DIR):
shutil.rmtree(TEST_DIR, ignore_errors=True)
index = self.index = MolFileIndex.MakeSmilesIndex(
os.path.join(datahook.datadir, "../data/test1.smi"), TEST_DIR, hasHeader=True,
smilesColumn="smiles", nameColumn="name")
def tearDown(self):
if os.path.exists(TEST_DIR):
shutil.rmtree(TEST_DIR, ignore_errors=True)
def testIndexing(self):
logging.info("Running index test")
self.assertEqual(self.index.N, 14)
self.assertEqual(self.index.getMol(12), 'c1ccccc1CCCCCCCCCCCC')
self.assertEqual(self.index.getName(12), '13')
self.assertEqual(self.index.getRDMol(13), None)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
howdy/src/cli/remove.py
|
matan-arnon/howdy
| 3,552 |
56678
|
<filename>howdy/src/cli/remove.py
# Remove a encoding from the models file
# Import required modules
import sys
import os
import json
import builtins
from i18n import _
# Get the absolute path and the username
path = os.path.dirname(os.path.realpath(__file__)) + "/.."
user = builtins.howdy_user
# Check if enough arguments have been passed
if not builtins.howdy_args.arguments:
print(_("Please add the ID of the model you want to remove as an argument"))
print(_("For example:"))
print("\n\thowdy remove 0\n")
print(_("You can find the IDs by running:"))
print("\n\thowdy list\n")
sys.exit(1)
# Check if the models file has been created yet
if not os.path.exists(path + "/models"):
print(_("Face models have not been initialized yet, please run:"))
print("\n\thowdy add\n")
sys.exit(1)
# Path to the models file
enc_file = path + "/models/" + user + ".dat"
# Try to load the models file and abort if the user does not have it yet
try:
encodings = json.load(open(enc_file))
except FileNotFoundError:
print(_("No face model known for the user {}, please run:").format(user))
print("\n\thowdy add\n")
sys.exit(1)
# Tracks if a encoding with that id has been found
found = False
# Get the ID from the cli arguments
id = builtins.howdy_args.arguments[0]
# Loop though all encodings and check if they match the argument
for enc in encodings:
if str(enc["id"]) == id:
# Only ask the user if there's no -y flag
if not builtins.howdy_args.y:
# Double check with the user
print(_('This will remove the model called "{label}" for {user}').format(label=enc["label"], user=user))
ans = input(_("Do you want to continue [y/N]: "))
# Abort if the answer isn't yes
if (ans.lower() != "y"):
print(_('\nInterpreting as a "NO", aborting'))
sys.exit(1)
# Add a padding empty line
print()
# Mark as found and print an enter
found = True
break
# Abort if no matching id was found
if not found:
print(_("No model with ID {id} exists for {user}").format(id=id, user=user))
sys.exit(1)
# Remove the entire file if this encoding is the only one
if len(encodings) == 1:
os.remove(path + "/models/" + user + ".dat")
print(_("Removed last model, howdy disabled for user"))
else:
# A place holder to contain the encodings that will remain
new_encodings = []
# Loop though all encodings and only add those that don't need to be removed
for enc in encodings:
if str(enc["id"]) != id:
new_encodings.append(enc)
# Save this new set to disk
with open(enc_file, "w") as datafile:
json.dump(new_encodings, datafile)
print(_("Removed model {}").format(id))
|
data_gen/tts/txt_processors/en.py
|
ishine/DiffSinger-1
| 288 |
56692
|
<reponame>ishine/DiffSinger-1
import re
from data_gen.tts.data_gen_utils import PUNCS
from g2p_en import G2p
import unicodedata
from g2p_en.expand import normalize_numbers
from nltk import pos_tag
from nltk.tokenize import TweetTokenizer
from data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor
class EnG2p(G2p):
word_tokenize = TweetTokenizer().tokenize
def __call__(self, text):
# preprocessing
words = EnG2p.word_tokenize(text)
tokens = pos_tag(words) # tuples of (word, tag)
# steps
prons = []
for word, pos in tokens:
if re.search("[a-z]", word) is None:
pron = [word]
elif word in self.homograph2features: # Check homograph
pron1, pron2, pos1 = self.homograph2features[word]
if pos.startswith(pos1):
pron = pron1
else:
pron = pron2
elif word in self.cmu: # lookup CMU dict
pron = self.cmu[word][0]
else: # predict for oov
pron = self.predict(word)
prons.extend(pron)
prons.extend([" "])
return prons[:-1]
class TxtProcessor(BaseTxtProcessor):
g2p = EnG2p()
@staticmethod
def preprocess_text(text):
text = normalize_numbers(text)
text = ''.join(char for char in unicodedata.normalize('NFD', text)
if unicodedata.category(char) != 'Mn') # Strip accents
text = text.lower()
text = re.sub("[\'\"()]+", "", text)
text = re.sub("[-]+", " ", text)
text = re.sub(f"[^ a-z{PUNCS}]", "", text)
text = re.sub(f" ?([{PUNCS}]) ?", r"\1", text) # !! -> !
text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> !
text = text.replace("i.e.", "that is")
text = text.replace("i.e.", "that is")
text = text.replace("etc.", "etc")
text = re.sub(f"([{PUNCS}])", r" \1 ", text)
text = re.sub(rf"\s+", r" ", text)
return text
@classmethod
def process(cls, txt, pre_align_args):
txt = cls.preprocess_text(txt).strip()
phs = cls.g2p(txt)
phs_ = []
n_word_sep = 0
for p in phs:
if p.strip() == '':
phs_ += ['|']
n_word_sep += 1
else:
phs_ += p.split(" ")
phs = phs_
assert n_word_sep + 1 == len(txt.split(" ")), (phs, f"\"{txt}\"")
return phs, txt
|
tools/console/plugins/plugin_package/helper/package_helper.py
|
rh101/engine-x
| 321 |
56742
|
import os
import os.path
import json
import urllib2
import re
import cocos
from MultiLanguage import MultiLanguage
from functions import *
from local_package_database import LocalPackagesDatabase
from zip_downloader import ZipDownloader
def convert_version_part(version_part):
tag = '(\d+)(\D*.*)'
match = re.search(tag, version_part)
if match is None:
return 0
return int(match.group(1)), match.group(2)
def compare_extra_version_string(str1, str2):
if str1 == str2:
return 0
if len(str1) == 0:
return 1
elif len(str2) == 0:
return -1
if str1 > str2:
return 1
else:
return -1
def compare_version(version1, version2):
v1 = re.split('\.', version1)
v2 = re.split('\.', version2)
n1 = len(v1)
n2 = len(v2)
if n1 > n2:
n = n1
for x in xrange(n2,n):
v2.append("0")
else:
n = n2
for x in xrange(n1,n):
v1.append("0")
for x in xrange(0,n):
ver_num1, ver_str1 = convert_version_part(v1[x])
ver_num2, ver_str2 = convert_version_part(v2[x])
if ver_num1 > ver_num2:
return 1
elif ver_num2 > ver_num1:
return -1
c = compare_extra_version_string(ver_str1, ver_str2)
if c != 0:
return c
return 0
def get_newer_package(package1, package2):
if compare_version(package1["version"], package2["version"]) > 0:
return package1
else:
return package2
def get_packages_adapt_engine(packages, engine):
packages_out = []
for package in packages:
package_engine = package["engine"]
if package_engine[-1] == '+':
flag = True
package_engine = package_engine[:-1]
else:
flag = False
c = compare_version(engine, package_engine)
if flag and c >= 0:
packages_out.append(package)
elif c == 0:
packages_out.append(package)
if len(packages_out) > 0:
return packages_out
class PackageHelper:
REPO_URL = "http://pmr.cocos.com/"
REPO_PACKAGES_DIR = "packages"
WORKDIR = ".cocos" + os.sep + "packages"
LOCALDB_FILENAME = "local_packages.json"
QUERY_PACKAGE_URL = REPO_URL + "?name=%s"
QUERY_KEYWORD_URL = REPO_URL + "?keyword=%s"
@classmethod
def get_workdir(cls):
home = os.path.expanduser("~").rstrip("/\\")
return home + os.sep + cls.WORKDIR
@classmethod
def get_local_database_path(cls):
return cls.get_workdir() + os.sep + cls.LOCALDB_FILENAME
@classmethod
def get_package_path(cls, package_data):
return cls.get_workdir() + os.sep + package_data["name"] + "-" + package_data["version"]
@classmethod
def search_keyword(cls, keyword):
url = cls.QUERY_KEYWORD_URL % keyword
# print "[PACKAGE] query url: %s" % url
response = urllib2.urlopen(url)
html = response.read()
packages_data = json.loads(html)
if packages_data is None or len(packages_data) == 0:
return None
if "err" in packages_data:
message = MultiLanguage.get_string('PACKAGE_ERROR_WITH_CODE_FMT',
(packages_data["err"], packages_data["code"]))
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_WRONG_CONFIG)
return packages_data
@classmethod
def query_package_data(cls, name, version = 'all'):
url = cls.QUERY_PACKAGE_URL % name + '&version=' + version
# print "[PACKAGE] query url: %s" % url
response = urllib2.urlopen(url)
html = response.read()
package_data = json.loads(html)
# d1 = json.dumps(package_data,indent=4)
# print d1
if package_data is None or len(package_data) == 0 or ("err" in package_data and "code" in package_data and package_data["code"] == "1002"):
return None
if "err" in package_data:
message = MultiLanguage.get_string('PACKAGE_ERROR_WITH_CODE_FMT',
(package_data["err"], package_data["code"]))
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_WRONG_CONFIG)
return package_data
@classmethod
def download_package_zip(cls, package_data, force):
download_url = cls.REPO_URL + cls.REPO_PACKAGES_DIR + "/" + package_data["filename"]
workdir = cls.get_package_path(package_data)
print MultiLanguage.get_string('PACKAGE_WORKDIR_FMT', workdir)
downloader = ZipDownloader(download_url, workdir, package_data, force)
downloader.run()
@classmethod
def add_package(cls, package_data):
localdb = LocalPackagesDatabase(cls.get_local_database_path())
localdb.add_package(package_data)
@classmethod
def get_installed_packages(cls):
localdb = LocalPackagesDatabase(cls.get_local_database_path())
return localdb.get_packages()
@classmethod
def get_installed_package_data(cls, package_name, version = None):
localdb = LocalPackagesDatabase(cls.get_local_database_path())
packages = localdb.get_packages()
keys = packages.keys()
keys.sort()
keys.reverse()
for key in keys:
package_data = packages[key]
if package_data["name"] == package_name:
if version == None:
return package_data
elif package_data["version"] == version:
return package_data
@classmethod
def get_installed_package_newest_version(cls, package_name, engine = None):
localdb = LocalPackagesDatabase(cls.get_local_database_path())
packages = localdb.get_packages()
keys = packages.keys()
keys.sort()
keys.reverse()
package_list = []
for key in keys:
package_data = packages[key]
if package_data["name"] == package_name:
package_list.append(package_data)
n = len(package_list)
if n < 1:
return
if not engine is None:
package_list = get_packages_adapt_engine(package_list, engine)
if package_list is None:
return
package_newest = package_list[0]
for x in xrange(1,n-1):
package_newest = get_newer_package(package_list[x], package_newest)
return package_newest
@classmethod
def get_installed_package_zip_path(cls, package_data):
workdir = cls.get_package_path(package_data)
return workdir + os.sep + package_data["filename"]
|
src/actions/actions/logit-events/__init__.py
|
anderson-attilio/runbook
| 155 |
56847
|
<gh_stars>100-1000
#!/usr/bin/python
######################################################################
# Cloud Routes Bridge
# -------------------------------------------------------------------
# Actions Module
######################################################################
import rethinkdb as r
from rethinkdb.errors import RqlRuntimeError, RqlDriverError
import time
import datetime
import json
def action(**kwargs):
''' This method is called to action a reaction '''
redata = kwargs['redata']
jdata = kwargs['jdata']
rdb = kwargs['rdb']
r_server = kwargs['r_server']
logger = kwargs['logger']
run = True
if jdata['check']['prev_status'] == jdata['check']['status']:
run = False
if run:
return logit(jdata, rdb, r_server, logger)
else:
return None
def logit(jdata, rdb, r_server, logger):
''' This method will be called to log monitor transaction history '''
etime = time.time()
transaction = {
'cid': jdata['cid'],
'type': "monitor",
'zone': jdata['zone'],
'uid': jdata['uid'],
'failcount': jdata['failcount'],
'prev_failcount': jdata['prev_failcount'],
'status': jdata['check']['status'],
'prev_status': jdata['check']['prev_status'],
'method': jdata['check']['method'],
'time': etime,
'time_friendly': datetime.datetime.fromtimestamp(etime).strftime('%Y-%m-%d %H:%M:%S'),
'cacheonly': jdata['cacheonly'],
'name': jdata['name']
}
success = False
cacheonly = False
# Try to set rethinkdb first
try:
results = r.table('events').insert(transaction).run(rdb)
if results['inserted'] == 1:
success = True
cacheonly = False
else:
success = False
except (RqlDriverError, RqlRuntimeError) as e:
success = False
cacheonly = True
line = "logit-events: RethinkDB is inaccessible cannot log %s, sending to redis" % jdata[
'cid']
logger.info(line)
line = "logit-events: RethinkDB Error: %s" % e.message
logger.info(line)
try:
# Then set redis cache
ldata = json.dumps(transaction)
r_server.sadd("events", ldata)
success = True
except:
line = "logit-events: Redis is inaccessible cannot log %s, via redis" % jdata[
'cid']
logger.info(line)
success = False
return success
|
tests/client/test_auth.py
|
Camille-cmd/python-amazon-sp-api
| 213 |
56857
|
<filename>tests/client/test_auth.py
from sp_api.base import AccessTokenClient
from sp_api.base import Credentials, CredentialProvider
from sp_api.base import AuthorizationError
from sp_api.base.credential_provider import FromCodeCredentialProvider
refresh_token = '<refresh_token>'
lwa_app_id = '<lwa_app_id>'
lwa_client_secret = '<lwa_client_secret>'
aws_secret_key = '<aws_secret_access_key>'
aws_access_key = '<aws_access_key_id>'
role_arn = '<role_arn>'
def test_auth_exception():
e = AuthorizationError(200, 'Foo', 999)
assert e.status_code == 999
assert e.error_code == 200
assert e.message == 'Foo'
def test_credentials():
x = CredentialProvider()
assert x.credentials.lwa_app_id is not None
assert x.credentials.lwa_client_secret is not None
assert x.credentials.aws_secret_key is not None
assert x.credentials.aws_access_key is not None
def test_auth_client():
client = AccessTokenClient(credentials=CredentialProvider(credentials=dict(
refresh_token=refresh_token,
lwa_app_id=lwa_app_id,
lwa_client_secret=lwa_client_secret,
aws_secret_key=aws_secret_key,
aws_access_key=aws_access_key,
role_arn=role_arn,
)).credentials)
x = client._auth_code_request_body('foo')
assert x.get('grant_type') == 'authorization_code'
try:
client.authorize_auth_code('foo')
except AuthorizationError as e:
assert isinstance(e, AuthorizationError)
try:
client._request('https://jsonplaceholder.typicode.com/posts/1', {}, {})
except AuthorizationError as e:
assert isinstance(e, AuthorizationError)
|
Skype4Py/lang/tr.py
|
low456high/Skype4Py
| 199 |
56868
|
apiAttachAvailable = u'API Kullanilabilir'
apiAttachNotAvailable = u'Kullanilamiyor'
apiAttachPendingAuthorization = u'Yetkilendirme Bekliyor'
apiAttachRefused = u'Reddedildi'
apiAttachSuccess = u'Basarili oldu'
apiAttachUnknown = u'Bilinmiyor'
budDeletedFriend = u'Arkadas Listesinden Silindi'
budFriend = u'Arkadas'
budNeverBeenFriend = u'Arkadas Listesinde Hi\xe7 Olmadi'
budPendingAuthorization = u'Yetkilendirme Bekliyor'
budUnknown = u'Bilinmiyor'
cfrBlockedByRecipient = u'\xc7agri alici tarafindan engellendi'
cfrMiscError = u'Diger Hata'
cfrNoCommonCodec = u'Genel codec yok'
cfrNoProxyFound = u'Proxy bulunamadi'
cfrNotAuthorizedByRecipient = u'Ge\xe7erli kullanici alici tarafindan yetkilendirilmemis'
cfrRecipientNotFriend = u'Alici bir arkadas degil'
cfrRemoteDeviceError = u'Uzak ses aygitinda problem var'
cfrSessionTerminated = u'Oturum sonlandirildi'
cfrSoundIOError = u'Ses G/\xc7 hatasi'
cfrSoundRecordingError = u'Ses kayit hatasi'
cfrUnknown = u'Bilinmiyor'
cfrUserDoesNotExist = u'Kullanici/telefon numarasi mevcut degil'
cfrUserIsOffline = u'\xc7evrim Disi'
chsAllCalls = u'Eski Diyalog'
chsDialog = u'Diyalog'
chsIncomingCalls = u'\xc7oklu Sohbet Kabul\xfc Gerekli'
chsLegacyDialog = u'Eski Diyalog'
chsMissedCalls = u'Diyalog'
chsMultiNeedAccept = u'\xc7oklu Sohbet Kabul\xfc Gerekli'
chsMultiSubscribed = u'\xc7oklu Abonelik'
chsOutgoingCalls = u'\xc7oklu Abonelik'
chsUnknown = u'Bilinmiyor'
chsUnsubscribed = u'Aboneligi Silindi'
clsBusy = u'Mesgul'
clsCancelled = u'Iptal Edildi'
clsEarlyMedia = u'Early Media y\xfcr\xfct\xfcl\xfcyor'
clsFailed = u'\xdczg\xfcn\xfcz, arama basarisiz!'
clsFinished = u'Bitirildi'
clsInProgress = u'Arama Yapiliyor'
clsLocalHold = u'Yerel Beklemede'
clsMissed = u'Cevapsiz Arama'
clsOnHold = u'Beklemede'
clsRefused = u'Reddedildi'
clsRemoteHold = u'Uzak Beklemede'
clsRinging = u'ariyor'
clsRouting = u'Y\xf6nlendirme'
clsTransferred = u'Bilinmiyor'
clsTransferring = u'Bilinmiyor'
clsUnknown = u'Bilinmiyor'
clsUnplaced = u'Asla baglanmadi'
clsVoicemailBufferingGreeting = u'Selamlama Ara Bellege Aliniyor'
clsVoicemailCancelled = u'Sesli Posta Iptal Edildi'
clsVoicemailFailed = u'Sesli Mesaj Basarisiz'
clsVoicemailPlayingGreeting = u'Selamlama Y\xfcr\xfct\xfcl\xfcyor'
clsVoicemailRecording = u'Sesli Mesaj Kaydediliyor'
clsVoicemailSent = u'Sesli Posta G\xf6nderildi'
clsVoicemailUploading = u'Sesli Posta Karsiya Y\xfckleniyor'
cltIncomingP2P = u'Gelen Esler Arasi Telefon \xc7agrisi'
cltIncomingPSTN = u'Gelen Telefon \xc7agrisi'
cltOutgoingP2P = u'Giden Esler Arasi Telefon \xc7agrisi'
cltOutgoingPSTN = u'Giden Telefon \xc7agrisi'
cltUnknown = u'Bilinmiyor'
cmeAddedMembers = u'Eklenen \xdcyeler'
cmeCreatedChatWith = u'Sohbet Olusturuldu:'
cmeEmoted = u'Bilinmiyor'
cmeLeft = u'Birakilan'
cmeSaid = u'Ifade'
cmeSawMembers = u'G\xf6r\xfclen \xdcyeler'
cmeSetTopic = u'Konu Belirleme'
cmeUnknown = u'Bilinmiyor'
cmsRead = u'Okundu'
cmsReceived = u'Alindi'
cmsSending = u'G\xf6nderiliyor...'
cmsSent = u'G\xf6nderildi'
cmsUnknown = u'Bilinmiyor'
conConnecting = u'Baglaniyor'
conOffline = u'\xc7evrim Disi'
conOnline = u'\xc7evrim I\xe7i'
conPausing = u'Duraklatiliyor'
conUnknown = u'Bilinmiyor'
cusAway = u'Uzakta'
cusDoNotDisturb = u'Rahatsiz Etmeyin'
cusInvisible = u'G\xf6r\xfcnmez'
cusLoggedOut = u'\xc7evrim Disi'
cusNotAvailable = u'Kullanilamiyor'
cusOffline = u'\xc7evrim Disi'
cusOnline = u'\xc7evrim I\xe7i'
cusSkypeMe = u'Skype Me'
cusUnknown = u'Bilinmiyor'
cvsBothEnabled = u'Video G\xf6nderme ve Alma'
cvsNone = u'Video Yok'
cvsReceiveEnabled = u'Video Alma'
cvsSendEnabled = u'Video G\xf6nderme'
cvsUnknown = u''
grpAllFriends = u'T\xfcm Arkadaslar'
grpAllUsers = u'T\xfcm Kullanicilar'
grpCustomGroup = u'\xd6zel'
grpOnlineFriends = u'\xc7evrimi\xe7i Arkadaslar'
grpPendingAuthorizationFriends = u'Yetkilendirme Bekliyor'
grpProposedSharedGroup = u'Proposed Shared Group'
grpRecentlyContactedUsers = u'Son Zamanlarda Iletisim Kurulmus Kullanicilar'
grpSharedGroup = u'Shared Group'
grpSkypeFriends = u'Skype Arkadaslari'
grpSkypeOutFriends = u'SkypeOut Arkadaslari'
grpUngroupedFriends = u'Gruplanmamis Arkadaslar'
grpUnknown = u'Bilinmiyor'
grpUsersAuthorizedByMe = u'Tarafimdan Yetkilendirilenler'
grpUsersBlockedByMe = u'Engellediklerim'
grpUsersWaitingMyAuthorization = u'Yetkilendirmemi Bekleyenler'
leaAddDeclined = u'Ekleme Reddedildi'
leaAddedNotAuthorized = u'Ekleyen Kisinin Yetkisi Olmali'
leaAdderNotFriend = u'Ekleyen Bir Arkadas Olmali'
leaUnknown = u'Bilinmiyor'
leaUnsubscribe = u'Aboneligi Silindi'
leaUserIncapable = u'Kullanicidan Kaynaklanan Yetersizlik'
leaUserNotFound = u'Kullanici Bulunamadi'
olsAway = u'Uzakta'
olsDoNotDisturb = u'Rahatsiz Etmeyin'
olsNotAvailable = u'Kullanilamiyor'
olsOffline = u'\xc7evrim Disi'
olsOnline = u'\xc7evrim I\xe7i'
olsSkypeMe = u'Skype Me'
olsSkypeOut = u'SkypeOut'
olsUnknown = u'Bilinmiyor'
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
usexFemale = u'Kadin'
usexMale = u'Erkek'
usexUnknown = u'Bilinmiyor'
vmrConnectError = u'Baglanti Hatasi'
vmrFileReadError = u'Dosya Okuma Hatasi'
vmrFileWriteError = u'Dosya Yazma Hatasi'
vmrMiscError = u'Diger Hata'
vmrNoError = u'Hata Yok'
vmrNoPrivilege = u'Sesli Posta \xd6nceligi Yok'
vmrNoVoicemail = u'B\xf6yle Bir Sesli Posta Yok'
vmrPlaybackError = u'Y\xfcr\xfctme Hatasi'
vmrRecordingError = u'Kayit Hatasi'
vmrUnknown = u'Bilinmiyor'
vmsBlank = u'Bos'
vmsBuffering = u'Ara bellege aliniyor'
vmsDeleting = u'Siliniyor'
vmsDownloading = u'Karsidan Y\xfckleniyor'
vmsFailed = u'Basarisiz Oldu'
vmsNotDownloaded = u'Karsidan Y\xfcklenmedi'
vmsPlayed = u'Y\xfcr\xfct\xfcld\xfc'
vmsPlaying = u'Y\xfcr\xfct\xfcl\xfcyor'
vmsRecorded = u'Kaydedildi'
vmsRecording = u'Sesli Mesaj Kaydediliyor'
vmsUnknown = u'Bilinmiyor'
vmsUnplayed = u'Y\xfcr\xfct\xfclmemis'
vmsUploaded = u'Karsiya Y\xfcklendi'
vmsUploading = u'Karsiya Y\xfckleniyor'
vmtCustomGreeting = u'\xd6zel Selamlama'
vmtDefaultGreeting = u'Varsayilan Selamlama'
vmtIncoming = u'gelen sesli mesaj'
vmtOutgoing = u'Giden'
vmtUnknown = u'Bilinmiyor'
vssAvailable = u'Kullanilabilir'
vssNotAvailable = u'Kullanilamiyor'
vssPaused = u'Duraklatildi'
vssRejected = u'Reddedildi'
vssRunning = u'\xc7alisiyor'
vssStarting = u'Basliyor'
vssStopping = u'Durduruluyor'
vssUnknown = u'Bilinmiyor'
|
src/masonite/stubs/middlewares/Middleware.py
|
cercos/masonite
| 1,816 |
56875
|
<filename>src/masonite/stubs/middlewares/Middleware.py
from masonite.middleware import Middleware
class __class__(Middleware):
def before(self, request, response):
return request
def after(self, request, response):
return request
|
tests/test_APITokenCommands.py
|
sc979/jenkins-attack-framework
| 451 |
56910
|
import random
import string
import unittest
import warnings
from libs import jenkinslib
from libs.JAF.BaseCommandLineParser import BaseCommandLineParser
from libs.JAF.plugin_CreateAPIToken import CreateAPIToken, CreateAPITokenParser
from libs.JAF.plugin_DeleteAPIToken import DeleteAPIToken, DeleteAPITokenParser
from libs.JAF.plugin_ListAPITokens import ListAPITokens, ListAPITokensParser
from .configuration import (
server,
user_admin,
user_bad,
user_noaccess,
user_normal,
user_read_job_access,
user_read_no_job_access,
)
from .helpers import DummyWebServer, TestFramework
class CreateAPITokenTest(unittest.TestCase, TestFramework):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.testcommand = "CreateAPIToken"
self.TestParserClass = CreateAPITokenParser
self.TestClass = CreateAPIToken
def test_invalid_url(self):
"""Make sure that calling with invalid url fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_bad_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_and_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_invalid_creds(self):
"""Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_anonymous_creds(self):
"""Make sure that calling with valid jenkins (but no creds)"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_unprivileged_creds(self):
"""Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
"""Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
class CreateAPITokenParserTest(unittest.TestCase, TestFramework):
def setUp(self):
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
def test_no_args(self):
"""Ensure that calling with no arguments results in help output and not an error"""
self.basic_test_harness(
["jaf.py", self.testcommand],
[
r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
r"Jenkins Attack Framework",
r"positional arguments:",
],
)
class DeleteAPITokenTest(unittest.TestCase, TestFramework):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.testcommand = "DeleteAPIToken"
self.TestParserClass = DeleteAPITokenParser
self.TestClass = DeleteAPIToken
def test_invalid_url(self):
"""Make sure that calling with invalid url fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_bad_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_and_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_invalid_creds(self):
"""Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_anonymous_creds(self):
"""Make sure that calling with valid jenkins (but no creds)"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_unprivileged_creds(self):
"""Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
"""Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
class DeleteAPITokenParserTest(unittest.TestCase, TestFramework):
def setUp(self):
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
def test_no_args(self):
"""Ensure that calling with no arguments results in help output and not an error"""
self.basic_test_harness(
["jaf.py", self.testcommand],
[
r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
r"Jenkins Attack Framework",
r"positional arguments:",
],
)
class ListAPITokensTest(unittest.TestCase, TestFramework):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.testcommand = "ListAPITokens"
self.TestParserClass = ListAPITokensParser
self.TestClass = ListAPITokens
def test_invalid_url(self):
"""Make sure that calling with invalid url fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_bad_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_and_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_invalid_creds(self):
"""Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_anonymous_creds(self):
"""Make sure that calling with valid jenkins (but no creds)"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_unprivileged_creds(self):
"""Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_read_no_job_creds_token_list(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
[r"Current API Tokens:"],
)
def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
"""Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
class ListAPITokensParserTest(unittest.TestCase, TestFramework):
def setUp(self):
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
def test_no_args(self):
"""Ensure that calling with no arguments results in help output and not an error"""
self.basic_test_harness(
["jaf.py", self.testcommand],
[
r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
r"Jenkins Attack Framework",
r"positional arguments:",
],
)
class CombinedAPITokenNormalUserCredentialsTest(unittest.TestCase, TestFramework):
@classmethod
def setUpClass(cls):
cls.token_name = "testtoken" + "".join(
random.choices(string.ascii_letters + string.digits, k=26)
)
def test_1_valid_jenkins_valid_read_no_job_creds_token_create(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_read_no_job_access,
self.token_name,
],
[r"Your new API Token is: "],
)
def test_2_valid_jenkins_valid_read_no_job_creds_token_list(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
[r"Token Name: " + self.token_name],
)
def test_3_valid_jenkins_valid_read_no_job_creds_token_delete_list(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
[r"Token Name: " + self.token_name],
)
def test_4_valid_jenkins_valid_read_no_job_creds_token_delete(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_read_no_job_access,
self.token_name,
],
[r"Token Deleted Successfully."],
)
# For now this is commented out because we can only test this on a cloudbees federated setup, which we don't have
'''
class CombinedAPITokenNormalUserCookieTest(unittest.TestCase, TestFramework):
"""
We need to specifically test auth with cookies because code has to do extra work to derive the logged-in user's username
"""
@classmethod
def setUpClass(cls):
cls.token_name = "testtoken" + "".join(
random.choices(string.ascii_letters + string.digits, k=26)
)
try:
js = jenkinslib.Jenkins(
server,
username=user_read_no_job_access.split(':')[0],
password=':'.join(user_read_no_job_access.split(':')[1:]),
timeout=30,
)
cls.cookie = js.get_cookie()
except Exception:
print(cls.cookie)
#Failure will cause tests to fail, so we ignore here
pass
def test_1_valid_jenkins_valid_read_no_job_creds_token_create(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
self.token_name,
],
[r"Your new API Token is: "],
)
def test_2_valid_jenkins_valid_read_no_job_creds_token_list(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
],
[r"Token Name: " + self.token_name],
)
def test_3_valid_jenkins_valid_read_no_job_creds_token_delete_list(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
],
[r"Token Name: " + self.token_name],
)
def test_4_valid_jenkins_valid_read_no_job_creds_token_delete(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
self.token_name,
],
[r"Token Deleted Successfully."],
)
'''
class CombinedAPITokenAdminUserTest(unittest.TestCase, TestFramework):
@classmethod
def setUpClass(cls):
cls.token_name = "testtoken" + "".join(
random.choices(string.ascii_letters + string.digits, k=26)
)
def test_1_valid_jenkins_valid_admin_creds_token_create_other_user(self):
"""Make sure that calling CreateAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
self.token_name,
],
[r"Your new API Token is: "],
)
def test_2_valid_jenkins_valid_admin_creds_token_list_other_user(self):
"""Make sure that calling CreateAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
],
[r"Token Name: " + self.token_name],
)
def test_3_valid_jenkins_valid_admin_creds_token_delete_list_other_user(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
],
[r"Token Name: " + self.token_name],
)
def test_4_valid_jenkins_valid_admin_creds_token_delete_other_user(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
self.token_name,
],
[r"Token Deleted Successfully."],
)
if __name__ == "__main__":
unittest.main()
|
streamdeck-plugin/src/browser_websocket_server.py
|
andrewachen/streamdeck-googlemeet
| 124 |
56968
|
import asyncio
import json
import logging
from typing import List, Set
import websockets
class BrowserWebsocketServer:
"""
The BrowserWebsocketServer manages our connection to our browser extension,
brokering messages between Google Meet and our plugin's EventHandler.
We expect browser tabs (and our websockets) to come and go, and our plugin is
long-lived, so we have a lot of exception handling to do here to keep the
plugin running. Most actions are "best effort".
We also have to handle the possibility of multiple browser websockets at the
same time, e.g. in case the user refreshes their Meet window and we have stale
websockets hanging around, or if we have multiple Meet tabs.
"""
def __init__(self):
"""
Remember to call start() before attempting to use your new instance!
"""
self._logger = logging.getLogger(__name__)
"""
Store all of the connected sockets we have open to the browser extension,
so we can use them to send outbound messages from this plugin to the
extension.
"""
self._ws_clients: Set[websockets.WebSocketServerProtocol] = set()
"""
Any EventHandlers registered to receive inbound events from the browser extension.
"""
self._handlers: List["EventHandler"] = []
def start(self, hostname: str, port: int) -> None:
return websockets.serve(self._message_receive_loop, hostname, port)
async def send_to_clients(self, message: str) -> None:
"""
Send a message from our plugin to the Chrome extension. We broadcast to
any connections we have, in case the user has multiple Meet windows/tabs
open.
"""
if self._ws_clients:
self._logger.info(
f"Broadcasting message to connected browser clients: {message}")
await asyncio.wait([client.send(message) for client in self._ws_clients])
else:
self._logger.warn(
("There were no active browser extension clients to send our"
f" message to! Message: {message}"))
def register_event_handler(self, handler: "EventHandler") -> None:
"""
Register your EventHandler to have it receive callbacks whenever we
get an event over the wire from the browser extension.
"""
self._handlers.append(handler)
def num_connected_clients(self) -> int:
return len(self._ws_clients)
def _register_client(self, ws: websockets.WebSocketServerProtocol) -> None:
self._ws_clients.add(ws)
self._logger.info(
(f"{ws.remote_address} has connected to our browser websocket."
f" We now have {len(self._ws_clients)} active connection(s)."))
async def _unregister_client(self, ws: websockets.WebSocketServerProtocol) -> None:
try:
await ws.close()
except:
self._logger.exception(
"Exception while closing browser webocket connection.")
if ws in self._ws_clients:
self._ws_clients.remove(ws)
self._logger.info(
(f"{ws.remote_address} has disconnected from our browser websocket."
f" We now have {len(self._ws_clients)} active connection(s) remaining."))
async def _message_receive_loop(self, ws: websockets.WebSocketServerProtocol, uri: str) -> None:
"""
Loop of waiting for and processing inbound websocket messages, until the
connection dies. Each connection will create one of these coroutines.
"""
self._register_client(ws)
try:
async for message in ws:
self._logger.info(
f"Received inbound message from browser extension. Message: {message}")
await self._process_inbound_message(message)
except:
self._logger.exception(
"BrowserWebsocketServer encountered an exception while waiting for inbound messages.")
finally:
await self._unregister_client(ws)
if not self._ws_clients:
for handler in self._handlers:
try:
await handler.on_all_browsers_disconnected()
except:
self._logger.exception(
"Connection mananger received an exception from EventHandler!")
async def _process_inbound_message(self, message: str) -> None:
"""
Process one individual inbound websocket message.
"""
try:
parsed_event = json.loads(message)
except:
self._logger.exception(
f"Failed to parse browser websocket message as JSON. Message: {message}")
return
for handler in self._handlers:
try:
await handler.on_browser_event(parsed_event)
except:
self._logger.exception(
"Connection mananger received an exception from EventHandler!")
|
scihub_eva/globals/preferences.py
|
zlgenuine/scihub
| 859 |
56971
|
# -*- coding: utf-8 -*-
# System
SYSTEM_LANGUAGE_KEY = 'System/Language'
SYSTEM_THEME_KEY = 'System/Theme'
SYSTEM_THEME_DEFAULT = 'System'
# File
FILE_SAVE_TO_DIR_KEY = 'File/SaveToDir'
FILE_SAVE_TO_DIR_DEFAULT = ''
FILE_FILENAME_PREFIX_FORMAT_KEY = 'File/FilenamePrefixFormat'
FILE_FILENAME_PREFIX_FORMAT_DEFAULT = '{id}_{year}_{author}_{title}'
FILE_OVERWRITE_EXISTING_FILE_KEY = 'File/OverwriteExistingFile'
FILE_OVERWRITE_EXISTING_FILE_DEFAULT = False
# Network
NETWORK_SCIHUB_URL_KEY = 'Network/SciHubURL'
NETWORK_SCIHUB_URL_DEFAULT = 'https://sci-hub.se'
NETWORK_SCIHUB_URLS_KEY = 'Network/SciHubURLs'
NETWORK_SCIHUB_URLS_DEFAULT = ['https://sci-hub.se', 'https://sci-hub.st']
NETWORK_TIMEOUT_KEY = 'Network/Timeout'
NETWORK_TIMEOUT_DEFAULT = 3000
NETWORK_RETRY_TIMES_KEY = 'Network/RetryTimes'
NETWORK_RETRY_TIMES_DEFAULT = 3
NETWORK_PROXY_ENABLE_KEY = 'Network/ProxyEnable'
NETWORK_PROXY_ENABLE_DEFAULT = False
NETWORK_PROXY_TYPE_KEY = 'Network/ProxyType'
NETWORK_PROXY_TYPE_DEFAULT = 'http'
NETWORK_PROXY_HOST_KEY = 'Network/ProxyHost'
NETWORK_PROXY_HOST_DEFAULT = '127.0.0.1'
NETWORK_PROXY_PORT_KEY = 'Network/ProxyPort'
NETWORK_PROXY_PORT_DEFAULT = '7890'
NETWORK_PROXY_USERNAME_KEY = 'Network/ProxyUsername'
NETWORK_PROXY_USERNAME_DEFAULT = ''
NETWORK_PROXY_PASSWORD_KEY = 'Network/ProxyPassword'
NETWORK_PROXY_PASSWORD_DEFAULT = ''
|
test_haystack/test_app_with_hierarchy/__init__.py
|
nakarinh14/django-haystack
| 2,021 |
56998
|
<filename>test_haystack/test_app_with_hierarchy/__init__.py<gh_stars>1000+
"""Test app with multiple hierarchy levels above the actual models.py file"""
|
aicsimageio/tests/readers/test_default_reader.py
|
brisvag/aicsimageio
| 110 |
57001
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Tuple
import numpy as np
import pytest
from aicsimageio import exceptions
from aicsimageio.readers.default_reader import DefaultReader
from ..conftest import get_resource_full_path, host
from ..image_container_test_utils import run_image_file_checks
@host
@pytest.mark.parametrize(
"filename, set_scene, expected_shape, expected_dims_order",
[
("example.bmp", "Image:0", (480, 640, 4), "YXS"),
("example.png", "Image:0", (800, 537, 4), "YXS"),
("example.jpg", "Image:0", (452, 400, 3), "YXS"),
("example.gif", "Image:0", (72, 268, 268, 4), "TYXS"),
(
"example_invalid_frame_count.mp4",
"Image:0",
(55, 1080, 1920, 3),
"TYXS",
),
(
"example_valid_frame_count.mp4",
"Image:0",
(72, 272, 272, 3),
"TYXS",
),
pytest.param(
"example.txt",
None,
None,
None,
marks=pytest.mark.raises(exception=exceptions.UnsupportedFileFormatError),
),
pytest.param(
"example.png",
"Image:1",
None,
None,
marks=pytest.mark.raises(exception=IndexError),
),
],
)
def test_default_reader(
filename: str,
host: str,
set_scene: str,
expected_shape: Tuple[int, ...],
expected_dims_order: str,
) -> None:
# Construct full filepath
uri = get_resource_full_path(filename, host)
# Run checks
run_image_file_checks(
ImageContainer=DefaultReader,
image=uri,
set_scene=set_scene,
expected_scenes=("Image:0",),
expected_current_scene="Image:0",
expected_shape=expected_shape,
expected_dtype=np.dtype(np.uint8),
expected_dims_order=expected_dims_order,
expected_channel_names=None,
expected_physical_pixel_sizes=(None, None, None),
expected_metadata_type=dict,
)
def test_ffmpeg_header_fail() -> None:
with pytest.raises(IOError):
# Big Buck Bunny
DefaultReader("https://archive.org/embed/archive-video-files/test.mp4")
|
remote_flow/metaflow/model.py
|
JSpenced/you-dont-need-a-bigger-boat
| 356 |
57021
|
<reponame>JSpenced/you-dont-need-a-bigger-boat
import json
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import model_from_json
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.client import device_lib
from sklearn.model_selection import train_test_split
from wandb.keras import WandbCallback
from utils import return_json_file_content
def session_indexed(s):
"""
Converts a session (of actions) to indices and adds start/end tokens
:param s: list of actions in a session (i.e 'add','detail', etc)
:return:
"""
# assign an integer to each possible action token
action_to_idx = {'start': 0, 'end': 1, 'add': 2, 'remove': 3, 'purchase': 4, 'detail': 5, 'view': 6}
return [action_to_idx['start']] + [action_to_idx[e] for e in s] + [action_to_idx['end']]
def train_lstm_model(x, y,
epochs=200,
patience=10,
lstm_dim=48,
batch_size=128,
lr=1e-3):
"""
Train an LSTM to predict purchase (1) or abandon (0)
:param x: session sequences
:param y: target labels
:param epochs: num training epochs
:param patience: early stopping patience
:param lstm_dim: lstm units
:param batch_size: batch size
:param lr: learning rate
:return:
"""
# Verfiy if GPU/CPU is being used
print("Print out system device...")
print(device_lib.list_local_devices())
print("Starting training now...")
X_train, X_test, y_train, y_test = train_test_split(x,y)
# pad sequences for training in batches
max_len = max(len(_) for _ in x)
X_train = pad_sequences(X_train, padding="post",value=7, maxlen=max_len)
X_test = pad_sequences(X_test, padding="post", value=7, maxlen=max_len)
# convert to one-hot
X_train = tf.one_hot(X_train, depth=7)
X_test = tf.one_hot(X_test, depth=7)
y_train = np.array(y_train)
y_test = np.array(y_test)
# Define Model
model = keras.Sequential()
model.add(keras.layers.InputLayer(input_shape=(None,7)))
# Masking layer ignores padded time-steps
model.add(keras.layers.Masking())
model.add(keras.layers.LSTM(lstm_dim))
model.add(keras.layers.Dense(1,activation='sigmoid'))
model.summary()
# Some Hyper Params
opt = keras.optimizers.Adam(learning_rate=lr)
loss = keras.losses.BinaryCrossentropy()
es = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=patience,
verbose=1,
restore_best_weights=True)
# Include wandb callback for tracking
callbacks = [es, WandbCallback()]
model.compile(optimizer=opt,
loss=loss,
metrics=['accuracy'])
# Train Model
model.fit(X_train, y_train,
validation_data=(X_test,y_test),
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks)
# return trained model
# NB: to store model as Metaflow Artifact it needs to be pickle-able!
return model.to_json(), model.get_weights(), model
def make_predictions(predictor):
# load test data
test_inp = {'instances': tf.one_hot(np.array([[0, 1, 1, 3, 4, 5]]),
on_value=1,
off_value=0,
depth=7).numpy()}
# make predictions
preds = predictor.predict(test_inp)
return preds
|
applications/ORE/add_unknown_pseudo_labels.py
|
kylevedder/mvits_for_class_agnostic_od
| 114 |
57061
|
"""
The script expects the MViT (MDef-DETR or MDETR) detections in .txt format. For example, there should be,
One .txt file for each image and each line in the file represents a detection.
The format of a single detection should be "<label> <confidence> <x1> <y1> <x2> <y2>
Please see the 'mvit_detections' for reference.
"""
import os
import argparse
import xml.etree.ElementTree as ET
from fvcore.common.file_io import PathManager
import numpy as np
import time
import cv2
from nms import nms
TASK1_TRAIN_LIST = "t1_train.txt"
TASK2_TRAIN_LIST = "t2_train.txt"
TASK3_TRAIN_LIST = "t3_train.txt"
TASK4_TRAIN_LIST = "t4_train.txt"
def read_image_list(path):
with open(path, 'r') as f:
lines = f.read()
images = lines.split('\n')
return images[:-1]
TASK1_TRAIN_IMAGES = read_image_list(TASK1_TRAIN_LIST)
TASK2_TRAIN_IMAGES = read_image_list(TASK2_TRAIN_LIST)
TASK3_TRAIN_IMAGES = read_image_list(TASK3_TRAIN_LIST)
TASK4_TRAIN_IMAGES = read_image_list(TASK4_TRAIN_LIST)
TASK1_KNOWN_CLASSES = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
"chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
"pottedplant", "sheep", "sofa", "train", "tvmonitor", "airplane", "dining table", "motorcycle",
"potted plant", "couch", "tv"]
TASK2_KNOWN_CLASSES = TASK1_KNOWN_CLASSES + ["truck", "traffic light", "fire hydrant", "stop sign", "parking meter",
"bench", "elephant", "bear", "zebra", "giraffe",
"backpack", "umbrella", "handbag", "tie", "suitcase",
"microwave", "oven", "toaster", "sink", "refrigerator"]
TASK3_KNOWN_CLASSES = TASK2_KNOWN_CLASSES + ["frisbee", "skis", "snowboard", "sports ball", "kite",
"baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket",
"banana", "apple", "sandwich", "orange", "broccoli",
"carrot", "hot dog", "pizza", "donut", "cake"]
TASK4_KNOWN_CLASSES = TASK3_KNOWN_CLASSES + ["bed", "toilet", "laptop", "mouse",
"remote", "keyboard", "cell phone", "book", "clock",
"vase", "scissors", "teddy bear", "hair drier", "toothbrush",
"wine glass", "cup", "fork", "knife", "spoon", "bowl"]
def parse_arguments():
"""
Parse the command line arguments
"""
ap = argparse.ArgumentParser()
ap.add_argument("-ann", "--annotations_dir_path", required=True,
help="Path to the directory containing the original annotations in pascal VOC format.")
ap.add_argument("-det", "--detections_dir_path", required=True,
help="Path to the directory containing the detections generated using class agnostic object "
"detector. One .txt file for each image where each line in the file represents a detection."
"The format of a single detection should be "
"<label> <confidence> <x1> <y1> <x2> <y2>")
ap.add_argument("-o", "--output_dir_path", required=True,
help="The output dir path to save the updated annotations.")
ap.add_argument("-det_conf", "--detection_confidence_threshold", required=False, type=float, default=0.5,
help="The confidence threshold to filter potential detections at first step. All detections with "
"confidence less than this threshold value will be ignored.")
ap.add_argument("-iou", "--iou_thresh_unk", required=False, type=float, default=0.5,
help="All detections, having an overlap greater than iou_thresh with any of the ground truths, "
"will be ignored.")
ap.add_argument("-nms", "--apply_nms", required=False, type=bool, default=False,
help="Flag to decide either to apply NMS on detections before assigning them unknown/gt or not.")
ap.add_argument("-iou_nms", "--iou_thresh_nms", required=False, type=float, default=0.2,
help="IOU threshold for NMS.")
args = vars(ap.parse_args())
return args
def parse_voc_gt_kn(path):
image_name = os.path.basename(path).split('.')[0]
if os.path.exists(path):
with PathManager.open(path) as f:
tree = ET.parse(f)
boxes = []
for obj in tree.findall("object"):
cls = obj.find("name").text
if image_name in TASK1_TRAIN_IMAGES:
if cls not in TASK1_KNOWN_CLASSES:
continue
elif image_name in TASK2_TRAIN_IMAGES:
if cls not in TASK2_KNOWN_CLASSES:
continue
elif image_name in TASK3_TRAIN_IMAGES:
if cls not in TASK3_KNOWN_CLASSES:
continue
elif image_name in TASK4_TRAIN_IMAGES:
if cls not in TASK4_KNOWN_CLASSES:
continue
else:
# Not a training image
return boxes, tree, False
bbox = obj.find("bndbox")
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
# Original annotations are integers in the range [1, W or H]
# Assuming they mean 1-based pixel indices (inclusive),
# a box with annotation (xmin=1, xmax=W) covers the whole image.
# In coordinate space this is represented by (xmin=0, xmax=W)
bbox[0] -= 1.0
bbox[1] -= 1.0
boxes.append(bbox)
else:
# No annotation file found, create an empty xml node and return
image_name = f"{os.path.basename(path).split('.')[0]}.jpg"
image_path = f"{os.path.dirname(os.path.dirname(path))}/JPEGImages/{image_name}"
img = cv2.imread(image_path)
h, w, c = img.shape
node_root = ET.Element('annotation')
node_folder = ET.SubElement(node_root, 'folder')
node_folder.text = 'VOC2007'
node_filename = ET.SubElement(node_root, 'filename')
node_filename.text = image_name
node_size = ET.SubElement(node_root, 'size')
node_width = ET.SubElement(node_size, 'width')
node_width.text = str(int(w))
node_height = ET.SubElement(node_size, 'height')
node_height.text = str(int(h))
node_depth = ET.SubElement(node_size, 'depth')
node_depth.text = str(int(c))
tree = ET.ElementTree(node_root)
boxes = []
return boxes, tree, True
def parse_det_txt(path, conf_thresh=0.5):
if os.path.exists(path):
with open(path, "r") as f:
lines = f.readlines()
boxes = []
scores = []
for line in lines:
content = line.rstrip().split(' ')
bbox = content[2:]
# Only keep the boxes with score >= conf_thresh
det_conf = float(content[1])
if det_conf >= conf_thresh:
boxes.append([int(b) for b in bbox])
scores.append(det_conf)
return boxes, scores
else:
return [], []
def class_agnostic_nms(boxes, scores, iou=0.7):
# boxes = non_max_suppression_fast(np.array(boxes), iou)
boxes = nms(np.array(boxes), np.array(scores), iou)
return list(boxes)
def get_unk_det(gt, det, iou):
if not gt:
return det
gt = np.array(gt)
unk_det = []
for dl in det:
d = np.array(dl)
ixmin = np.maximum(gt[:, 0], d[0])
iymin = np.maximum(gt[:, 1], d[1])
ixmax = np.minimum(gt[:, 2], d[2])
iymax = np.minimum(gt[:, 3], d[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
uni = (
(d[2] - d[0] + 1.0) * (d[3] - d[1] + 1.0)
+ (gt[:, 2] - gt[:, 0] + 1.0) * (gt[:, 3] - gt[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
ov_max = np.max(overlaps)
if ov_max < iou:
unk_det.append(dl)
return unk_det
def main(ann_dir, det_dir, out_dir, det_conf_thesh, iou_thresh, nms=False, iou_thresh_nms=0.7):
files = os.listdir(det_dir)
start = time.time()
for i, file_name in enumerate(files):
if i % 100 == 0:
print(f"On image no. {i}. Time: {time.time() - start}")
start = time.time()
ann_file_path = f"{ann_dir}/{file_name.split('.')[0]}.xml"
ref_det_file_path = f"{det_dir}/{file_name.split('.')[0]}.txt"
out_ann_file_path = f"{out_dir}/{file_name.split('.')[0]}.xml"
gt_boxes, ann_tree, train = parse_voc_gt_kn(ann_file_path) # Read the ground truth bounding boxes
# Only add the unknown detections if training image
if not train:
# Copy the original annotation file
ann_tree.write(out_ann_file_path, encoding='latin-1')
continue
det_boxes, scores = parse_det_txt(ref_det_file_path, conf_thresh=det_conf_thesh) # Read the detections
if nms:
det_boxes = class_agnostic_nms(det_boxes, scores, iou_thresh_nms) # Apply NMS if prompted to do so
det_unk = get_unk_det(gt_boxes, det_boxes, iou_thresh) # Get the potential unknown detections
# Create the updated annotation file
for det in det_unk:
object = ET.SubElement(ann_tree.getroot(), 'object')
name = ET.SubElement(object, "name")
name.text = "unknown"
pose = ET.SubElement(object, "pose")
pose.text = "Unspecified"
truncated = ET.SubElement(object, "truncated")
truncated.text = "2"
difficult = ET.SubElement(object, "difficult")
difficult.text = "0"
bndbox = ET.SubElement(object, "bndbox")
xmin = ET.SubElement(bndbox, "xmin")
xmin.text = str(int(det[0]))
ymin = ET.SubElement(bndbox, "ymin")
ymin.text = str(int(det[1]))
xmax = ET.SubElement(bndbox, "xmax")
xmax.text = str(int(det[2]))
ymax = ET.SubElement(bndbox, "ymax")
ymax.text = str(int(det[3]))
# Save the updated annotations
ann_tree.write(out_ann_file_path, encoding='latin-1')
if __name__ == "__main__":
args = parse_arguments()
annotations_dir = args["annotations_dir_path"]
detections_dir = args["detections_dir_path"]
output_dir = args["output_dir_path"]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
conf_threshold_det = args["detection_confidence_threshold"]
iou_threshold_unk = args["iou_thresh_unk"]
apply_nms = args["apply_nms"]
iou_threshold_nms = args["iou_thresh_nms"]
main(annotations_dir, detections_dir, output_dir, conf_threshold_det, iou_threshold_unk,
apply_nms, iou_threshold_nms)
|
src/genie/libs/parser/iosxe/tests/ShowLacpNeighborDetail/cli/equal/golden_output_expected.py
|
balmasea/genieparser
| 204 |
57100
|
expected_output = {
"interfaces": {
"Port-channel1": {
"name": "Port-channel1",
"protocol": "lacp",
"members": {
"GigabitEthernet0/0/1": {
"activity": "Active",
"age": 18,
"aggregatable": True,
"collecting": True,
"defaulted": False,
"distributing": True,
"expired": False,
"flags": "FA",
"interface": "GigabitEthernet0/0/1",
"lacp_port_priority": 100,
"oper_key": 1,
"port_num": 2,
"port_state": 63,
"synchronization": True,
"system_id": "00127,6487.88ff.68ef",
"timeout": "Short",
},
"GigabitEthernet0/0/7": {
"activity": "Active",
"age": 0,
"aggregatable": True,
"collecting": False,
"defaulted": False,
"distributing": False,
"expired": False,
"flags": "FA",
"interface": "GigabitEthernet0/0/7",
"lacp_port_priority": 200,
"oper_key": 1,
"port_num": 1,
"port_state": 15,
"synchronization": True,
"system_id": "00127,6487.88ff.68ef",
"timeout": "Short",
},
},
}
}
}
|
factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/cameras/tests/test_app.py
|
kaka-lin/azure-intelligent-edge-patterns
| 176 |
57134
|
"""App utilites tests.
"""
import sys
import pytest
from django.apps import apps
from ..models import Camera
pytestmark = pytest.mark.django_db
@pytest.mark.parametrize(
"testargs, output",
[
[["python", "manage.py", "runserver"], 4],
[["python", "manage.py", "makemigration"], 0],
[["python", "manage.py", "migrate"], 0],
[["python", "manage.py", "test"], 0],
[["pytest"], 0],
],
)
def test_app(monkeypatch, testargs, output):
"""test_create_demo_objects."""
monkeypatch.setattr(sys, "argv", testargs)
app_config = apps.get_app_config("locations")
app_config.ready()
app_config = apps.get_app_config("cameras")
app_config.ready()
assert Camera.objects.count() == output
@pytest.mark.parametrize(
"testenv, output",
[
["true", 4],
["True", 4],
["1", 4],
["false", 0],
["False", 0],
["0", 0],
["random_string", 4],
],
)
def test_app_not_create_demo(monkeypatch, testenv, output):
"""test_create_demo_objects."""
monkeypatch.setenv("CREATE_DEMO", testenv)
testargs = ["python", "manage.py", "runserver"]
monkeypatch.setattr(sys, "argv", testargs)
app_config = apps.get_app_config("locations")
app_config.ready()
app_config = apps.get_app_config("cameras")
app_config.ready()
assert Camera.objects.count() == output
|
tools/convert_cocomodel_for_init.py
|
MAYURGAIKWAD/meshrcnn
| 1,028 |
57237
|
<filename>tools/convert_cocomodel_for_init.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Convert coco model for init. Remove class specific heads, optimizer and scheduler
so that this model can be used for pre-training
"""
import argparse
import torch
def main():
parser = argparse.ArgumentParser(description="Convert models for init")
parser.add_argument(
"--model-file", default="", dest="modelfile", metavar="FILE", help="path to model", type=str
)
parser.add_argument(
"--output-file",
default="",
dest="outputfile",
metavar="FILE",
help="path to model",
type=str,
)
args = parser.parse_args()
model = torch.load(args.modelfile)
# pop the optimizer
model.pop("optimizer")
# pop the scheduler
model.pop("scheduler")
# pop the iteration
model.pop("iteration")
# pop the class specific weights from the coco pretrained model
heads = [
"roi_heads.box_predictor.cls_score.weight",
"roi_heads.box_predictor.cls_score.bias",
"roi_heads.box_predictor.bbox_pred.weight",
"roi_heads.box_predictor.bbox_pred.bias",
"roi_heads.mask_head.predictor.weight",
"roi_heads.mask_head.predictor.bias",
]
for head in heads:
model["model"].pop(head)
torch.save(model, args.outputfile)
if __name__ == "__main__":
main()
|
crafters/image/CenterImageCropper/__init__.py
|
strawberrypie/jina-hub
| 106 |
57263
|
<reponame>strawberrypie/jina-hub<gh_stars>100-1000
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict, Union
import numpy as np
from jina.executors.decorators import single
from jina.executors.crafters import BaseCrafter
from .helper import _crop_image, _move_channel_axis, _load_image
class CenterImageCropper(BaseCrafter):
"""
Crop the image with the center crop box.
The coordinate is the same coordinate-system in the
:py:mode:`PIL.Image`.
:param target_size: Desired output size. If size
is a sequence like (h, w), the output size will
be matched to this. If size is an int, the
output will have the same height and width as
the `target_size`.
:param channel_axis: Axis for channel
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(self,
target_size: Union[Tuple[int, int], int] = 224,
channel_axis: int = -1,
*args,
**kwargs):
"""Set Constructor."""
super().__init__(*args, **kwargs)
self.target_size = target_size
self.channel_axis = channel_axis
@single
def craft(self, blob: 'np.ndarray', *args, **kwargs) -> Dict:
"""
Crop the input image array.
:param blob: The ndarray of the image
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
:return: A dict with the cropped image
"""
raw_img = _load_image(blob, self.channel_axis)
_img, top, left = _crop_image(raw_img, self.target_size, how='center')
img = _move_channel_axis(np.asarray(_img), -1, self.channel_axis)
return dict(offset=0, blob=img.astype('float32'), location=(top, left))
|
notebook/requests_download.py
|
vhn0912/python-snippets
| 174 |
57272
|
import requests
import os
url_image = 'https://www.python.org/static/community_logos/python-logo.png'
r_image = requests.get(url_image)
print(r_image.headers['Content-Type'])
# image/png
filename_image = os.path.basename(url_image)
print(filename_image)
# python-logo.png
with open('data/temp/' + filename_image, 'wb') as f:
f.write(r_image.content)
url_zip = 'http://www.post.japanpost.jp/zipcode/dl/oogaki/zip/13tokyo.zip'
r_zip = requests.get(url_zip)
print(r_zip.headers['Content-Type'])
# application/zip
filename_zip = os.path.basename(url_zip)
print(filename_zip)
# 13tokyo.zip
with open('data/temp/' + filename_zip, 'wb') as f:
f.write(r_zip.content)
|
monitorrent/rest/settings_notify_on.py
|
DmitryRibalka/monitorrent
| 465 |
57297
|
import falcon
import six
from monitorrent.settings_manager import SettingsManager
# noinspection PyUnusedLocal
class SettingsNotifyOn(object):
def __init__(self, settings_manager):
"""
:type settings_manager: SettingsManager
"""
self.settings_manager = settings_manager
def on_get(self, req, resp):
resp.json = self.settings_manager.get_external_notifications_levels()
def on_put(self, req, resp):
if req.json is None or len(req.json) == 0:
raise falcon.HTTPBadRequest('BodyRequired', 'Expecting not empty JSON body')
if not isinstance(req.json, list) or any([not isinstance(i, six.text_type) for i in req.json]):
raise falcon.HTTPBadRequest('ArrayOfStringExpected', 'Expecting list of string values')
existing_levels = self.settings_manager.get_existing_external_notifications_levels()
unknown_levels = [l for l in req.json if l not in existing_levels]
if len(unknown_levels) > 0:
raise falcon.HTTPBadRequest('UnknownLevels', '{0} are unknow levels'.format(unknown_levels))
self.settings_manager.set_external_notifications_levels(req.json)
resp.status = falcon.HTTP_NO_CONTENT
|
python/spinn/models/fat_classifier.py
|
stanfordnlp/spinn
| 200 |
57337
|
<reponame>stanfordnlp/spinn<gh_stars>100-1000
"""From the project root directory (containing data files), this can be run with:
Boolean logic evaluation:
python -m spinn.models.fat_classifier --training_data_path ../bl-data/pbl_train.tsv \
--eval_data_path ../bl-data/pbl_dev.tsv
SST sentiment (Demo only, model needs a full GloVe embeddings file to do well):
python -m spinn.models.fat_classifier --data_type sst --training_data_path sst-data/train.txt \
--eval_data_path sst-data/dev.txt --embedding_data_path spinn/tests/test_embedding_matrix.5d.txt \
--model_dim 10 --word_embedding_dim 5
SNLI entailment (Demo only, model needs a full GloVe embeddings file to do well):
python -m spinn.models.fat_classifier --data_type snli --training_data_path snli_1.0/snli_1.0_dev.jsonl \
--eval_data_path snli_1.0/snli_1.0_dev.jsonl --embedding_data_path spinn/tests/test_embedding_matrix.5d.txt \
--model_dim 10 --word_embedding_dim 5
Note: If you get an error starting with "TypeError: ('Wrong number of dimensions..." during development,
there may already be a saved checkpoint in ckpt_path that matches the name of the model you're developing.
Move or delete it as appropriate.
"""
from functools import partial
import os
import pprint
import sys
import gflags
from theano import tensor as T
import theano
import numpy as np
from spinn import afs_safe_logger
from spinn import util
from spinn.data.boolean import load_boolean_data
from spinn.data.sst import load_sst_data
from spinn.data.snli import load_snli_data
import spinn.fat_stack
import spinn.plain_rnn
import spinn.cbow
FLAGS = gflags.FLAGS
def build_sentence_model(cls, vocab_size, seq_length, tokens, transitions,
num_classes, training_mode, ground_truth_transitions_visible, vs,
initial_embeddings=None, project_embeddings=False, ss_mask_gen=None, ss_prob=0.0):
"""
Construct a classifier which makes use of some hard-stack model.
Args:
cls: Hard stack class to use (from e.g. `spinn.fat_stack`)
vocab_size:
seq_length: Length of each sequence provided to the stack model
tokens: Theano batch (integer matrix), `batch_size * seq_length`
transitions: Theano batch (integer matrix), `batch_size * seq_length`
num_classes: Number of output classes
training_mode: A Theano scalar indicating whether to act as a training model
with dropout (1.0) or to act as an eval model with rescaling (0.0).
ground_truth_transitions_visible: A Theano scalar. If set (1.0), allow the model access
to ground truth transitions. This can be disabled at evaluation time to force Model 1
(or 2S) to evaluate in the Model 2 style with predicted transitions. Has no effect on Model 0.
vs: Variable store.
"""
# Prepare layer which performs stack element composition.
if cls is spinn.plain_rnn.RNN:
if FLAGS.use_gru:
compose_network = partial(util.GRULayer,
initializer=util.HeKaimingInitializer())
else:
compose_network = partial(util.LSTMLayer,
initializer=util.HeKaimingInitializer())
embedding_projection_network = None
elif cls is spinn.cbow.CBOW:
compose_network = None
embedding_projection_network = None
else:
if FLAGS.lstm_composition:
if FLAGS.use_gru:
compose_network = partial(util.TreeGRULayer,
initializer=util.HeKaimingInitializer())
else:
compose_network = partial(util.TreeLSTMLayer,
initializer=util.HeKaimingInitializer())
else:
assert not FLAGS.connect_tracking_comp, "Can only connect tracking and composition unit while using TreeLSTM"
compose_network = partial(util.ReLULayer,
initializer=util.HeKaimingInitializer())
if project_embeddings:
embedding_projection_network = util.Linear
else:
assert FLAGS.word_embedding_dim == FLAGS.model_dim, \
"word_embedding_dim must equal model_dim unless a projection layer is used."
embedding_projection_network = util.IdentityLayer
# Build hard stack which scans over input sequence.
sentence_model = cls(
FLAGS.model_dim, FLAGS.word_embedding_dim, vocab_size, seq_length,
compose_network, embedding_projection_network, training_mode, ground_truth_transitions_visible, vs,
predict_use_cell=FLAGS.predict_use_cell,
use_tracking_lstm=FLAGS.use_tracking_lstm,
tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim,
X=tokens,
transitions=transitions,
initial_embeddings=initial_embeddings,
embedding_dropout_keep_rate=FLAGS.embedding_keep_rate,
ss_mask_gen=ss_mask_gen,
ss_prob=ss_prob,
connect_tracking_comp=FLAGS.connect_tracking_comp,
context_sensitive_shift=FLAGS.context_sensitive_shift,
context_sensitive_use_relu=FLAGS.context_sensitive_use_relu,
use_input_batch_norm=False)
# Extract top element of final stack timestep.
if FLAGS.lstm_composition or cls is spinn.plain_rnn.RNN:
sentence_vector = sentence_model.final_representations[:,:FLAGS.model_dim / 2].reshape((-1, FLAGS.model_dim / 2))
sentence_vector_dim = FLAGS.model_dim / 2
else:
sentence_vector = sentence_model.final_representations.reshape((-1, FLAGS.model_dim))
sentence_vector_dim = FLAGS.model_dim
sentence_vector = util.BatchNorm(sentence_vector, sentence_vector_dim, vs, "sentence_vector", training_mode)
sentence_vector = util.Dropout(sentence_vector, FLAGS.semantic_classifier_keep_rate, training_mode)
# Feed forward through a single output layer
logits = util.Linear(
sentence_vector, sentence_vector_dim, num_classes, vs,
name="semantic_classifier", use_bias=True)
return sentence_model.transitions_pred, logits
def build_sentence_pair_model(cls, vocab_size, seq_length, tokens, transitions,
num_classes, training_mode, ground_truth_transitions_visible, vs,
initial_embeddings=None, project_embeddings=False, ss_mask_gen=None, ss_prob=0.0):
"""
Construct a classifier which makes use of some hard-stack model.
Args:
cls: Hard stack class to use (from e.g. `spinn.fat_stack`)
vocab_size:
seq_length: Length of each sequence provided to the stack model
tokens: Theano batch (integer matrix), `batch_size * seq_length`
transitions: Theano batch (integer matrix), `batch_size * seq_length`
num_classes: Number of output classes
training_mode: A Theano scalar indicating whether to act as a training model
with dropout (1.0) or to act as an eval model with rescaling (0.0).
ground_truth_transitions_visible: A Theano scalar. If set (1.0), allow the model access
to ground truth transitions. This can be disabled at evaluation time to force Model 1
(or 2S) to evaluate in the Model 2 style with predicted transitions. Has no effect on Model 0.
vs: Variable store.
"""
# Prepare layer which performs stack element composition.
if cls is spinn.plain_rnn.RNN:
if FLAGS.use_gru:
compose_network = partial(util.GRULayer,
initializer=util.HeKaimingInitializer())
else:
compose_network = partial(util.LSTMLayer,
initializer=util.HeKaimingInitializer())
embedding_projection_network = None
elif cls is spinn.cbow.CBOW:
compose_network = None
embedding_projection_network = None
else:
if FLAGS.lstm_composition:
if FLAGS.use_gru:
compose_network = partial(util.TreeGRULayer,
initializer=util.HeKaimingInitializer())
else:
compose_network = partial(util.TreeLSTMLayer,
initializer=util.HeKaimingInitializer())
else:
assert not FLAGS.connect_tracking_comp, "Can only connect tracking and composition unit while using TreeLSTM"
compose_network = partial(util.ReLULayer,
initializer=util.HeKaimingInitializer())
if project_embeddings:
embedding_projection_network = util.Linear
else:
assert FLAGS.word_embedding_dim == FLAGS.model_dim, \
"word_embedding_dim must equal model_dim unless a projection layer is used."
embedding_projection_network = util.IdentityLayer
# Split the two sentences
premise_tokens = tokens[:, :, 0]
hypothesis_tokens = tokens[:, :, 1]
premise_transitions = transitions[:, :, 0]
hypothesis_transitions = transitions[:, :, 1]
# Build two hard stack models which scan over input sequences.
premise_model = cls(
FLAGS.model_dim, FLAGS.word_embedding_dim, vocab_size, seq_length,
compose_network, embedding_projection_network, training_mode, ground_truth_transitions_visible, vs,
predict_use_cell=FLAGS.predict_use_cell,
use_tracking_lstm=FLAGS.use_tracking_lstm,
tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim,
X=premise_tokens,
transitions=premise_transitions,
initial_embeddings=initial_embeddings,
embedding_dropout_keep_rate=FLAGS.embedding_keep_rate,
ss_mask_gen=ss_mask_gen,
ss_prob=ss_prob,
connect_tracking_comp=FLAGS.connect_tracking_comp,
context_sensitive_shift=FLAGS.context_sensitive_shift,
context_sensitive_use_relu=FLAGS.context_sensitive_use_relu,
use_attention=FLAGS.use_attention,
initialize_hyp_tracking_state=FLAGS.initialize_hyp_tracking_state)
premise_stack_tops = premise_model.stack_tops if FLAGS.use_attention != "None" else None
premise_tracking_c_state_final = premise_model.tracking_c_state_final if cls not in [spinn.plain_rnn.RNN,
spinn.cbow.CBOW] else None
hypothesis_model = cls(
FLAGS.model_dim, FLAGS.word_embedding_dim, vocab_size, seq_length,
compose_network, embedding_projection_network, training_mode, ground_truth_transitions_visible, vs,
predict_use_cell=FLAGS.predict_use_cell,
use_tracking_lstm=FLAGS.use_tracking_lstm,
tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim,
X=hypothesis_tokens,
transitions=hypothesis_transitions,
initial_embeddings=initial_embeddings,
embedding_dropout_keep_rate=FLAGS.embedding_keep_rate,
ss_mask_gen=ss_mask_gen,
ss_prob=ss_prob,
connect_tracking_comp=FLAGS.connect_tracking_comp,
context_sensitive_shift=FLAGS.context_sensitive_shift,
context_sensitive_use_relu=FLAGS.context_sensitive_use_relu,
use_attention=FLAGS.use_attention,
premise_stack_tops=premise_stack_tops,
is_hypothesis=True,
initialize_hyp_tracking_state=FLAGS.initialize_hyp_tracking_state,
premise_tracking_c_state_final=premise_tracking_c_state_final)
# Extract top element of final stack timestep.
if FLAGS.use_attention == "None" or FLAGS.use_difference_feature or FLAGS.use_product_feature:
premise_vector = premise_model.final_representations
hypothesis_vector = hypothesis_model.final_representations
if (FLAGS.lstm_composition and cls is not spinn.cbow.CBOW) or cls is spinn.plain_rnn.RNN:
premise_vector = premise_vector[:,:FLAGS.model_dim / 2].reshape((-1, FLAGS.model_dim / 2))
hypothesis_vector = hypothesis_vector[:,:FLAGS.model_dim / 2].reshape((-1, FLAGS.model_dim / 2))
sentence_vector_dim = FLAGS.model_dim / 2
else:
premise_vector = premise_vector.reshape((-1, FLAGS.model_dim))
hypothesis_vector = hypothesis_vector.reshape((-1, FLAGS.model_dim))
sentence_vector_dim = FLAGS.model_dim
if FLAGS.use_attention != "None":
# Use the attention weighted representation
h_dim = FLAGS.model_dim / 2
mlp_input = hypothesis_model.final_weighed_representation.reshape((-1, h_dim))
mlp_input_dim = h_dim
else:
# Create standard MLP features
mlp_input = T.concatenate([premise_vector, hypothesis_vector], axis=1)
mlp_input_dim = 2 * sentence_vector_dim
if FLAGS.use_difference_feature:
mlp_input = T.concatenate([mlp_input, premise_vector - hypothesis_vector], axis=1)
mlp_input_dim += sentence_vector_dim
if FLAGS.use_product_feature:
mlp_input = T.concatenate([mlp_input, premise_vector * hypothesis_vector], axis=1)
mlp_input_dim += sentence_vector_dim
mlp_input = util.BatchNorm(mlp_input, mlp_input_dim, vs, "sentence_vectors", training_mode)
mlp_input = util.Dropout(mlp_input, FLAGS.semantic_classifier_keep_rate, training_mode)
if FLAGS.classifier_type == "ResNet":
features = util.Linear(
mlp_input, mlp_input_dim, FLAGS.sentence_pair_combination_layer_dim, vs,
name="resnet/linear", use_bias=True)
features_dim = FLAGS.sentence_pair_combination_layer_dim
for layer in range(FLAGS.num_sentence_pair_combination_layers):
features = util.HeKaimingResidualLayerSet(features, features_dim, vs, training_mode, name="resnet/" + str(layer),
dropout_keep_rate=FLAGS.semantic_classifier_keep_rate, depth=FLAGS.resnet_unit_depth,
initializer=util.HeKaimingInitializer())
features = util.BatchNorm(features, features_dim, vs, "combining_mlp/" + str(layer), training_mode)
features = util.Dropout(features, FLAGS.semantic_classifier_keep_rate, training_mode)
elif FLAGS.classifier_type == "Highway":
features = util.Linear(
mlp_input, mlp_input_dim, FLAGS.sentence_pair_combination_layer_dim, vs,
name="resnet/linear", use_bias=True)
features_dim = FLAGS.sentence_pair_combination_layer_dim
for layer in range(FLAGS.num_sentence_pair_combination_layers):
features = util.HighwayLayer(features, features_dim, vs, training_mode, name="highway/" + str(layer),
dropout_keep_rate=FLAGS.semantic_classifier_keep_rate,
initializer=util.HeKaimingInitializer())
features = util.BatchNorm(features, features_dim, vs, "combining_mlp/" + str(layer), training_mode)
features = util.Dropout(features, FLAGS.semantic_classifier_keep_rate, training_mode)
else:
# Apply a combining MLP
features = mlp_input
features_dim = mlp_input_dim
for layer in range(FLAGS.num_sentence_pair_combination_layers):
features = util.ReLULayer(features, features_dim, FLAGS.sentence_pair_combination_layer_dim, vs,
name="combining_mlp/" + str(layer),
initializer=util.HeKaimingInitializer())
features_dim = FLAGS.sentence_pair_combination_layer_dim
features = util.BatchNorm(features, features_dim, vs, "combining_mlp/" + str(layer), training_mode)
features = util.Dropout(features, FLAGS.semantic_classifier_keep_rate, training_mode)
# Feed forward through a single output layer
logits = util.Linear(
features, features_dim, num_classes, vs,
name="semantic_classifier", use_bias=True)
return premise_model.transitions_pred, hypothesis_model.transitions_pred, logits
def build_cost(logits, targets):
"""
Build a classification cost function.
"""
# Clip gradients coming from the cost function.
logits = theano.gradient.grad_clip(
logits, -1. * FLAGS.clipping_max_value, FLAGS.clipping_max_value)
predicted_dist = T.nnet.softmax(logits)
costs = T.nnet.categorical_crossentropy(predicted_dist, targets)
cost = costs.mean()
pred = T.argmax(logits, axis=1)
acc = 1. - T.mean(T.cast(T.neq(pred, targets), theano.config.floatX))
return cost, acc
def build_transition_cost(logits, targets, num_transitions):
"""
Build a parse action prediction cost function.
"""
# swap seq_length dimension to front so that we can scan per timestep
logits = T.swapaxes(logits, 0, 1)
targets = targets.T
def cost_t(logits, tgt, num_transitions):
# TODO(jongauthier): Taper down xent cost as we proceed through
# sequence?
predicted_dist = T.nnet.softmax(logits)
cost = T.nnet.categorical_crossentropy(predicted_dist, tgt)
pred = T.argmax(logits, axis=1)
error = T.neq(pred, tgt)
return cost, error
results, _ = theano.scan(cost_t, [logits, targets], non_sequences=[num_transitions])
costs, errors = results
# Create a mask that selects only transitions that involve real data.
unrolling_length = T.shape(costs)[0]
padding = unrolling_length - num_transitions
padding = T.reshape(padding, (1, -1))
rng = T.arange(unrolling_length) + 1
rng = T.reshape(rng, (-1, 1))
mask = T.gt(rng, padding)
# Compute acc using the mask
acc = 1.0 - (T.sum(errors * mask, dtype=theano.config.floatX)
/ T.sum(num_transitions, dtype=theano.config.floatX))
# Compute cost directly, since we *do* want a cost incentive to get the padding
# transitions right.
cost = T.mean(costs)
return cost, acc
def evaluate(eval_fn, eval_set, logger, step):
# Evaluate
acc_accum = 0.0
action_acc_accum = 0.0
eval_batches = 0.0
for (eval_X_batch, eval_transitions_batch, eval_y_batch, eval_num_transitions_batch) in eval_set[1]:
acc_value, action_acc_value = eval_fn(
eval_X_batch, eval_transitions_batch,
eval_y_batch, eval_num_transitions_batch, 0.0, # Eval mode: Don't apply dropout.
int(FLAGS.allow_gt_transitions_in_eval), # Allow GT transitions to be used according to flag.
float(FLAGS.allow_gt_transitions_in_eval)) # If flag not set, used scheduled sampling
# p(ground truth) = 0.0,
# else SS p(ground truth) = 1.0
acc_accum += acc_value
action_acc_accum += action_acc_value
eval_batches += 1.0
logger.Log("Step: %i\tEval acc: %f\t %f\t%s" %
(step, acc_accum / eval_batches, action_acc_accum / eval_batches, eval_set[0]))
return acc_accum / eval_batches
def evaluate_expanded(eval_fn, eval_set, eval_path, logger, step, sentence_pair_data, ind_to_word, predict_transitions):
"""
Write the gold parses and predicted parses in the files <eval_out_path>.gld and <eval_out_path>.tst
respectively. These files can be given as inputs to Evalb to evaluate parsing performance -
evalb -p evalb_spinn.prm <eval_out_path>.gld <eval_out_path>.tst
TODO(SB): Set up for RNN and Model0 on non-sentence-pair data; port support to classifier.py.
"""
# TODO: Prune out redundant code, make usable on Model0 as well.
acc_accum = 0.0
action_acc_accum = 0.0
eval_batches = 0.0
eval_gold_path = eval_path + ".gld"
eval_out_path = eval_path + ".tst"
eval_lbl_path = eval_path + ".lbl"
with open(eval_gold_path, "w") as eval_gold, open(eval_out_path, "w") as eval_out:
if FLAGS.write_predicted_label:
label_out = open(eval_lbl_path, "w")
if sentence_pair_data:
for (eval_X_batch, eval_transitions_batch, eval_y_batch,
eval_num_transitions_batch) in eval_set[1]:
acc_value, action_acc_value, sem_logit_values, logits_pred_hyp, logits_pred_prem = eval_fn(
eval_X_batch, eval_transitions_batch, eval_y_batch, eval_num_transitions_batch,
0.0, # Eval mode: Don't apply dropout.
int(FLAGS.allow_gt_transitions_in_eval), # Allow GT transitions to be used according to flag.
float(FLAGS.allow_gt_transitions_in_eval)) # adjust visibility of GT
acc_accum += acc_value
action_acc_accum += action_acc_value
eval_batches += 1.0
# write each predicted transition to file
for orig_transitions, pred_logit_hyp, pred_logit_prem, tokens, true_class, example_sem_logits \
in zip(eval_transitions_batch, logits_pred_hyp,
logits_pred_prem, eval_X_batch, eval_y_batch, sem_logit_values):
if predict_transitions:
orig_hyp_transitions, orig_prem_transitions = orig_transitions.T
pred_hyp_transitions = pred_logit_hyp.argmax(axis=1)
pred_prem_transitions = pred_logit_prem.argmax(axis=1)
else:
orig_hyp_transitions = orig_prem_transitions = pred_hyp_transitions = pred_prem_transitions = None
hyp_tokens, prem_tokens = tokens.T
hyp_words = [ind_to_word[t] for t in hyp_tokens]
prem_words = [ind_to_word[t] for t in prem_tokens]
eval_gold.write(util.TransitionsToParse(orig_hyp_transitions, hyp_words) + "\n")
eval_out.write(util.TransitionsToParse(pred_hyp_transitions, hyp_words) + "\n")
eval_gold.write(util.TransitionsToParse(orig_prem_transitions, prem_words) + "\n")
eval_out.write(util.TransitionsToParse(pred_prem_transitions, prem_words) + "\n")
predicted_class = np.argmax(example_sem_logits)
exp_logit_values = np.exp(example_sem_logits)
class_probs = exp_logit_values / np.sum(exp_logit_values)
class_probs_repr = "\t".join(map(lambda p : "%.8f" % (p,), class_probs))
if FLAGS.write_predicted_label:
label_out.write(str(true_class == predicted_class) + "\t" + str(true_class)
+ "\t" + str(predicted_class) + "\t" + class_probs_repr + "\n")
else:
for (eval_X_batch, eval_transitions_batch, eval_y_batch,
eval_num_transitions_batch) in eval_set[1]:
acc_value, action_acc_value, sem_logit_values, logits_pred = eval_fn(
eval_X_batch, eval_transitions_batch, eval_y_batch, eval_num_transitions_batch,
0.0, # Eval mode: Don't apply dropout.
int(FLAGS.allow_gt_transitions_in_eval), # Allow GT transitions to be used according to flag.
float(FLAGS.allow_gt_transitions_in_eval)) # adjust visibility of GT
acc_accum += acc_value
action_acc_accum += action_acc_value
eval_batches += 1.0
# write each predicted transition to file
for orig_transitions, pred_logit, tokens, true_class, example_sem_logits \
in zip(eval_transitions_batch, logits_pred, eval_X_batch, eval_y_batch, sem_logit_values):
words = [ind_to_word[t] for t in tokens]
eval_gold.write(util.TransitionsToParse(orig_transitions, words) + "\n")
eval_out.write(util.TransitionsToParse(pred_logit.argmax(axis=1), words) + "\n")
predicted_class = np.argmax(example_sem_logits)
exp_logit_values = np.exp(example_sem_logits)
class_probs = exp_logit_values / np.sum(exp_logit_values)
class_probs_repr = "\t".join(map(lambda p : "%.3f" % (p,), class_probs))
if FLAGS.write_predicted_label:
label_out.write(str(true_class == predicted_class) + "\t" + str(true_class)
+ "\t" + str(predicted_class) + "\t" + class_probs_repr + "\n")
logger.Log("Written gold parses in %s" % (eval_gold_path))
logger.Log("Written predicted parses in %s" % (eval_out_path))
if FLAGS.write_predicted_label:
logger.Log("Written predicted labels in %s" % (eval_lbl_path))
label_out.close()
logger.Log("Step: %i\tEval acc: %f\t %f\t%s" %
(step, acc_accum / eval_batches, action_acc_accum / eval_batches, eval_set[0]))
def run(only_forward=False):
logger = afs_safe_logger.Logger(os.path.join(FLAGS.log_path, FLAGS.experiment_name) + ".log")
if FLAGS.data_type == "bl":
data_manager = load_boolean_data
elif FLAGS.data_type == "sst":
data_manager = load_sst_data
elif FLAGS.data_type == "snli":
data_manager = load_snli_data
else:
logger.Log("Bad data type.")
return
pp = pprint.PrettyPrinter(indent=4)
logger.Log("Flag values:\n" + pp.pformat(FLAGS.FlagValuesDict()))
# Load the data.
raw_training_data, vocabulary = data_manager.load_data(
FLAGS.training_data_path)
# Load the eval data.
raw_eval_sets = []
if FLAGS.eval_data_path:
for eval_filename in FLAGS.eval_data_path.split(":"):
eval_data, _ = data_manager.load_data(eval_filename)
raw_eval_sets.append((eval_filename, eval_data))
# Prepare the vocabulary.
if not vocabulary:
logger.Log("In open vocabulary mode. Using loaded embeddings without fine-tuning.")
train_embeddings = False
vocabulary = util.BuildVocabulary(
raw_training_data, raw_eval_sets, FLAGS.embedding_data_path, logger=logger,
sentence_pair_data=data_manager.SENTENCE_PAIR_DATA)
else:
logger.Log("In fixed vocabulary mode. Training embeddings.")
train_embeddings = True
# Load pretrained embeddings.
if FLAGS.embedding_data_path:
logger.Log("Loading vocabulary with " + str(len(vocabulary))
+ " words from " + FLAGS.embedding_data_path)
initial_embeddings = util.LoadEmbeddingsFromASCII(
vocabulary, FLAGS.word_embedding_dim, FLAGS.embedding_data_path)
else:
initial_embeddings = None
# Trim dataset, convert token sequences to integer sequences, crop, and
# pad.
logger.Log("Preprocessing training data.")
training_data = util.PreprocessDataset(
raw_training_data, vocabulary, FLAGS.seq_length, data_manager, eval_mode=False, logger=logger,
sentence_pair_data=data_manager.SENTENCE_PAIR_DATA,
for_rnn=FLAGS.model_type == "RNN" or FLAGS.model_type == "CBOW")
training_data_iter = util.MakeTrainingIterator(
training_data, FLAGS.batch_size)
eval_iterators = []
for filename, raw_eval_set in raw_eval_sets:
logger.Log("Preprocessing eval data: " + filename)
e_X, e_transitions, e_y, e_num_transitions = util.PreprocessDataset(
raw_eval_set, vocabulary, FLAGS.seq_length, data_manager, eval_mode=True, logger=logger,
sentence_pair_data=data_manager.SENTENCE_PAIR_DATA,
for_rnn=FLAGS.model_type == "RNN" or FLAGS.model_type == "CBOW")
eval_iterators.append((filename,
util.MakeEvalIterator((e_X, e_transitions, e_y, e_num_transitions), FLAGS.batch_size)))
# Set up the placeholders.
y = T.vector("y", dtype="int32")
lr = T.scalar("lr")
training_mode = T.scalar("training_mode") # 1: Training with dropout, 0: Eval
ground_truth_transitions_visible = T.scalar("ground_truth_transitions_visible", dtype="int32")
logger.Log("Building model.")
vs = util.VariableStore(
default_initializer=util.UniformInitializer(FLAGS.init_range), logger=logger)
if FLAGS.model_type == "CBOW":
model_cls = spinn.cbow.CBOW
elif FLAGS.model_type == "RNN":
model_cls = spinn.plain_rnn.RNN
else:
model_cls = getattr(spinn.fat_stack, FLAGS.model_type)
# Generator of mask for scheduled sampling
numpy_random = np.random.RandomState(1234)
ss_mask_gen = T.shared_randomstreams.RandomStreams(numpy_random.randint(999999))
# Training step number
ss_prob = T.scalar("ss_prob")
if data_manager.SENTENCE_PAIR_DATA:
X = T.itensor3("X")
transitions = T.itensor3("transitions")
num_transitions = T.imatrix("num_transitions")
predicted_premise_transitions, predicted_hypothesis_transitions, logits = build_sentence_pair_model(
model_cls, len(vocabulary), FLAGS.seq_length,
X, transitions, len(data_manager.LABEL_MAP), training_mode, ground_truth_transitions_visible, vs,
initial_embeddings=initial_embeddings, project_embeddings=(not train_embeddings),
ss_mask_gen=ss_mask_gen,
ss_prob=ss_prob)
else:
X = T.matrix("X", dtype="int32")
transitions = T.imatrix("transitions")
num_transitions = T.vector("num_transitions", dtype="int32")
predicted_transitions, logits = build_sentence_model(
model_cls, len(vocabulary), FLAGS.seq_length,
X, transitions, len(data_manager.LABEL_MAP), training_mode, ground_truth_transitions_visible, vs,
initial_embeddings=initial_embeddings, project_embeddings=(not train_embeddings),
ss_mask_gen=ss_mask_gen,
ss_prob=ss_prob)
xent_cost, acc = build_cost(logits, y)
# Set up L2 regularization.
l2_cost = 0.0
for var in vs.trainable_vars:
l2_cost += FLAGS.l2_lambda * T.sum(T.sqr(vs.vars[var]))
# Compute cross-entropy cost on action predictions.
if (not data_manager.SENTENCE_PAIR_DATA) and FLAGS.model_type not in ["Model0", "RNN", "CBOW"]:
transition_cost, action_acc = build_transition_cost(predicted_transitions, transitions, num_transitions)
elif data_manager.SENTENCE_PAIR_DATA and FLAGS.model_type not in ["Model0", "RNN", "CBOW"]:
p_transition_cost, p_action_acc = build_transition_cost(predicted_premise_transitions, transitions[:, :, 0],
num_transitions[:, 0])
h_transition_cost, h_action_acc = build_transition_cost(predicted_hypothesis_transitions, transitions[:, :, 1],
num_transitions[:, 1])
transition_cost = p_transition_cost + h_transition_cost
action_acc = (p_action_acc + h_action_acc) / 2.0 # TODO(SB): Average over transitions, not words.
else:
transition_cost = T.constant(0.0)
action_acc = T.constant(0.0)
transition_cost = transition_cost * FLAGS.transition_cost_scale
total_cost = xent_cost + l2_cost + transition_cost
if ".ckpt" in FLAGS.ckpt_path:
checkpoint_path = FLAGS.ckpt_path
else:
checkpoint_path = os.path.join(FLAGS.ckpt_path, FLAGS.experiment_name + ".ckpt")
if os.path.isfile(checkpoint_path):
logger.Log("Found checkpoint, restoring.")
step, best_dev_error = vs.load_checkpoint(checkpoint_path, num_extra_vars=2,
skip_saved_unsavables=FLAGS.skip_saved_unsavables)
else:
assert not only_forward, "Can't run an eval-only run without a checkpoint. Supply a checkpoint."
step = 0
best_dev_error = 1.0
# Do an evaluation-only run.
if only_forward:
if FLAGS.eval_output_paths:
eval_output_paths = FLAGS.eval_output_paths.strip().split(":")
assert len(eval_output_paths) == len(eval_iterators), "Invalid no. of output paths."
else:
eval_output_paths = [FLAGS.experiment_name + "-" + os.path.split(eval_set[0])[1] + "-parse"
for eval_set in eval_iterators]
# Load model from checkpoint.
logger.Log("Checkpointed model was trained for %d steps." % (step,))
# Generate function for forward pass.
logger.Log("Building forward pass.")
if data_manager.SENTENCE_PAIR_DATA:
eval_fn = theano.function(
[X, transitions, y, num_transitions, training_mode, ground_truth_transitions_visible, ss_prob],
[acc, action_acc, logits, predicted_hypothesis_transitions, predicted_premise_transitions],
on_unused_input='ignore',
allow_input_downcast=True)
else:
eval_fn = theano.function(
[X, transitions, y, num_transitions, training_mode, ground_truth_transitions_visible, ss_prob],
[acc, action_acc, logits, predicted_transitions],
on_unused_input='ignore',
allow_input_downcast=True)
# Generate the inverse vocabulary lookup table.
ind_to_word = {v : k for k, v in vocabulary.iteritems()}
# Do a forward pass and write the output to disk.
for eval_set, eval_out_path in zip(eval_iterators, eval_output_paths):
logger.Log("Writing eval output for %s." % (eval_set[0],))
evaluate_expanded(eval_fn, eval_set, eval_out_path, logger, step,
data_manager.SENTENCE_PAIR_DATA, ind_to_word, FLAGS.model_type not in ["Model0", "RNN", "CBOW"])
else:
# Train
new_values = util.RMSprop(total_cost, vs.trainable_vars.values(), lr)
new_values += [(key, vs.nongradient_updates[key]) for key in vs.nongradient_updates]
# Training open-vocabulary embeddings is a questionable idea right now. Disabled:
# new_values.append(
# util.embedding_SGD(total_cost, embedding_params, embedding_lr))
# Create training and eval functions.
# Unused variable warnings are supressed so that num_transitions can be passed in when training Model 0,
# which ignores it. This yields more readable code that is very slightly slower.
logger.Log("Building update function.")
update_fn = theano.function(
[X, transitions, y, num_transitions, lr, training_mode, ground_truth_transitions_visible, ss_prob],
[total_cost, xent_cost, transition_cost, action_acc, l2_cost, acc],
updates=new_values,
on_unused_input='ignore',
allow_input_downcast=True)
logger.Log("Building eval function.")
eval_fn = theano.function([X, transitions, y, num_transitions, training_mode, ground_truth_transitions_visible, ss_prob], [acc, action_acc],
on_unused_input='ignore',
allow_input_downcast=True)
logger.Log("Training.")
# Main training loop.
for step in range(step, FLAGS.training_steps):
if step % FLAGS.eval_interval_steps == 0:
for index, eval_set in enumerate(eval_iterators):
acc = evaluate(eval_fn, eval_set, logger, step)
if FLAGS.ckpt_on_best_dev_error and index == 0 and (1 - acc) < 0.99 * best_dev_error and step > 1000:
best_dev_error = 1 - acc
logger.Log("Checkpointing with new best dev accuracy of %f" % acc)
vs.save_checkpoint(checkpoint_path + "_best", extra_vars=[step, best_dev_error])
X_batch, transitions_batch, y_batch, num_transitions_batch = training_data_iter.next()
learning_rate = FLAGS.learning_rate * (FLAGS.learning_rate_decay_per_10k_steps ** (step / 10000.0))
ret = update_fn(X_batch, transitions_batch, y_batch, num_transitions_batch,
learning_rate, 1.0, 1.0, np.exp(step*np.log(FLAGS.scheduled_sampling_exponent_base)))
total_cost_val, xent_cost_val, transition_cost_val, action_acc_val, l2_cost_val, acc_val = ret
if step % FLAGS.statistics_interval_steps == 0:
logger.Log(
"Step: %i\tAcc: %f\t%f\tCost: %5f %5f %5f %5f"
% (step, acc_val, action_acc_val, total_cost_val, xent_cost_val, transition_cost_val,
l2_cost_val))
if step % FLAGS.ckpt_interval_steps == 0 and step > 0:
vs.save_checkpoint(checkpoint_path, extra_vars=[step, best_dev_error])
if __name__ == '__main__':
# Experiment naming.
gflags.DEFINE_string("experiment_name", "experiment", "")
# Data types.
gflags.DEFINE_enum("data_type", "bl", ["bl", "sst", "snli"],
"Which data handler and classifier to use.")
# Where to store checkpoints
gflags.DEFINE_string("ckpt_path", ".", "Where to save/load checkpoints. Can be either "
"a filename or a directory. In the latter case, the experiment name serves as the "
"base for the filename.")
gflags.DEFINE_string("log_path", ".", "A directory in which to write logs.")
# Data settings.
gflags.DEFINE_string("training_data_path", None, "")
gflags.DEFINE_string("eval_data_path", None, "Can contain multiple file paths, separated "
"using ':' tokens. The first file should be the dev set, and is used for determining "
"when to save the early stopping 'best' checkpoints.")
gflags.DEFINE_integer("seq_length", 30, "")
gflags.DEFINE_integer("eval_seq_length", 30, "")
gflags.DEFINE_string("embedding_data_path", None,
"If set, load GloVe-formatted embeddings from here.")
# Model architecture settings.
gflags.DEFINE_enum("model_type", "Model0",
["CBOW", "RNN", "Model0", "Model1", "Model2", "Model2S"],
"")
gflags.DEFINE_boolean("allow_gt_transitions_in_eval", False,
"Whether to use ground truth transitions in evaluation when appropriate "
"(i.e., in Model 1 and Model 2S.)")
gflags.DEFINE_integer("model_dim", 8, "")
gflags.DEFINE_integer("word_embedding_dim", 8, "")
gflags.DEFINE_integer("tracking_lstm_hidden_dim", 4, "")
gflags.DEFINE_boolean("use_tracking_lstm", True,
"Whether to use LSTM in the tracking unit")
gflags.DEFINE_boolean("predict_use_cell", False,
"For models which predict parser actions, use "
"both the tracking LSTM hidden and cell values as "
"input to the prediction layer")
gflags.DEFINE_enum("use_attention", "None",
["None", "Rocktaschel", "WangJiang", "Thang", "TreeWangJiang", "TreeThang"],
"")
gflags.DEFINE_boolean("context_sensitive_shift", False,
"Use LSTM hidden state and word embedding to determine the vector to be pushed")
gflags.DEFINE_boolean("context_sensitive_use_relu", False,
"Use ReLU Layer to combine embedding and tracking unit hidden state")
gflags.DEFINE_float("semantic_classifier_keep_rate", 0.5,
"Used for dropout in the semantic task classifier.")
gflags.DEFINE_float("embedding_keep_rate", 0.5,
"Used for dropout on transformed embeddings.")
gflags.DEFINE_boolean("lstm_composition", True, "")
gflags.DEFINE_enum("classifier_type", "MLP", ["MLP", "Highway", "ResNet"], "")
gflags.DEFINE_integer("resnet_unit_depth", 2, "")
# gflags.DEFINE_integer("num_composition_layers", 1, "")
gflags.DEFINE_integer("num_sentence_pair_combination_layers", 2, "")
gflags.DEFINE_integer("sentence_pair_combination_layer_dim", 1024, "")
gflags.DEFINE_float("scheduled_sampling_exponent_base", 0.99,
"Used for scheduled sampling, with probability of Model 1 over Model 2 being base^#training_steps")
gflags.DEFINE_boolean("use_difference_feature", True,
"Supply the sentence pair classifier with sentence difference features.")
gflags.DEFINE_boolean("use_product_feature", True,
"Supply the sentence pair classifier with sentence product features.")
gflags.DEFINE_boolean("connect_tracking_comp", True,
"Connect tracking unit and composition unit. Can only be true if using LSTM in both units.")
gflags.DEFINE_boolean("initialize_hyp_tracking_state", False,
"Initialize the c state of the tracking unit of hypothesis model with the final"
"tracking unit c state of the premise model.")
gflags.DEFINE_boolean("use_gru", False,
"Use GRU units instead of LSTM units.")
# Optimization settings.
gflags.DEFINE_integer("training_steps", 500000, "Stop training after this point.")
gflags.DEFINE_integer("batch_size", 32, "SGD minibatch size.")
gflags.DEFINE_float("learning_rate", 0.001, "Used in RMSProp.")
gflags.DEFINE_float("learning_rate_decay_per_10k_steps", 0.75, "Used in RMSProp.")
gflags.DEFINE_float("clipping_max_value", 5.0, "")
gflags.DEFINE_float("l2_lambda", 1e-5, "")
gflags.DEFINE_float("init_range", 0.005, "Mainly used for softmax parameters. Range for uniform random init.")
gflags.DEFINE_float("transition_cost_scale", 1.0, "Multiplied by the transition cost.")
# Display settings.
gflags.DEFINE_integer("statistics_interval_steps", 100, "Print training set results at this interval.")
gflags.DEFINE_integer("eval_interval_steps", 100, "Evaluate at this interval.")
gflags.DEFINE_integer("ckpt_interval_steps", 5000, "Update the checkpoint on disk at this interval.")
gflags.DEFINE_boolean("ckpt_on_best_dev_error", True, "If error on the first eval set (the dev set) is "
"at most 0.99 of error at the previous checkpoint, save a special 'best' checkpoint.")
# Evaluation settings
gflags.DEFINE_boolean("expanded_eval_only_mode", False,
"If set, a checkpoint is loaded and a forward pass is done to get the predicted "
"transitions. The inferred parses are written to the supplied file(s) along with example-"
"by-example accuracy information. Requirements: Must specify checkpoint path.")
gflags.DEFINE_string("eval_output_paths", None,
"Used when expanded_eval_only_mode is set. The number of supplied paths should be same"
"as the number of eval sets.")
gflags.DEFINE_boolean("write_predicted_label", False,
"Write the predicted labels in a <eval_output_name>.lbl file.")
gflags.DEFINE_boolean("skip_saved_unsavables", False,
"Assume that variables marked as not savable will appear in checkpoints anyway, and "
"skip them when loading. This should be used only when loading old checkpoints.")
# Parse command line flags.
FLAGS(sys.argv)
run(only_forward=FLAGS.expanded_eval_only_mode)
|
backend/book/models/__init__.py
|
Mackrage/worm_rage_bot
| 216 |
57439
|
from .book import Book
|
tools/bin/ext/figleaf/__init__.py
|
YangHao666666/hawq
| 450 |
57451
|
<gh_stars>100-1000
"""
figleaf is another tool to trace Python code coverage.
figleaf uses the sys.settrace hook to record which statements are
executed by the CPython interpreter; this record can then be saved
into a file, or otherwise communicated back to a reporting script.
figleaf differs from the gold standard of Python coverage tools
('coverage.py') in several ways. First and foremost, figleaf uses the
same criterion for "interesting" lines of code as the sys.settrace
function, which obviates some of the complexity in coverage.py (but
does mean that your "loc" count goes down). Second, figleaf does not
record code executed in the Python standard library, which results in
a significant speedup. And third, the format in which the coverage
format is saved is very simple and easy to work with.
You might want to use figleaf if you're recording coverage from
multiple types of tests and need to aggregate the coverage in
interesting ways, and/or control when coverage is recorded.
coverage.py is a better choice for command-line execution, and its
reporting is a fair bit nicer.
Command line usage: ::
figleaf <python file to execute> <args to python file>
The figleaf output is saved into the file '.figleaf', which is an
*aggregate* of coverage reports from all figleaf runs from this
directory. '.figleaf' contains a pickled dictionary of sets; the keys
are source code filenames, and the sets contain all line numbers
executed by the Python interpreter. See the docs or command-line
programs in bin/ for more information.
High level API: ::
* ``start(ignore_lib=True)`` -- start recording code coverage.
* ``stop()`` -- stop recording code coverage.
* ``get_trace_obj()`` -- return the (singleton) trace object.
* ``get_info()`` -- get the coverage dictionary
Classes & functions worth knowing about (lower level API):
* ``get_lines(fp)`` -- return the set of interesting lines in the fp.
* ``combine_coverage(d1, d2)`` -- combine coverage info from two dicts.
* ``read_coverage(filename)`` -- load the coverage dictionary
* ``write_coverage(filename)`` -- write the coverage out.
* ``annotate_coverage(...)`` -- annotate a Python file with its coverage info.
Known problems:
-- module docstrings are *covered* but not found.
AUTHOR: <NAME>, <EMAIL>, with contributions from Iain Lowe.
'figleaf' is Copyright (C) 2006, 2007 <NAME>. It is under the
BSD license.
"""
__version__ = "0.6.1"
# __all__ == @CTB
import sys
import os
from cPickle import dump, load
from optparse import OptionParser
import internals
# use builtin sets if in >= 2.4, otherwise use 'sets' module.
try:
set()
except NameError:
from sets import Set as set
def get_lines(fp):
"""
Return the set of interesting lines in the source code read from
this file handle.
"""
# rstrip is a workaround for http://bugs.python.org/issue4262
src = fp.read().rstrip() + "\n"
code = compile(src, "", "exec")
return internals.get_interesting_lines(code)
def combine_coverage(d1, d2):
"""
Given two coverage dictionaries, combine the recorded coverage
and return a new dictionary.
"""
keys = set(d1.keys())
keys.update(set(d2.keys()))
new_d = {}
for k in keys:
v = d1.get(k, set())
v2 = d2.get(k, set())
s = set(v)
s.update(v2)
new_d[k] = s
return new_d
def write_coverage(filename, append=True):
"""
Write the current coverage info out to the given filename. If
'append' is false, destroy any previously recorded coverage info.
"""
if _t is None:
return
data = internals.CoverageData(_t)
d = data.gather_files()
# sum existing coverage?
if append:
old = {}
fp = None
try:
fp = open(filename)
except IOError:
pass
if fp:
old = load(fp)
fp.close()
d = combine_coverage(d, old)
# ok, save.
outfp = open(filename, 'w')
try:
dump(d, outfp)
finally:
outfp.close()
def read_coverage(filename):
"""
Read a coverage dictionary in from the given file.
"""
fp = open(filename)
try:
d = load(fp)
finally:
fp.close()
return d
def dump_pickled_coverage(out_fp):
"""
Dump coverage information in pickled format into the given file handle.
"""
dump(_t, out_fp)
def load_pickled_coverage(in_fp):
"""
Replace (overwrite) coverage information from the given file handle.
"""
global _t
_t = load(in_fp)
def annotate_coverage(in_fp, out_fp, covered, all_lines,
mark_possible_lines=False):
"""
A simple example coverage annotator that outputs text.
"""
for i, line in enumerate(in_fp):
i = i + 1
if i in covered:
symbol = '>'
elif i in all_lines:
symbol = '!'
else:
symbol = ' '
symbol2 = ''
if mark_possible_lines:
symbol2 = ' '
if i in all_lines:
symbol2 = '-'
out_fp.write('%s%s %s' % (symbol, symbol2, line,))
def get_data():
if _t:
return internals.CoverageData(_t)
#######################
#
# singleton functions/top-level API
#
_t = None
def init(exclude_path=None, include_only=None):
from internals import CodeTracer
global _t
if _t is None:
_t = CodeTracer(exclude_path, include_only)
def start(ignore_python_lib=True):
"""
Start tracing code coverage. If 'ignore_python_lib' is True on
initial call, ignore all files that live below the same directory as
the 'os' module.
"""
global _t
if not _t:
exclude_path = None
if ignore_python_lib:
exclude_path = os.path.realpath(os.path.dirname(os.__file__))
init(exclude_path, None)
_t.start()
def start_section(name):
global _t
_t.start_section(name)
def stop_section():
global _t
_t.stop_section()
def stop():
"""
Stop tracing code coverage.
"""
global _t
if _t is not None:
_t.stop()
def get_trace_obj():
"""
Return the (singleton) trace object, if it exists.
"""
return _t
def get_info(section_name=None):
"""
Get the coverage dictionary from the trace object.
"""
if _t:
return get_data().gather_files(section_name)
#############
def display_ast():
l = internals.LineGrabber(open(sys.argv[1]))
l.pretty_print()
print l.lines
def main():
"""
Execute the given Python file with coverage, making it look like it is
__main__.
"""
ignore_pylibs = False
# gather args
n = 1
figleaf_args = []
for n in range(1, len(sys.argv)):
arg = sys.argv[n]
if arg.startswith('-'):
figleaf_args.append(arg)
else:
break
remaining_args = sys.argv[n:]
usage = "usage: %prog [options] [python_script arg1 arg2 ...]"
option_parser = OptionParser(usage=usage)
option_parser.add_option('-i', '--ignore-pylibs', action="store_true",
dest="ignore_pylibs", default=False,
help="ignore Python library modules")
(options, args) = option_parser.parse_args(args=figleaf_args)
assert len(args) == 0
if not remaining_args:
option_parser.error("you must specify a python script to run!")
ignore_pylibs = options.ignore_pylibs
## Reset system args so that the subsequently exec'd file can read
## from sys.argv
sys.argv = remaining_args
sys.path[0] = os.path.dirname(sys.argv[0])
cwd = os.getcwd()
start(ignore_pylibs) # START code coverage
import __main__
try:
execfile(sys.argv[0], __main__.__dict__)
finally:
stop() # STOP code coverage
write_coverage(os.path.join(cwd, '.figleaf'))
|
5]. Projects/Machine Learning & Data Science (ML-DS)/Python/Deep Learning Projects/Computer Vision/010]. Real Time Object Detection/Real_Time_Object_Detection.py
|
MLinesCode/The-Complete-FAANG-Preparation
| 6,969 |
57502
|
import cv2
import numpy as np
thres = 0.45
nms_threshold = 0.2
#Default Camera Capture
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
cap.set(10, 150)
##Importing the COCO dataset in a list
classNames= []
classFile = 'coco.names'
with open(classFile,'rt') as f:
classNames = f.read().rstrip('\n').split('\n')
##Configuring both SSD model and weights (assigning)
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
##dnn-Inbuilt method of OpenCV
net = cv2.dnn_DetectionModel(weightsPath,configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
## using Detect method
while True:
success, img = cap.read()
classIds, confs, bbox = net.detect(img, confThreshold=thres)
bbox = list(bbox)
confs = list(np.array(confs).reshape(1, -1)[0])
confs = list(map(float, confs))
indices = cv2.dnn.NMSBoxes(bbox, confs, thres, nms_threshold)
for i in indices:
i = i[0]
box = bbox[i]
x, y, w, h = box[0], box[1], box[2], box[3]
cv2.rectangle(img, (x, y),(x+w, h+y), color=(0, 255, 0), thickness=2)
cv2.putText(img,classNames[classIds[i][0]-1].upper(), (box[0]+10, box[1]+30),
cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
cv2.imshow("Output", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
ahmia/ahmia/forms.py
|
donno2048/ahmia-site
| 185 |
57545
|
<reponame>donno2048/ahmia-site
"""Forms used in Ahmia."""
import logging
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.utils.translation import ugettext as _
from .validators import validate_onion_url, validate_status
logger = logging.getLogger("ahmia")
class AddOnionForm(forms.Form):
"""Request to add an onion domain."""
onion = forms.CharField(
validators=[validate_onion_url, validate_status],
widget=forms.TextInput(
attrs={'placeholder': _('Enter your .onion address here')}
)
)
def send_new_onion(self):
"""Send a new onion request by email."""
if settings.DEBUG:
return
subject = "Hidden service add onion request"
message = "User requests to add the following onion url {0}".format(
self.cleaned_data['onion']
)
try:
send_mail(subject, message,
settings.DEFAULT_FROM_EMAIL, settings.RECIPIENT_LIST,
fail_silently=False)
except IOError as e:
logger.exception(e)
class ReportOnionForm(forms.Form):
"""Report an onion domain."""
onion = forms.CharField(
validators=[validate_onion_url, validate_status],
widget=forms.TextInput(
attrs={'placeholder': _('Enter your .onion address here')}
)
)
def send_abuse_report(self):
"""Send an abuse report by email."""
if settings.DEBUG:
return
subject = "Hidden service abuse notice"
message = "User sent abuse notice for onion url {0}".format(
self.cleaned_data['onion']
)
send_mail(subject, message,
settings.DEFAULT_FROM_EMAIL, settings.RECIPIENT_LIST,
fail_silently=False)
|
Squest/api/authentication.py
|
LaudateCorpus1/squest
| 112 |
57587
|
<gh_stars>100-1000
from rest_framework import authentication, exceptions
from profiles.models import Token
class TokenAuthentication(authentication.TokenAuthentication):
"""
A custom authentication scheme which enforces Token expiration times.
"""
model = Token
keyword = 'Bearer'
def authenticate_credentials(self, key):
model = self.get_model()
try:
token = model.objects.prefetch_related('user').get(key=key)
except model.DoesNotExist:
raise exceptions.AuthenticationFailed("Invalid token")
# Enforce the Token's expiration time, if one has been set.
if token.is_expired():
raise exceptions.AuthenticationFailed("Token expired")
if not token.user.is_active:
raise exceptions.AuthenticationFailed("User inactive")
return token.user, token
|
lib/python/import_util.py
|
leozz37/makani
| 1,178 |
57629
|
<reponame>leozz37/makani
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to import classes."""
import importlib
def ImportClass(path_to_module):
"""Import the module give the path to its file and the class.
Args:
path_to_module: A string specifying the location of the module.
E.g. makani.analysis.my_checks.MyModule.
Returns:
The module object.
"""
class_path = path_to_module.split('.')
class_name = class_path[-1]
module_path = '.'.join(class_path[:-1])
module = importlib.import_module(module_path)
try:
cls = getattr(module, class_name)
return cls
except AttributeError, e:
raise AttributeError(('Cannot import "%s" from "%s" because of '
'AttributeError: %s.') %
(class_name, path_to_module, e.message))
|
Validation/HcalRecHits/python/NoiseRatesClient_cfi.py
|
ckamtsikis/cmssw
| 852 |
57631
|
<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
noiseratesClient = DQMEDHarvester("NoiseRatesClient",
# outputFile = cms.untracked.string('NoiseRatesHarvestingME.root'),
outputFile = cms.untracked.string(''),
DQMDirName = cms.string("/") # root directory
)
|
code/zoltar_scripts/create_validated_files_db.py
|
eycramer/covid19-forecast-hub
| 428 |
57639
|
import hashlib
import os
import pickle
from zoltpy.quantile_io import json_io_dict_from_quantile_csv_file
from zoltpy import util
from zoltpy.connection import ZoltarConnection
from zoltpy.covid19 import COVID_TARGETS, covid19_row_validator, validate_quantile_csv_file
import glob
import json
import sys
UPDATE = False
if len(sys.argv) >1:
if sys.argv[1].lower() == 'update':
print('Only updating')
UPDATE = True
# util function to get filename from the path
def get_filename_from_path(path):
print(path, path.split(os.path.sep)[-1])
return path.split(os.path.sep)[-1]
g_db = None
def get_db():
global g_db
if g_db is None:
g_db = json.load(open('code/zoltar_scripts/validated_file_db.json'))
return g_db
def dump_db():
global g_db
with open('code/zoltar_scripts/validated_file_db.json', 'w') as fw:
json.dump(g_db, fw, indent=4)
list_of_model_directories = os.listdir('./data-processed/')
for directory in list_of_model_directories:
if "." in directory:
continue
# Get all forecasts in the directory of this model
path = './data-processed/'+directory+'/'
forecasts = glob.glob(path + "*.csv")
for forecast in forecasts:
with open(forecast, "rb") as f:
# Get the current hash of a processed file
checksum = hashlib.md5(f.read()).hexdigest()
db = get_db()
# Validate covid19 file
if UPDATE and db.get(get_filename_from_path(forecast), None) == checksum:
continue
errors_from_validation = validate_quantile_csv_file(forecast)
# Upload forecast
if "no errors" == errors_from_validation:
# Check this hash against the previous version of hash
if db.get(get_filename_from_path(forecast), None) != checksum:
db[get_filename_from_path(forecast)] = checksum
else:
print(errors_from_validation)
print('Dumping db')
dump_db()
|
tests/test_imports.py
|
ethan-jiang-1/ahrs
| 184 |
57645
|
# -*- coding: utf-8 -*-
"""
Test module imports
===================
"""
import sys
def test_module_imports():
try:
import ahrs
except:
sys.exit("[ERROR] Package AHRS not found. Go to root directory of package and type:\n\n\tpip install .\n")
try:
import numpy, scipy, matplotlib
except ModuleNotFoundError:
sys.exit("[ERROR] You don't have the required packages. Try reinstalling the package.")
|
opacus/tests/grad_samples/group_norm_test.py
|
iamgroot42/opacus
| 195 |
57677
|
<reponame>iamgroot42/opacus
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import hypothesis.strategies as st
import torch
import torch.nn as nn
from hypothesis import given, settings
from .common import GradSampleHooks_test
class GroupNorm_test(GradSampleHooks_test):
"""
We only test the case with ``affine=True`` here, because it is the only case that will actually
compute a gradient. There is no grad_sample from this module otherwise.
"""
@given(
N=st.integers(1, 4),
C=st.integers(1, 8),
H=st.integers(5, 10),
W=st.integers(4, 8),
num_groups=st.sampled_from([1, 4, "C"]),
)
@settings(deadline=10000)
def test_3d_input_groups(
self,
N: int,
C: int,
H: int,
W: int,
num_groups: Union[int, str],
):
if num_groups == "C":
num_groups = C
if C % num_groups != 0:
return
x = torch.randn([N, C, H, W])
norm = nn.GroupNorm(num_groups=num_groups, num_channels=C, affine=True)
self.run_test(x, norm, batch_first=True)
|
aliyun-python-sdk-market/aliyunsdkmarket/request/v20151101/DescribeCommoditiesRequest.py
|
yndu13/aliyun-openapi-python-sdk
| 1,001 |
57683
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmarket.endpoint import endpoint_data
class DescribeCommoditiesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Market', '2015-11-01', 'DescribeCommodities','yunmarket')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_CommodityGmtModifiedTo(self):
return self.get_query_params().get('CommodityGmtModifiedTo')
def set_CommodityGmtModifiedTo(self,CommodityGmtModifiedTo):
self.add_query_param('CommodityGmtModifiedTo',CommodityGmtModifiedTo)
def get_CommodityGmtModifiedFrom(self):
return self.get_query_params().get('CommodityGmtModifiedFrom')
def set_CommodityGmtModifiedFrom(self,CommodityGmtModifiedFrom):
self.add_query_param('CommodityGmtModifiedFrom',CommodityGmtModifiedFrom)
def get_CommodityId(self):
return self.get_query_params().get('CommodityId')
def set_CommodityId(self,CommodityId):
self.add_query_param('CommodityId',CommodityId)
def get_CommodityGmtPublishFrom(self):
return self.get_query_params().get('CommodityGmtPublishFrom')
def set_CommodityGmtPublishFrom(self,CommodityGmtPublishFrom):
self.add_query_param('CommodityGmtPublishFrom',CommodityGmtPublishFrom)
def get_CommodityStatuses(self):
return self.get_query_params().get('CommodityStatuses')
def set_CommodityStatuses(self,CommodityStatuses):
self.add_query_param('CommodityStatuses',CommodityStatuses)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_CommodityGmtCreatedFrom(self):
return self.get_query_params().get('CommodityGmtCreatedFrom')
def set_CommodityGmtCreatedFrom(self,CommodityGmtCreatedFrom):
self.add_query_param('CommodityGmtCreatedFrom',CommodityGmtCreatedFrom)
def get_CommodityIds(self):
return self.get_query_params().get('CommodityIds')
def set_CommodityIds(self,CommodityIds):
self.add_query_param('CommodityIds',CommodityIds)
def get_CommodityGmtCreatedTo(self):
return self.get_query_params().get('CommodityGmtCreatedTo')
def set_CommodityGmtCreatedTo(self,CommodityGmtCreatedTo):
self.add_query_param('CommodityGmtCreatedTo',CommodityGmtCreatedTo)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_CommodityGmtPublishTo(self):
return self.get_query_params().get('CommodityGmtPublishTo')
def set_CommodityGmtPublishTo(self,CommodityGmtPublishTo):
self.add_query_param('CommodityGmtPublishTo',CommodityGmtPublishTo)
def get_CommodityAuditStatuses(self):
return self.get_query_params().get('CommodityAuditStatuses')
def set_CommodityAuditStatuses(self,CommodityAuditStatuses):
self.add_query_param('CommodityAuditStatuses',CommodityAuditStatuses)
def get_Properties(self):
return self.get_query_params().get('Properties')
def set_Properties(self,Properties):
self.add_query_param('Properties',Properties)
def get_CommodityCategoryIds(self):
return self.get_query_params().get('CommodityCategoryIds')
def set_CommodityCategoryIds(self,CommodityCategoryIds):
self.add_query_param('CommodityCategoryIds',CommodityCategoryIds)
|
Algo and DSA/LeetCode-Solutions-master/Python/shift-2d-grid.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269 |
57697
|
<filename>Algo and DSA/LeetCode-Solutions-master/Python/shift-2d-grid.py
# Time: O(m * n)
# Space: O(1)
class Solution(object):
def shiftGrid(self, grid, k):
"""
:type grid: List[List[int]]
:type k: int
:rtype: List[List[int]]
"""
def rotate(grids, k):
def reverse(grid, start, end):
while start < end:
start_r, start_c = divmod(start, len(grid[0]))
end_r, end_c = divmod(end-1, len(grid[0]))
grid[start_r][start_c], grid[end_r][end_c] = grid[end_r][end_c], grid[start_r][start_c]
start += 1
end -= 1
k %= len(grid)*len(grid[0])
reverse(grid, 0, len(grid)*len(grid[0]))
reverse(grid, 0, k)
reverse(grid, k, len(grid)*len(grid[0]))
rotate(grid, k)
return grid
|
nvchecker_source/hackage.py
|
trathborne/nvchecker
| 320 |
57728
|
<filename>nvchecker_source/hackage.py
# MIT licensed
# Copyright (c) 2013-2020 lilydjwg <<EMAIL>>, et al.
HACKAGE_URL = 'https://hackage.haskell.org/package/%s/preferred.json'
async def get_version(name, conf, *, cache, **kwargs):
key = conf.get('hackage', name)
data = await cache.get_json(HACKAGE_URL % key)
return data['normal-version'][0]
|
cvlib/utils.py
|
SunNy820828449/cvlib
| 547 |
57738
|
<filename>cvlib/utils.py
import requests
import progressbar as pb
import os
import cv2
import imageio
from imutils import paths
import numpy as np
def download_file(url, file_name, dest_dir):
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
full_path_to_file = dest_dir + os.path.sep + file_name
if os.path.exists(dest_dir + os.path.sep + file_name):
return full_path_to_file
print("Downloading " + file_name + " from " + url)
try:
r = requests.get(url, allow_redirects=True, stream=True)
except:
print("Could not establish connection. Download failed")
return None
file_size = int(r.headers['Content-Length'])
chunk_size = 1024
num_bars = round(file_size / chunk_size)
bar = pb.ProgressBar(maxval=num_bars).start()
if r.status_code != requests.codes.ok:
print("Error occurred while downloading file")
return None
count = 0
with open(full_path_to_file, 'wb') as file:
for chunk in r.iter_content(chunk_size=chunk_size):
file.write(chunk)
bar.update(count)
count +=1
return full_path_to_file
def get_frames(video_file, save_dir=None, save_prefix='', ext='jpg'):
video = cv2.VideoCapture(video_file)
if not video.isOpened():
print("[ERROR] Could not open video file ", video_file)
video.release()
return
frames = []
frame_count = 0
while video.isOpened():
status, frame = video.read()
if not status:
break
frames.append(frame)
if save_dir:
frame_count += 1
out_file = save_dir + os.path.sep + save_prefix + \
'frame_' + str(frame_count) + '.' + ext
print('[INFO] Writing file to .. ', out_file)
cv2.imwrite(out_file, frame)
video.release()
return frames
def animate(src, gif_name, reshape=None, fps=25):
if not isinstance(src, list):
if os.path.isdir(src):
src = list(paths.list_images(src))
for idx, image in enumerate(src):
src[idx] = cv2.imread(image)
if reshape:
for idx, image in enumerate(src):
src[idx] = cv2.resize(image, reshape)
for idx, image in enumerate(src):
src[idx] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
src = np.array(src)
imageio.mimsave(gif_name, src, fps=fps)
|
test/test_template_params.py
|
amizzo87/bernstein-stack
| 358 |
57754
|
<gh_stars>100-1000
import re
from cfn_tools import dump_yaml
from templates import ALL, MASTER, CLUSTER, SCHEDULER, WEBSERVER, WORKERSET
def test_if_important_properties_are_specified():
for template in ALL:
for specs in template["Parameters"].values():
assert "Description" in specs
assert "Type" in specs
if "AllowedPattern" in specs:
assert "ConstraintDescription" in specs
if "MinValue" in specs or "MaxValue" in specs:
assert "ConstraintDescription" in specs
def test_if_properties_are_in_order():
def is_ordered(left, right, array):
left_index = array.index(left) if left in array else None
right_index = array.index(right) if right in array else None
if left_index is None or right_index is None:
return True
return left_index < right_index
for template in ALL:
for spec in template["Parameters"].values():
props = list(spec.keys())
assert is_ordered("Description", "ConstraintDescription", props)
assert is_ordered("ConstraintDescription", "AllowedPattern", props)
assert is_ordered("AllowedPattern", "Default", props)
assert is_ordered("Default", "Type", props)
assert is_ordered("Description", "AllowedValues", props)
assert is_ordered("AllowedValues", "Default", props)
assert is_ordered("ConstraintDescription", "MinValue", props)
assert is_ordered("MinValue", "MaxValue", props)
assert is_ordered("MaxValue", "Default", props)
def test_if_default_value_satisfies_pattern():
for template in ALL:
for specs in template["Parameters"].values():
if "AllowedPattern" in specs and "Default" in specs:
assert re.match(specs["AllowedPattern"], specs["Default"])
def test_if_description_ends_in_dot():
for template in ALL:
for specs in template["Parameters"].values():
assert specs["Description"].endswith(".")
def test_if_constraint_description_ends_in_dot():
for template in ALL:
for specs in template["Parameters"].values():
if "ConstraintDescription" in specs:
assert specs["ConstraintDescription"].endswith(".")
def test_consistency():
pairs = [
(MASTER, CLUSTER),
(CLUSTER, SCHEDULER),
(CLUSTER, WEBSERVER),
(CLUSTER, WORKERSET),
]
for (t_outer, t_inner) in pairs:
for param1, specs1 in t_outer["Parameters"].items():
for param2, specs2 in t_inner["Parameters"].items():
if param1 == param2:
assert (param1, dump_yaml(specs1)) == (param2, dump_yaml(specs2))
|
package_control/deps/oscrypto/version.py
|
evandrocoan/package_control
| 3,373 |
57761
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
__version__ = '1.2.1'
__version_info__ = (1, 2, 1)
|
eosfactory/core/checklist.py
|
tuan-tl/eosfactory
| 255 |
57783
|
'''
'''
import sys
import json
import argparse
import eosfactory.core.utils as utils
import eosfactory.core.config as config
IS_ERROR = 2
IS_WARNING = 1
class Checklist():
def __init__(self, is_html=False, error_codes=""):
self.is_html = is_html
self.html_text = ""
self.is_error = False
self.is_warning = False
self.IS_WINDOWS = utils.is_windows_ubuntu()
self.os_version = utils.os_version()
self.print_msg("EOSFactory version {}".format(config.VERSION))
################################################################################
# psutil
################################################################################
try:
if "psutil" in error_codes:
import psutil1
else:
import psutil
except ImportError:
command = "pip3 install --user psutil"
button = '''
<button
style="text-align:left;"
class="btn ${{BASH_COMMAND}}";
class="btn";
id="Install psutil";
title="Install psutil. Click the button then ENTER in a newly created bash terminal window, to go."
>
{}
</button>
'''.format(command)
self.error_msg('''
Module 'psutil' is not installed. Install it: {}
'''.format(button))
self.print_error(
'''Module 'psutil' is not installed.
Install it: ''')
self.print_code("`{}`\n".format(command))
################################################################################
# termcolor
################################################################################
try:
if "termcolor" in error_codes:
import termcolor1
else:
import termcolor
except ImportError:
command = "pip3 install --user termcolor"
button = '''
<button
style="text-align:left;"
class="btn ${{BASH_COMMAND}}";
class="btn";
id="Install termcolor";
title="Install termcolor. Click the button then ENTER in a newly created bash terminal window, to go."
>
{}
</button>
'''.format(command)
self.error_msg('''
Module 'termcolor' is not installed. Install it: {}
'''.format(button))
self.print_error(
'''Module 'termcolor' is not installed.
Install it: ''')
self.print_code("`{}`\n".format(command))
if self.IS_WINDOWS:
################################################################################
# Ubuntu version
################################################################################
lsb_release, error = utils.spawn(
["lsb_release", "-r", "-s"], raise_exception=False)
if error:
self.error_msg(error)
else:
if "ubuntuversion" in error_codes:
lsb_release = "16.4.1"
ubuntu_version = int(lsb_release.split(".")[0])
if ubuntu_version < config.UBUNTU_VERSION_MIN:
msg = \
'''
WSL Ubuntu version is {}.
EOSIO nodeos can fail with Windows WSL Ubuntu below version 16.
'''.format(lsb_release)
self.status_msg(self.warning(msg))
self.print_warning(msg)
################################################################################
# WSL root
################################################################################
root = config.wsl_root()
if not root or "wslroot" in error_codes:
self.error_msg(
'''Cannot determine the root of the WSL. Set it:
<button
class="btn ${FIND_WSL}";
id="";
title="Click the button to open file dialog. Navigate to a directory containing the Ubuntu file system."
>
Indicate WSL root
</button>
''')
self.print_error(
'''Cannot determine the root of the WSL. To indicate it, use the command:''')
self.print_code("`python3 -m eosfactory.config --wsl_root`\n")
################################################################################
# eosio
################################################################################
eosio_version = config.eosio_version()
if "eosio" in error_codes:
eosio_version = ["", "1.8.0"]
# eosio_version = ["1.3.3", "1.8.0"]
if eosio_version[0]:
self.status_msg(
"Found eosio version {}".format(eosio_version[0]))
self.print_status(
"Found eosio version {}".format(eosio_version[0]))
if not eosio_version[0] or len(eosio_version) > 1\
and not self.equal(eosio_version[0], eosio_version[1]):
command = ""
if self.os_version == utils.UBUNTU:
ubuntu_version = utils.spawn(
["lsb_release", "-r", "-s"],
raise_exception=False)[0].split(".")[0]
if ubuntu_version and ubuntu_version == 16:
command = \
'''sudo apt remove eosio &&\\
wget https://github.com/eosio/eos/releases/download/v1.8.0/eosio_1.8.0-1-ubuntu-16.04_amd64.deb &&\\
sudo apt install ./eosio_1.8.0-1-ubuntu-16.04_amd64.deb
'''
else:
command = \
'''sudo apt remove eosio &&\\
wget https://github.com/eosio/eos/releases/download/v1.8.0/eosio_1.8.0-1-ubuntu-18.04_amd64.deb &&\\
apt install ./eosio_1.8.0-1-ubuntu-18.04_amd64.deb
'''
elif self.os_version == utils.DARWIN:
command = \
'''brew remove eosio &&\\
brew tap eosio/eosio &&\\
brew install eosio
'''
button = '''
<button
style="text-align:left;"
class="btn ${{BASH_COMMAND}}";
class="btn";
id="Install eosio v{0}";
title="Install eosio v{0}. Click the button then ENTER in a newly created bash terminal window, to go."
>
{1}
</button>
'''.format(eosio_version[1], command)
instructions = '<a href="https://github.com/EOSIO/eos">EOSIO installation instructions</a>'
if eosio_version[0] and len(eosio_version) > 1 :
self.warning_msg(
'''
NOTE: EOSFactory was tested with version {0} while installed is {1}. Install eosio v{0}:<br>
{2}
'''.format(
eosio_version[1], eosio_version[0],
button if command else instructions))
self.print_warning(
'''NOTE: EOSFactory was tested with version {0} while installed is {1}. Install eosio v{0}:
'''.format(
eosio_version[1], eosio_version[0])
)
self.print_code(
'''```
{}
```
'''.format(command if command else instructions))
else:
if not "ignoreeoside" in error_codes:
self.warning_msg('''
Cannot determine that eosio is installed as nodeos does not response.<br>
It hangs up sometimes.<br>
EOSFactory expects eosio version {}. Install eosio, if not installed:<br>
{}<br>
'''.format(eosio_version[1], button if command else instructions))
self.print_warning(
'''Cannot determine that eosio is installed as nodeos does not response.
It hangs up sometimes.
EOSFactory expects eosio version {}. Install eosio, if not installed:
'''.format(eosio_version[1]))
self.print_code(
'''```
{}
```
'''.format(command if command else instructions))
################################################################################
# eosio_cdt
################################################################################
eosio_cdt_version = config.eosio_cdt_version()
if "eosio_cdt" in error_codes:
eosio_cdt_version = ["", "1.6.0"]
# eosio_cdt_version = ["1.6.1", "1.6.0"]
if eosio_cdt_version[0]:
self.status_msg(
"Found eosio.cdt version {}.".format(eosio_cdt_version[0]))
self.print_status(
"Found eosio.cdt version {}.".format(eosio_cdt_version[0]))
if not eosio_cdt_version[0] or len(eosio_cdt_version) > 1\
and not self.equal(eosio_cdt_version[0], eosio_cdt_version[1]):
command = ""
if self.os_version == utils.UBUNTU:
command = \
'''sudo apt remove eosio.cdt &&\\
wget https://github.com/eosio/eosio.cdt/releases/download/v1.6.1/eosio.cdt_1.6.1-1_amd64.deb &&\\
sudo apt install ./eosio.cdt_1.6.1-1_amd64.deb
'''
elif self.os_version == utils.DARWIN:
command = \
'''brew remove eosio.cdt &&\\
brew tap eosio/eosio.cdt && \\
brew install eosio.cdt
'''
button = '''
<button
style="text-align:left;"
class="btn ${{BASH_COMMAND}}";
class="btn";
id="Install eosio.cdt v{0}";
title="Install eosio.cdt v{0}. Click the button then ENTER in a newly created bash terminal window, to go."
>
{1}
</button>
'''.format(eosio_cdt_version[1], command)
instructions = '<a href="https://github.com/EOSIO/eosio.cdt">EOSIO.cdt installation instructions</a>'
if eosio_cdt_version[0] and len(eosio_cdt_version) > 1 \
and not eosio_cdt_version[0] == eosio_cdt_version[1]:
self.warning_msg(
'''
NOTE: EOSFactory was tested with version {0} while installed is {1}. Install eosio.cdt v{0}:<br>
{2}
'''.format(
eosio_cdt_version[1], eosio_cdt_version[0],
button if command else instructions))
self.print_warning(
'''NOTE: EOSFactory was tested with version {0} while installed is {1}. Install eosio v{0}:
'''.format(
eosio_cdt_version[1], eosio_cdt_version[0]))
self.print_code(
'''```
{}
```
'''.format(command if command else instructions))
else:
self.error_msg('''
Cannot determine that eosio.cdt is installed as eosio-cpp does not response.<br>
EOSFactory expects eosio.cdt version {}. Install it, if not installed.
{}<br>
'''.format(eosio_cdt_version[1], button if command else instructions))
self.print_error(
'''Cannot determine that eosio.cdt is installed as eosio-cpp does not response.
EOSFactory expects eosio.cdt version {}. Install it, if not installed.
'''.format(eosio_cdt_version[1]))
self.print_code(
'''```
{}
```
'''.format(command if command else instructions))
################################################################################
# Default workspace
################################################################################
try:
contract_workspace_dir = config.contract_workspace_dir()
except:
contract_workspace_dir = None
button = '''
<button
class="btn ${CHANGE_WORKSPACE}";
id="${CHANGE_WORKSPACE}";
title="Set workspace"
>
Set workspace.
</button>
'''
if not contract_workspace_dir or "workspace" in error_codes:
self.error_msg('''
Default workspace is not set, or it does not exist.{}
'''.format(button))
else:
self.status_msg(
'''Default workspace is {}.{}
'''.format(contract_workspace_dir, button))
################################################################################
#
################################################################################
def just_msg(self, msg):
if self.is_html:
msg = msg.replace("&&\\", "&&\\<br>")
print("{}\n".format(msg))
def print_msg(self, msg):
if not self.is_html:
print(msg)
def status_msg(self, msg):
if self.is_html:
msg = msg.replace("&&\\", "&&\\<br>")
print("<li>{}</li>\n".format(msg))
def print_status(self, msg):
if not self.is_html:
msg = msg.replace("<br>", "")
print(msg)
def warning(self, msg):
self.is_warning = True
if self.is_html:
msg = msg.replace("&&\\", "&&\\<br>")
return '<em style="color: ${{WARNING_COLOR}}"> {} </em>'.format(msg)
def warning_msg(self, msg):
self.is_warning = True
if self.is_html:
msg = msg.replace("&&\\", "&&\\<br>")
print('<em style="color: ${{WARNING_COLOR}}"> {} </em>'.format(msg))
def print_warning(self, msg):
if not self.is_html:
msg = msg.replace("<br>", "")
msg = "WARNING:\n" + msg
try:
import termcolor
msg = termcolor.colored(msg, "yellow")
except:
pass
print(msg)
def error_msg(self, msg):
if self.is_html:
self.is_error = True
msg = msg.replace("&&\\", "&&\\<br>")
print(
'<p style="color: ${{ERROR_COLOR}}">ERROR: {}</p>'.format(msg))
def print_error(self, msg):
if not self.is_html:
self.is_error = True
msg = msg.replace("<br>", "")
msg = "ERROR:\n" + msg
try:
import termcolor
msg = termcolor.colored(msg, "magenta")
except:
pass
print(msg)
def print_code(self, msg):
if not self.is_html:
msg = msg.replace("<br>", "")
try:
import termcolor
msg = termcolor.colored(msg, "blue")
except:
pass
print(msg)
def equal(self, version1, version2):
return version1.split(".")[0] == version2.split(".")[0] \
and version1.split(".")[1] == version2.split(".")[1]
def main():
parser = argparse.ArgumentParser(description='''
Check whether installation conditions are fulfilled.
''')
parser.add_argument(
"--html", help="Print html output.", action="store_true")
parser.add_argument("--error", help="Error code", default="")
parser.add_argument(
"--wsl_root", help="Show set the root of the WSL and exit.",
action="store_true")
parser.add_argument(
"--dont_set_workspace", help="Ignore empty workspace directory.",
action="store_true")
parser.add_argument(
"--json", help="Bare config JSON and exit.",
action="store_true")
parser.add_argument(
"--workspace", help="Set contract workspace and exit.",
action="store_true")
parser.add_argument(
"--dependencies", help="Set dependencies and exit.",
action="store_true")
args = parser.parse_args()
if args.json:
print(json.dumps(
config.current_config(dont_set_workspace=args.dont_set_workspace),
sort_keys=True, indent=4))
elif args.wsl_root:
config.wsl_root()
elif args.workspace:
config.set_contract_workspace_dir()
elif args.html:
checklist = Checklist(args.html, args.error)
if checklist.is_error:
sys.exit(IS_ERROR)
elif checklist.is_warning:
sys.exit(IS_WARNING)
elif args.dependencies:
checklist = Checklist(False, args.error)
else:
print("Checking dependencies of EOSFactory...")
checklist = Checklist(False, args.error)
if not checklist.is_error and not checklist.is_warning:
print("... all the dependencies are in place.\n\n")
else:
print(
'''Some functionalities of EOSFactory may fail if the indicated errors are not
corrected.
''')
config.config()
if __name__ == '__main__':
main()
|
opendeep/models/container/__init__.py
|
vitruvianscience/OpenDeep
| 252 |
57832
|
from __future__ import division, absolute_import, print_function
from .prototype import *
from .repeating import *
|
notebook/numpy_matrix_inv.py
|
vhn0912/python-snippets
| 174 |
57857
|
import numpy as np
arr = np.array([[2, 5], [1, 3]])
arr_inv = np.linalg.inv(arr)
print(arr_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat = np.matrix([[2, 5], [1, 3]])
mat_inv = np.linalg.inv(mat)
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat_inv = mat**-1
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat_inv = mat.I
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
result = mat * mat.I
print(result)
# [[1. 0.]
# [0. 1.]]
# print(arr.I)
# AttributeError: 'numpy.ndarray' object has no attribute 'I'
arr_s = np.array([[0, 0], [1, 3]])
# print(np.linalg.inv(arr_s))
# LinAlgError: Singular matrix
arr_pinv = np.linalg.pinv(arr_s)
print(arr_pinv)
# [[0. 0.1]
# [0. 0.3]]
print(arr_s @ arr_inv)
# [[0. 0.]
# [0. 1.]]
print(np.linalg.pinv(arr_pinv))
# [[0. 0.]
# [1. 3.]]
print(np.linalg.inv(arr))
# [[ 3. -5.]
# [-1. 2.]]
print(np.linalg.pinv(arr))
# [[ 3. -5.]
# [-1. 2.]]
mat_s = np.mat([[0, 0], [1, 3]])
# print(np.linalg.inv(mat_s))
# LinAlgError: Singular matrix
# print(mat_s**-1)
# LinAlgError: Singular matrix
# print(mat_s.I)
# LinAlgError: Singular matrix
print(np.linalg.pinv(mat_s))
# [[0. 0.1]
# [0. 0.3]]
|
test/jpypetest/test_customizer.py
|
pitmanst/jpype
| 531 |
57858
|
# *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
import _jpype
import jpype
from jpype.types import *
from jpype import java
import common
try:
import numpy as np
except ImportError:
pass
class CustomizerTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
self.fixture = JClass('jpype.common.Fixture')()
def testSticky(self):
@jpype.JImplementationFor("jpype.override.A")
class _A:
@jpype.JOverride(sticky=True, rename="remove_")
def remove(self, obj):
pass
A = jpype.JClass("jpype.override.A")
B = jpype.JClass("jpype.override.B")
self.assertEqual(A.remove, _A.remove)
self.assertEqual(B.remove, _A.remove)
self.assertEqual(str(A.remove_), "jpype.override.A.remove")
self.assertEqual(str(B.remove_), "jpype.override.B.remove")
|
python_submitty_utils/submitty_utils/db_utils.py
|
zeez2030/Submitty
| 411 |
57894
|
<filename>python_submitty_utils/submitty_utils/db_utils.py
"""Utilities for interacting with databases"""
def generate_connect_string(
host: str,
port: int,
db: str,
user: str,
password: str,
) -> str:
conn_string = f"postgresql://{user}:{password}@"
if not host.startswith('/'):
conn_string += f"{host}:{port}"
conn_string += f"/{db}"
if host.startswith('/'):
conn_string += f"?host={host}"
return conn_string
|
vnpy/app/algo_trading/base.py
|
funrunskypalace/vnpy
| 19,529 |
57908
|
EVENT_ALGO_LOG = "eAlgoLog"
EVENT_ALGO_SETTING = "eAlgoSetting"
EVENT_ALGO_VARIABLES = "eAlgoVariables"
EVENT_ALGO_PARAMETERS = "eAlgoParameters"
APP_NAME = "AlgoTrading"
|
Visual Tracking/LapSRN-tensorflow-master/main.py
|
shikivi/-
| 120 |
57968
|
#! /usr/bin/python
# -*- coding: utf8 -*-
import os, time, random
import numpy as np
import scipy
import tensorflow as tf
import tensorlayer as tl
from model import *
from utils import *
from config import *
###====================== HYPER-PARAMETERS ===========================###
batch_size = config.train.batch_size
patch_size = config.train.in_patch_size
ni = int(np.sqrt(config.train.batch_size))
def compute_charbonnier_loss(tensor1, tensor2, is_mean=True):
epsilon = 1e-6
if is_mean:
loss = tf.reduce_mean(tf.reduce_mean(tf.sqrt(tf.square(tf.subtract(tensor1,tensor2))+epsilon), [1, 2, 3]))
else:
loss = tf.reduce_mean(tf.reduce_sum(tf.sqrt(tf.square(tf.subtract(tensor1,tensor2))+epsilon), [1, 2, 3]))
return loss
def load_file_list():
train_hr_file_list = []
train_lr_file_list = []
valid_hr_file_list = []
valid_lr_file_list = []
directory = config.train.hr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
train_hr_file_list.append("%s%s"%(directory,filename))
directory = config.train.lr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
train_lr_file_list.append("%s%s"%(directory,filename))
directory = config.valid.hr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
valid_hr_file_list.append("%s%s"%(directory,filename))
directory = config.valid.lr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
valid_lr_file_list.append("%s%s"%(directory,filename))
return sorted(train_hr_file_list),sorted(train_lr_file_list),sorted(valid_hr_file_list),sorted(valid_lr_file_list)
def prepare_nn_data(hr_img_list, lr_img_list, idx_img=None):
i = np.random.randint(len(hr_img_list)) if (idx_img is None) else idx_img
input_image = get_imgs_fn(lr_img_list[i])
output_image = get_imgs_fn(hr_img_list[i])
scale = int(output_image.shape[0] / input_image.shape[0])
assert scale == config.model.scale
out_patch_size = patch_size * scale
input_batch = np.empty([batch_size,patch_size,patch_size,3])
output_batch = np.empty([batch_size,out_patch_size,out_patch_size,3])
for idx in range(batch_size):
in_row_ind = random.randint(0,input_image.shape[0]-patch_size)
in_col_ind = random.randint(0,input_image.shape[1]-patch_size)
input_cropped = augment_imgs_fn(input_image[in_row_ind:in_row_ind+patch_size,
in_col_ind:in_col_ind+patch_size])
input_cropped = normalize_imgs_fn(input_cropped)
input_cropped = np.expand_dims(input_cropped,axis=0)
input_batch[idx] = input_cropped
out_row_ind = in_row_ind * scale
out_col_ind = in_col_ind * scale
output_cropped = output_image[out_row_ind:out_row_ind+out_patch_size,
out_col_ind:out_col_ind+out_patch_size]
output_cropped = normalize_imgs_fn(output_cropped)
output_cropped = np.expand_dims(output_cropped,axis=0)
output_batch[idx] = output_cropped
return input_batch,output_batch
def train():
save_dir = "%s/%s_train"%(config.model.result_path,tl.global_flag['mode'])
checkpoint_dir = "%s"%(config.model.checkpoint_path)
tl.files.exists_or_mkdir(save_dir)
tl.files.exists_or_mkdir(checkpoint_dir)
###========================== DEFINE MODEL ============================###
t_image = tf.placeholder('float32', [batch_size, patch_size, patch_size, 3], name='t_image_input')
t_target_image = tf.placeholder('float32', [batch_size, patch_size*config.model.scale, patch_size*config.model.scale, 3], name='t_target_image')
t_target_image_down = tf.image.resize_images(t_target_image, size=[patch_size*2, patch_size*2], method=0, align_corners=False)
net_image2, net_grad2, net_image1, net_grad1 = LapSRN(t_image, is_train=True, reuse=False)
net_image2.print_params(False)
## test inference
net_image_test, net_grad_test, _, _ = LapSRN(t_image, is_train=False, reuse=True)
###========================== DEFINE TRAIN OPS ==========================###
loss2 = compute_charbonnier_loss(net_image2.outputs, t_target_image, is_mean=True)
loss1 = compute_charbonnier_loss(net_image1.outputs, t_target_image_down, is_mean=True)
g_loss = loss1 + loss2 * 4
g_vars = tl.layers.get_variables_with_name('LapSRN', True, True)
with tf.variable_scope('learning_rate'):
lr_v = tf.Variable(config.train.lr_init, trainable=False)
g_optim = tf.train.AdamOptimizer(lr_v, beta1=config.train.beta1).minimize(g_loss, var_list=g_vars)
###========================== RESTORE MODEL =============================###
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
tl.layers.initialize_global_variables(sess)
tl.files.load_and_assign_npz(sess=sess, name=checkpoint_dir+'/params_{}.npz'.format(tl.global_flag['mode']), network=net_image2)
###========================== PRE-LOAD DATA ===========================###
train_hr_list,train_lr_list,valid_hr_list,valid_lr_list = load_file_list()
###========================== INTERMEDIATE RESULT ===============================###
sample_ind = 37
sample_input_imgs,sample_output_imgs = prepare_nn_data(valid_hr_list,valid_lr_list,sample_ind)
tl.vis.save_images(truncate_imgs_fn(sample_input_imgs), [ni, ni], save_dir+'/train_sample_input.png')
tl.vis.save_images(truncate_imgs_fn(sample_output_imgs), [ni, ni], save_dir+'/train_sample_output.png')
###========================== TRAINING ====================###
sess.run(tf.assign(lr_v, config.train.lr_init))
print(" ** learning rate: %f" % config.train.lr_init)
for epoch in range(config.train.n_epoch):
## update learning rate
if epoch != 0 and (epoch % config.train.decay_iter == 0):
lr_decay = config.train.lr_decay ** (epoch // config.train.decay_iter)
lr = config.train.lr_init * lr_decay
sess.run(tf.assign(lr_v, lr))
print(" ** learning rate: %f" % (lr))
epoch_time = time.time()
total_g_loss, n_iter = 0, 0
## load image data
idx_list = np.random.permutation(len(train_hr_list))
for idx_file in range(len(idx_list)):
step_time = time.time()
batch_input_imgs,batch_output_imgs = prepare_nn_data(train_hr_list,train_lr_list,idx_file)
errM, _ = sess.run([g_loss, g_optim], {t_image: batch_input_imgs, t_target_image: batch_output_imgs})
total_g_loss += errM
n_iter += 1
print("[*] Epoch: [%2d/%2d] time: %4.4fs, loss: %.8f" % (epoch, config.train.n_epoch, time.time() - epoch_time, total_g_loss/n_iter))
## save model and evaluation on sample set
if (epoch >= 0):
tl.files.save_npz(net_image2.all_params, name=checkpoint_dir+'/params_{}.npz'.format(tl.global_flag['mode']), sess=sess)
if config.train.dump_intermediate_result is True:
sample_out, sample_grad_out = sess.run([net_image_test.outputs,net_grad_test.outputs], {t_image: sample_input_imgs})#; print('gen sub-image:', out.shape, out.min(), out.max())
tl.vis.save_images(truncate_imgs_fn(sample_out), [ni, ni], save_dir+'/train_predict_%d.png' % epoch)
tl.vis.save_images(truncate_imgs_fn(np.abs(sample_grad_out)), [ni, ni], save_dir+'/train_grad_predict_%d.png' % epoch)
def test(file):
try:
img = get_imgs_fn(file)
except IOError:
print('cannot open %s'%(file))
else:
checkpoint_dir = config.model.checkpoint_path
save_dir = "%s/%s"%(config.model.result_path,tl.global_flag['mode'])
input_image = normalize_imgs_fn(img)
size = input_image.shape
print('Input size: %s,%s,%s'%(size[0],size[1],size[2]))
t_image = tf.placeholder('float32', [None,size[0],size[1],size[2]], name='input_image')
net_g, _, _, _ = LapSRN(t_image, is_train=False, reuse=False)
###========================== RESTORE G =============================###
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
tl.layers.initialize_global_variables(sess)
tl.files.load_and_assign_npz(sess=sess, name=checkpoint_dir+'/params_train.npz', network=net_g)
###======================= TEST =============================###
start_time = time.time()
out = sess.run(net_g.outputs, {t_image: [input_image]})
print("took: %4.4fs" % (time.time() - start_time))
tl.files.exists_or_mkdir(save_dir)
tl.vis.save_image(truncate_imgs_fn(out[0,:,:,:]), save_dir+'/test_out.png')
tl.vis.save_image(input_image, save_dir+'/test_input.png')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', choices=['train','test'], default='train', help='select mode')
parser.add_argument('-f','--file', help='input file')
args = parser.parse_args()
tl.global_flag['mode'] = args.mode
if tl.global_flag['mode'] == 'train':
train()
elif tl.global_flag['mode'] == 'test':
if (args.file is None):
raise Exception("Please enter input file name for test mode")
test(args.file)
else:
raise Exception("Unknow --mode")
|
tests/test_apdu/__init__.py
|
amih90/bacpypes
| 240 |
57973
|
#!/usr/bin/python
"""
Test BACpypes APDU Module
"""
from . import test_max_apdu_length_accepted, test_max_segments_accepted
|
loggroup-lambda-connector/test/test_loggroup_lambda_connector.py
|
langmtt/sumologic-aws-lambda
| 152 |
57982
|
<reponame>langmtt/sumologic-aws-lambda
import subprocess
import time
import unittest
import boto3
from time import sleep
import json
import os
import sys
import datetime
import cfn_flip
# Modify the name of the bucket prefix for testing
BUCKET_PREFIX = "appdevstore"
AWS_REGION = os.environ.get("AWS_DEFAULT_REGION", "us-east-1")
class TestLambda(unittest.TestCase):
'''
fail case newlgrp
success case testlggrp
already exists subscription filter idempotent
'''
def setUp(self):
# Set Up AWS Clients
self.log_group_client = boto3.client('logs', AWS_REGION)
self.cf = boto3.client('cloudformation', AWS_REGION)
self.lambda_cl = boto3.client('lambda', AWS_REGION)
# AWS Resource Names
self.log_group_name = 'testloggroup-%s' % (datetime.datetime.now().strftime("%d-%m-%y-%H-%M-%S"))
self.stack_name = "TestLogGrpConnectorStack-%s" % (datetime.datetime.now().strftime("%d-%m-%y-%H-%M-%S"))
self.bucket_name = get_bucket_name()
self.outputs = {}
# Read template
self.template_data = read_file("test/test-template.yaml")
def tearDown(self):
if self.stack_exists(self.stack_name):
self.delete_stack(self.stack_name)
self.delete_log_group()
def test_1_lambda(self):
self.create_stack(self.stack_name, self.template_data, self.create_stack_parameters("Lambda", "false"))
print("Testing Stack Creation")
self.assertTrue(self.stack_exists(self.stack_name))
self.create_log_group()
self.assert_subscription_filter("SumoLGLBDFilter")
def test_2_existing_logs(self):
self.create_log_group()
self.create_stack(self.stack_name, self.template_data, self.create_stack_parameters("Lambda", "true"))
print("Testing Stack Creation")
self.assertTrue(self.stack_exists(self.stack_name))
#self.invoke_lambda()
self.assert_subscription_filter("SumoLGLBDFilter")
def test_3_kinesis(self):
self.create_stack(self.stack_name, self.template_data, self.create_stack_parameters("Kinesis", "false"))
print("Testing Stack Creation")
self.assertTrue(self.stack_exists(self.stack_name))
self.create_log_group()
self.assert_subscription_filter("SumoLGLBDFilter")
def test_4_existing_kinesis(self):
self.create_log_group()
self.create_stack(self.stack_name, self.template_data, self.create_stack_parameters("Kinesis", "true"))
print("Testing Stack Creation")
self.assertTrue(self.stack_exists(self.stack_name))
#self.invoke_lambda()
self.assert_subscription_filter("SumoLGLBDFilter")
def create_stack_parameters(self, destination, existing, pattern='test'):
return [
{
'ParameterKey': 'DestinationType',
'ParameterValue': destination
},
{
'ParameterKey': 'LogGroupPattern',
'ParameterValue': pattern
},
{
'ParameterKey': 'UseExistingLogs',
'ParameterValue': existing
},
{
'ParameterKey': 'BucketName',
'ParameterValue': self.bucket_name
}
]
def stack_exists(self, stack_name):
stacks = self.cf.list_stacks()['StackSummaries']
for stack in stacks:
if stack['StackStatus'] == 'DELETE_COMPLETE':
continue
if stack_name == stack['StackName'] and stack['StackStatus'] == 'CREATE_COMPLETE':
print("%s stack exists" % stack_name)
stack_data = self.cf.describe_stacks(StackName=self.stack_name)
outputs_stacks = stack_data["Stacks"][0]["Outputs"]
for output in outputs_stacks:
self.outputs[output["OutputKey"]] = output["OutputValue"]
return True
return False
def create_stack(self, stack_name, template_data, parameters):
params = {
'StackName': stack_name,
'TemplateBody': template_data,
'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_AUTO_EXPAND'],
'Parameters': parameters
}
stack_result = self.cf.create_stack(**params)
print('Creating {}'.format(stack_name), stack_result)
waiter = self.cf.get_waiter('stack_create_complete')
print("...waiting for stack to be ready...")
waiter.wait(StackName=stack_name)
def delete_stack(self, stack_name):
params = {
'StackName': stack_name
}
stack_result = self.cf.delete_stack(**params)
print('Deleting {}'.format(stack_name), stack_result)
waiter = self.cf.get_waiter('stack_delete_complete')
print("...waiting for stack to be removed...")
waiter.wait(StackName=stack_name)
def delete_log_group(self):
response = self.log_group_client.delete_log_group(logGroupName=self.log_group_name)
print("deleting log group", response)
def create_log_group(self):
response = self.log_group_client.create_log_group(logGroupName=self.log_group_name)
print("creating log group", response)
def assert_subscription_filter(self, filter_name):
sleep(60)
response = self.log_group_client.describe_subscription_filters(
logGroupName=self.log_group_name,
filterNamePrefix=filter_name
)
print("testing subscription filter exists", response)
# Add multiple assert for name, destination arn, role arn.
assert len(response['subscriptionFilters']) > 0
assert response['subscriptionFilters'][0]['filterName'] == filter_name
assert response['subscriptionFilters'][0]['logGroupName'] == self.log_group_name
assert response['subscriptionFilters'][0]['destinationArn'] == self.outputs["destinationArn"]
if "roleArn" in self.outputs:
assert response['subscriptionFilters'][0]['roleArn'] == self.outputs["roleArn"]
def _parse_template(self, template_name):
output_file = cfn_flip.to_json(read_file(template_name))
template_data = json.loads(output_file)
print("Validating cloudformation template")
self.cf.validate_template(TemplateBody=template_data)
return template_data
def invoke_lambda(self):
lambda_arn = self.outputs["LambdaARN"]
output = self.lambda_cl.invoke(
FunctionName=lambda_arn,
InvocationType='Event',
LogType='None',
Payload=bytes(json.dumps({"value": "test"}), "utf-8")
)
if output["StatusCode"] != 202:
raise Exception("Failed to invoke Lambda")
time.sleep(60)
def read_file(file_path):
file_path = os.path.join(os.path.dirname(os.getcwd()), file_path)
with open(file_path, "r") as f:
return f.read().strip()
def get_bucket_name():
return '%s-%s' % (BUCKET_PREFIX, AWS_REGION)
def upload_to_s3(file_path):
print("Uploading %s file in S3 region: %s" % (file_path, AWS_REGION))
s3 = boto3.client('s3', AWS_REGION)
bucket_name = get_bucket_name()
key = os.path.basename(file_path)
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_path)
s3.upload_file(os.path.join(__file__, filename), bucket_name, key, ExtraArgs={'ACL': 'public-read'})
def create_sam_package_and_upload():
template_file_path = os.path.join(os.path.dirname(os.getcwd()), "sam/template.yaml")
packaged_template_path = os.path.join(os.path.dirname(os.getcwd()), "sam/packaged.yaml")
# Create packaged template
run_command(["sam", "package", "--template-file", template_file_path,
"--output-template-file", packaged_template_path, "--s3-bucket", get_bucket_name(),
"--s3-prefix", "test-log-group-lambda-connector"])
# Upload the packaged template to S3
upload_to_s3(packaged_template_path)
def _run(command, input=None, check=False, **kwargs):
if sys.version_info >= (3, 5):
return subprocess.run(command, capture_output=True)
if input is not None:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = subprocess.PIPE
process = subprocess.Popen(command, **kwargs)
try:
stdout, stderr = process.communicate(input)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if check and retcode:
raise subprocess.CalledProcessError(
retcode, process.args, output=stdout, stderr=stderr)
return retcode, stdout, stderr
def run_command(cmdargs):
resp = _run(cmdargs)
if len(resp.stderr.decode()) > 0:
# traceback.print_exc()
raise Exception("Error in run command %s cmd: %s" % (resp, cmdargs))
return resp.stdout
if __name__ == '__main__':
if len(sys.argv) > 1:
BUCKET_PREFIX = sys.argv.pop()
create_sam_package_and_upload()
# upload_code_in_multiple_regions()
# Run the test cases
unittest.main()
|
DiffAugment-biggan-imagenet/compare_gan/gans/modular_gan_tpu_test.py
|
Rian-T/data-efficient-gans
| 1,902 |
57997
|
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests TPU specfic parts of ModularGAN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import parameterized
from compare_gan import datasets
from compare_gan import test_utils
from compare_gan.gans import consts as c
from compare_gan.gans.modular_gan import ModularGAN
import tensorflow as tf
FLAGS = flags.FLAGS
class ModularGanTpuTest(parameterized.TestCase, test_utils.CompareGanTestCase):
def setUp(self):
super(ModularGanTpuTest, self).setUp()
self.model_dir = self._get_empty_model_dir()
self.run_config = tf.contrib.tpu.RunConfig(
model_dir=self.model_dir,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
@parameterized.parameters([1, 2, 5])
def testBatchSize(self, disc_iters, use_tpu=True):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 128,
"disc_iters": disc_iters,
}
batch_size = 16
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
model_dir=self.model_dir)
estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
use_tpu=True)
estimator.train(gan.input_fn, steps=1)
gen_args = gan.generator.call_arg_list
disc_args = gan.discriminator.call_arg_list
self.assertLen(gen_args, disc_iters + 1) # D steps, G step.
self.assertLen(disc_args, disc_iters + 1) # D steps, G step.
for args in gen_args:
self.assertAllEqual(args["z"].shape.as_list(), [8, 128])
for args in disc_args:
self.assertAllEqual(args["x"].shape.as_list(), [16, 32, 32, 3])
@parameterized.parameters([1, 2, 5])
def testBatchSizeSplitDiscCalls(self, disc_iters):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 128,
"disc_iters": disc_iters,
}
batch_size = 16
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
deprecated_split_disc_calls=True,
model_dir=self.model_dir)
estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
use_tpu=True)
estimator.train(gan.input_fn, steps=1)
gen_args = gan.generator.call_arg_list
disc_args = gan.discriminator.call_arg_list
self.assertLen(gen_args, disc_iters + 1) # D steps, G step.
# Each D and G step calls discriminator twice: for real and fake images.
self.assertLen(disc_args, 2 * (disc_iters + 1))
for args in gen_args:
self.assertAllEqual(args["z"].shape.as_list(), [8, 128])
for args in disc_args:
self.assertAllEqual(args["x"].shape.as_list(), [8, 32, 32, 3])
@parameterized.parameters([1, 2, 5])
def testBatchSizeExperimentalJointGenForDisc(self, disc_iters):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 128,
"disc_iters": disc_iters,
}
batch_size = 16
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
experimental_joint_gen_for_disc=True,
model_dir=self.model_dir)
estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
use_tpu=True)
estimator.train(gan.input_fn, steps=1)
gen_args = gan.generator.call_arg_list
disc_args = gan.discriminator.call_arg_list
self.assertLen(gen_args, 2)
self.assertLen(disc_args, disc_iters + 1)
self.assertAllEqual(gen_args[0]["z"].shape.as_list(), [8 * disc_iters, 128])
self.assertAllEqual(gen_args[1]["z"].shape.as_list(), [8, 128])
for args in disc_args:
self.assertAllEqual(args["x"].shape.as_list(), [16, 32, 32, 3])
if __name__ == "__main__":
tf.test.main()
|
macros/_chrome.py
|
swipswaps/code-by-voice
| 139 |
58012
|
<gh_stars>100-1000
from dragonfly import (Grammar, AppContext, MappingRule, Dictation, Key, Text, Integer, Mimic)
context = AppContext(executable="chrome")
grammar = Grammar("chrome", context=context)
noSpaceNoCaps = Mimic("\\no-caps-on") + Mimic("\\no-space-on")
rules = MappingRule(
name = "chrome",
mapping = {
"edit": Key("w-a"),
"reload" : Key("f5"),
"open": Key("escape, o"),
"jump": Key("f"),
"new tab": Key("t"),
"search tabs": Key("T"),
"find": Key("slash"),
"console": Key("cs-j"),
"close tab": Key("c-w"),
"escape": Key('escape'),
},
extras = [
Dictation("text"),
Integer("n", 0, 20000),
],
defaults = {
"n" : 1
}
)
grammar.add_rule(rules)
grammar.load()
def unload():
global grammar
if grammar: grammar.unload()
grammar = None
|
scripts/datasets.py
|
HPG-AI/bachbot
| 380 |
58019
|
<gh_stars>100-1000
import click
import json, cPickle
import requests, zipfile
import os, glob
from music21 import analysis, converter, corpus, meter
from music21.note import Note
from constants import *
@click.group()
def datasets():
"""Constructs various datasets."""
pass
@click.command()
@click.option('--keep-fermatas', type=bool, default=True)
@click.option('--subset', type=bool, default=False)
@click.option('--mono', type=bool, default=False, help='Extract only monophonic Soprano part')
@click.option('--parts_to_mask', '-m', multiple=True, type=str)
def prepare(keep_fermatas, subset, mono, parts_to_mask=[]):
"""
Prepares polyphonic scores using a chord tuple representation.
Each score is transformed into a sequence of tuples with a constant
timestep of (1/`FRAMES_PER_CROTCHET`) crotchets between consecutive chords.
Each encoded chord has the following format:
Notes : List[(
Midi: Int,
Tied : Bool (true if note is continuation of previous note)
)]
"""
txt_to_utf, utf_to_txt = build_vocabulary()
txt_to_utf[BLANK_MASK_TXT] = BLANK_MASK_UTF # don't add to `utf_to_txt` because samples should never contain BLANK_MASK
it = iter_standardized_chorales()
if subset:
it = [next(it) for _ in range(5)]
for score in it:
bwv_id = score.metadata.title
print('Processing BWV {0}'.format(bwv_id))
# remove all except 'Soprano' part if --mono
if mono:
for part in score.parts:
if part.id != 'Soprano':
score.remove(part)
#key = score.analyze('key') # TODO: filter to only majors for task?
encoded_score = encode_score(score, keep_fermatas=keep_fermatas, parts_to_mask=parts_to_mask)
encoded_score_txt = to_text(encoded_score)
fname = 'BWV-{0}'.format(bwv_id)
if mono:
fname += '-mono'
if parts_to_mask:
fname += '-mask-{0}'.format('-'.join(parts_to_mask))
else:
fname += '-nomask'
if keep_fermatas:
fname += '-fermatas'
else:
fname += '-nofermatas'
out_path = SCRATCH_DIR + '/{0}'.format(fname)
print 'Writing {0}'.format(out_path)
with open(out_path + '.txt', 'w') as fd:
fd.write('\n'.join(encoded_score_txt))
with open(out_path + '.utf', 'w') as fd:
fd.write(to_utf(txt_to_utf, encoded_score_txt))
@click.command()
@click.argument('files', nargs=-1, required=True)
@click.option('-o', '--output', type=click.File('wb'), default=SCRATCH_DIR + '/concat_corpus.txt')
def concatenate_corpus(files, output):
"""Concatenates individual files together into single corpus.
Try `bachbot concatenate_corpus scratch/*.utf`.
"""
print 'Writing concatenated corpus to {0}'.format(output.name)
for fp in files:
with open(fp, 'rb') as fd:
output.write(''.join(filter(lambda x: x != '\n', fd.read())))
@click.command()
@click.option('--utf-to-txt-json', type=click.File('rb'), default=SCRATCH_DIR + '/utf_to_txt.json')
@click.argument('in-file', type=click.File('rb'))
@click.argument('out-file', type=click.File('wb'))
def encode_text(utf_to_txt_json, in_file, out_file):
utf_to_txt = json.load(utf_to_txt_json)
txt_to_utf = { v:k for k,v in utf_to_txt.items() }
out_file.write(to_utf(txt_to_utf, in_file))
def standardize_key(score):
"""Converts into the key of C major or A minor.
Adapted from https://gist.github.com/aldous-rey/68c6c43450517aa47474
"""
# major conversions
majors = dict([("A-", 4),("A", 3),("B-", 2),("B", 1),("C", 0),("C#",-1),("D-", -1),("D", -2),("E-", -3),("E", -4),("F", -5),("F#",6),("G-", 6),("G", 5)])
minors = dict([("A-", 1),("A", 0),("B-", -1),("B", -2),("C", -3),("C#",-4),("D-", -4),("D", -5),("E-", 6),("E", 5),("F", 4),("F#",3),("G-", 3),("G", 2)])
# transpose score
key = score.analyze('key')
if key.mode == "major":
halfSteps = majors[key.tonic.name]
elif key.mode == "minor":
halfSteps = minors[key.tonic.name]
tScore = score.transpose(halfSteps)
# transpose key signature
for ks in tScore.flat.getKeySignatures():
ks.transpose(halfSteps, inPlace=True)
return tScore
def extract_SATB(score):
"""
Extracts the Soprano, Alto, Tenor, and Bass parts from a piece. The returned score is guaranteed
to have parts with names 'Soprano', 'Alto', 'Tenor', and 'Bass'.
This method mutates its arguments.
"""
ids = dict()
ids['Soprano'] = {
'Soprano',
'S.',
'Soprano 1', # NOTE: soprano1 or soprano2?
'Soprano\rOboe 1\rViolin1'}
ids['Alto'] = { 'Alto', 'A.'}
ids['Tenor'] = { 'Tenor', 'T.'}
ids['Bass'] = { 'Bass', 'B.'}
id_to_name = {id:name for name in ids for id in ids[name] }
for part in score.parts:
if part.id in id_to_name:
part.id = id_to_name[part.id]
else:
score.remove(part)
return score
def build_vocabulary():
if os.path.exists(SCRATCH_DIR + '/utf_to_txt.json'):
with open(SCRATCH_DIR + '/utf_to_txt.json', 'r') as f:
utf_to_txt = json.load(f)
txt_to_utf = {v:k for k,v in utf_to_txt.items()}
else:
vocabulary = set([str((midi, tie)) for tie in [True, False] for midi in range(128)]) # all MIDI notes and tie/notie
vocabulary.update(set([CHORD_BOUNDARY_DELIM, FERMATA_SYM]))
txt_to_utf = dict(map(lambda x: (x[1], unichr(x[0])), enumerate(vocabulary)))
txt_to_utf['START'] = START_DELIM
txt_to_utf['END'] = END_DELIM
utf_to_txt = {utf:txt for txt,utf in txt_to_utf.items()}
# save vocabulary
with open(SCRATCH_DIR + '/utf_to_txt.json', 'w') as fd:
print 'Writing vocabulary to ' + SCRATCH_DIR + '/utf_to_txt.json'
json.dump(utf_to_txt, fd)
return txt_to_utf, utf_to_txt
def iter_standardized_chorales():
"Iterator over 4/4 Bach chorales standardized to Cmaj/Amin with SATB parts extracted."
for score in corpus.chorales.Iterator(
numberingSystem='bwv',
returnType='stream'):
if score.getTimeSignatures()[0].ratioString == '4/4': # only consider 4/4
yield extract_SATB(standardize_key(score))
@click.command()
@click.argument('in_path', type=click.Path(exists=True))
@click.argument('outfile', type=click.File('w'))
def prepare_harm_input(in_path, outfile):
"Prepares and encodes a musicXML file containing a monophonic melody line as a Soprano voice to harmonize."
txt_to_utf, utf_to_txt = build_vocabulary()
txt_to_utf[BLANK_MASK_TXT] = BLANK_MASK_UTF # don't add to `utf_to_txt` because samples should never contain BLANK_MASK
sc = converter.parseFile(in_path)
encoded_score = []
for note in sc.flat.notesAndRests:
if note.isRest:
encoded_score.extend((int(note.quarterLength * FRAMES_PER_CROTCHET)) * [[]])
else:
has_fermata = any(map(lambda e: e.isClassOrSubclass(('Fermata',)), note.expressions))
has_tie = note.tie is not None and note.tie.type != 'start'
encoded_chord = [(note.pitch.midi, has_tie)] + ([BLANK_MASK_TXT for _ in range(3)])
encoded_score.append((has_fermata, encoded_chord))
encoded_score.extend((int(note.quarterLength * FRAMES_PER_CROTCHET) - 1) * [
(has_fermata,
map(lambda note: BLANK_MASK_TXT if note == BLANK_MASK_TXT else (note[0], True), encoded_chord))
])
outfile.write(to_utf(txt_to_utf, to_text(encoded_score)))
def encode_score(score, keep_fermatas=True, parts_to_mask=[]):
"""
Encodes a music21 score into a List of chords, where each chord is represented with
a (Fermata :: Bool, List[(Note :: Integer, Tie :: Bool)]).
If `keep_fermatas` is True, all `has_fermata`s will be False.
All tokens from parts in `parts_to_mask` will have output tokens `BLANK_MASK_TXT`.
Time is discretized such that each crotchet occupies `FRAMES_PER_CROTCHET` frames.
"""
encoded_score = []
for chord in (score
.quantize((FRAMES_PER_CROTCHET,))
.chordify(addPartIdAsGroup=bool(parts_to_mask))
.flat
.notesAndRests): # aggregate parts, remove markup
# expand chord/rest s.t. constant timestep between frames
if chord.isRest:
encoded_score.extend((int(chord.quarterLength * FRAMES_PER_CROTCHET)) * [[]])
else:
has_fermata = (keep_fermatas) and any(map(lambda e: e.isClassOrSubclass(('Fermata',)), chord.expressions))
encoded_chord = []
# TODO: sorts Soprano, Bass, Alto, Tenor without breaking ties
# c = chord.sortAscending()
# sorted_notes = [c[-1], c[0]] + c[1:-1]
# for note in sorted_notes:
for note in chord:
if parts_to_mask and note.pitch.groups[0] in parts_to_mask:
encoded_chord.append(BLANK_MASK_TXT)
else:
has_tie = note.tie is not None and note.tie.type != 'start'
encoded_chord.append((note.pitch.midi, has_tie))
encoded_score.append((has_fermata, encoded_chord))
# repeat pitches to expand chord into multiple frames
# all repeated frames when expanding a chord should be tied
encoded_score.extend((int(chord.quarterLength * FRAMES_PER_CROTCHET) - 1) * [
(has_fermata,
map(lambda note: BLANK_MASK_TXT if note == BLANK_MASK_TXT else (note[0], True), encoded_chord))
])
return encoded_score
def to_utf(txt_to_utf, score_txt):
"""
Converts a text-encoded score into UTF encoding (appending start/end delimiters).
Throws `KeyError` when out-of-vocabulary token is encountered
"""
return START_DELIM +\
''.join(map(lambda txt: txt_to_utf[txt.strip()], score_txt)) +\
END_DELIM
def to_text(encoded_score):
"Converts a Python encoded score into plain-text."
encoded_score_plaintext = []
for i,chord_pair in enumerate(encoded_score):
if i > 0:
encoded_score_plaintext.append(CHORD_BOUNDARY_DELIM) # chord boundary delimiter
if len(chord_pair) > 0:
is_fermata, chord = chord_pair
if is_fermata:
encoded_score_plaintext.append(FERMATA_SYM)
for note in chord:
encoded_score_plaintext.append(str(note))
return encoded_score_plaintext
map(datasets.add_command, [
prepare,
prepare_harm_input,
encode_text,
concatenate_corpus,
])
|
OracleWebCenterContent/dockerfiles/12.2.1.4/container-scripts/setTopology.py
|
rmohare/oracle-product-images
| 5,519 |
58050
|
#!/usr/bin/python
# Copyright (c) 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
#
# ==============================================
import sys
admin_name = os.environ['ADMIN_USERNAME']
admin_pass = os.environ['ADMIN_PASSWORD']
admin_port = os.environ['ADMIN_PORT']
admin_container = os.environ['ADMIN_SERVER_CONTAINER_NAME']
admin_host = sys.argv[1]
url = admin_container + ':' + admin_port
print('url :' + url);
connect(admin_name, admin_pass, url)
edit()
startEdit()
cd('/Servers/IBR_server1')
cmo.setMachine(getMBean('/Machines/machine1'))
cmo.setCluster(getMBean('/Clusters/ibr_cluster1'))
cd('/Servers/UCM_server1')
cmo.setMachine(getMBean('/Machines/machine1'))
cmo.setCluster(getMBean('/Clusters/ucm_cluster1'))
cd('/CoherenceClusterSystemResources/defaultCoherenceCluster/CoherenceClusterResource/defaultCoherenceCluster/CoherenceClusterParams/defaultCoherenceCluster/CoherenceClusterWellKnownAddresses/defaultCoherenceCluster')
cmo.createCoherenceClusterWellKnownAddress('WKA-0')
cd('/CoherenceClusterSystemResources/defaultCoherenceCluster/CoherenceClusterResource/defaultCoherenceCluster/CoherenceClusterParams/defaultCoherenceCluster/CoherenceClusterWellKnownAddresses/defaultCoherenceCluster/CoherenceClusterWellKnownAddresses/WKA-0')
cmo.setListenAddress('localhost')
save()
activate()
disconnect()
exit()
|
examples/fred_series_to_bigquery_example.py
|
Ressmann/starthinker
| 138 |
58055
|
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.fred.run import fred
def recipe_fred_series_to_bigquery(config, auth, fred_api_key, fred_series_id, fred_units, fred_frequency, fred_aggregation_method, project, dataset):
"""Download federal reserve series.
Args:
auth (authentication) - Credentials used for writing data.
fred_api_key (string) - 32 character alpha-numeric lowercase string.
fred_series_id (string) - Series ID to pull data from.
fred_units (choice) - A key that indicates a data value transformation.
fred_frequency (choice) - An optional parameter that indicates a lower frequency to aggregate values to.
fred_aggregation_method (choice) - A key that indicates the aggregation method used for frequency aggregation.
project (string) - Existing BigQuery project.
dataset (string) - Existing BigQuery dataset.
"""
fred(config, {
'auth':auth,
'api_key':fred_api_key,
'frequency':fred_frequency,
'series':[
{
'series_id':fred_series_id,
'units':fred_units,
'aggregation_method':fred_aggregation_method
}
],
'out':{
'bigquery':{
'project':project,
'dataset':dataset
}
}
})
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Download federal reserve series.
1. Specify the values for a <a href='https://fred.stlouisfed.org/docs/api/fred/series_observations.html' target='_blank'>Fred observations API call</a>.
2. A table will appear in the dataset.
"""))
parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
parser.add_argument("-auth", help="Credentials used for writing data.", default='service')
parser.add_argument("-fred_api_key", help="32 character alpha-numeric lowercase string.", default='')
parser.add_argument("-fred_series_id", help="Series ID to pull data from.", default='')
parser.add_argument("-fred_units", help="A key that indicates a data value transformation.", default='lin')
parser.add_argument("-fred_frequency", help="An optional parameter that indicates a lower frequency to aggregate values to.", default='')
parser.add_argument("-fred_aggregation_method", help="A key that indicates the aggregation method used for frequency aggregation.", default='avg')
parser.add_argument("-project", help="Existing BigQuery project.", default='')
parser.add_argument("-dataset", help="Existing BigQuery dataset.", default='')
args = parser.parse_args()
config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)
recipe_fred_series_to_bigquery(config, args.auth, args.fred_api_key, args.fred_series_id, args.fred_units, args.fred_frequency, args.fred_aggregation_method, args.project, args.dataset)
|
src/lib/searchio/cmd/__init__.py
|
cgxxv/alfred-searchio
| 304 |
58087
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2016-12-17
#
"""CLI program sub-commands."""
|
evaluation.py
|
unluckydan/deep_metric_learning
| 169 |
58107
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 12:47:00 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AffinityPropagation
from sklearn.metrics import f1_score
from sklearn.metrics import normalized_mutual_info_score
from sklearn.preprocessing import LabelEncoder
def ap_cluster_k(x, K, preference_init=-1.0, max_iter=30,
c=None, iter_finetune=10):
'''
Clustering of x by affinity propagation which the number of cluster is K.
args:
x (ndarray):
Data matrix.
K (int):
Target number of clusters.
max_iter (int):
Number of trials for bisection search.
c (ndarray, optional):
Class labels of x. If this parameter is specified, the function
try to find the better solution by random search.
iter_finetune (int):
Number of steps for the random search.
'''
# first, search rough lower bound of the preference
assert preference_init < 0, "preference_init must be negative."
p = float(preference_init) # preference parameter
p_upper = 0
for i in range(5):
ap = AffinityPropagation(preference=p).fit(y)
k_current = len(ap.cluster_centers_indices_)
if k_current > K:
p_upper = p
k_upper = k_current
p *= 10
else:
p_lower = p
k_lower = k_current
break
else:
raise RuntimeError("Can't find initial lower bound for preference."
" Try another value of p_initial.")
# search the preference by bisection method
for i in range(max_iter):
p = (p_lower + p_upper) / 2
ap = AffinityPropagation(preference=p).fit(y)
k_current = len(ap.cluster_centers_indices_)
print('K = {}, k_current = {}, p = {}'.format(K, k_current, p))
print('{}:{}, {}:{}, {}:{}'.format(k_lower, p_lower, k_current, p,
k_upper, p_upper))
# if the current k goes out of bounds then retry with perturbed p
while k_current < k_lower or k_current > k_upper:
print("retry")
p += np.random.uniform(p_lower, p_upper) / 10
ap = AffinityPropagation(preference=p).fit(y)
k_current = len(ap.cluster_centers_indices_)
print('K = {}, k_current = {}, p = {}'.format(K, k_current, p))
print('{}:{}, {}:{}, {}:{}'.format(k_lower, p_lower, k_current, p,
k_upper, p_upper))
if k_current < K:
p_lower = p
k_lower = k_current
elif k_current > K:
p_upper = p
k_upper = k_current
else:
break
else:
raise RuntimeError("Can't find a preference to form K clusters."
" Try another value of p_initial.")
if c is None:
return ap
# Search further better preference in terms of NMI score by random search
p_best = p
score_best = normalized_mutual_info_score(c, ap.predict(y))
print('initial score:', score_best)
print()
for i in range(iter_finetune):
p = np.random.normal(p_best, (p_upper - p_lower) / 2)
if p < p_lower or p > p_upper: # where p is rejected
print('reject')
continue
ap = AffinityPropagation(preference=p).fit(y)
k_current = len(ap.cluster_centers_indices_)
if k_current < K and p > p_lower:
p_lower = p
elif k_current > K and p < p_upper:
p_upper = p
else: # wgere k_current is K
score = normalized_mutual_info_score(c, ap.predict(y))
if score > score_best:
print("update p {} -> {}".format(p_best, p))
p_best = p
score_best = score
print('p: {}, {}, {}'.format(p_lower, p, p_upper))
print('score: {}'.format(score_best))
print()
return AffinityPropagation(preference=p_best).fit(y)
if __name__ == '__main__':
y_train = np.load('y_train.npy')
c_train = np.load('c_train.npy').ravel()
y_test = np.load('y_test.npy')
c_test = np.load('c_test.npy').ravel()
c_train = LabelEncoder().fit_transform(c_train)
c_test = LabelEncoder().fit_transform(c_test)
K = 40
# K = len(np.unique(c_train))
y = y_train[c_train.ravel() < K]
c = c_train[c_train < K]
# y = y_test[c_test.ravel() < K]
# c = c_test[c_test < K]
ap = ap_cluster_k(y, K, preference_init=-1.0, c=c, iter_finetune=30)
c_pred = ap.predict(y)
print(normalized_mutual_info_score(c, c_pred))
plt.plot(np.vstack((c_pred, c)).T)
plt.show()
# print f1_score(c, c_pred)
|
__other__/parser/parser.py
|
whitmans-max/python-examples
| 140 |
58123
|
import sys
#print sys.argv[0], len( sys.argv )
if len(sys.argv) > 1:
with open(sys.argv[1], 'r') as f_in:
result = 0
for line in f_in:
data = line.strip().split()
# print('data:', data)
if data[0] == "+":
result += float(data[1])
elif data[0] == "-":
result -= float(data[1])
elif data[0] == "=":
print("RESULT:", result)
result = 0
else:
print('unknow:', data)
|
tests/core/transaction-utils/conftest.py
|
ggs134/py-evm
| 1,641 |
58175
|
import pytest
# from https://github.com/ethereum/tests/blob/c951a3c105d600ccd8f1c3fc87856b2bcca3df0a/BasicTests/txtest.json # noqa: E501
TRANSACTION_FIXTURES = [
{
"chainId": None,
"key": "c85ef7d79691fe79573b1a7064c19c1a9819ebdbd1faaab1a8ec92344438aaf4",
"nonce": 0,
"gasPrice": 1000000000000,
"gas": 10000,
"to": "13978aee95f38490e9769c39b2773ed763d9cd5f",
"value": 10000000000000000,
"data": "",
"signed": "f86b8085e8d4a510008227109413978aee95f38490e9769c39b2773ed763d9cd5f872386f26fc10000801ba0eab47c1a49bf2fe5d40e01d313900e19ca485867d462fe06e139e3a536c6d4f4a014a569d327dcda4b29f74f93c0e9729d2f49ad726e703f9cd90dbb0fbf6649f1" # noqa: E501
},
{
"chainId": None,
"key": "c87f65ff3f271bf5dc8643484f66b200109caffe4bf98c4cb393dc35740b28c0",
"nonce": 0,
"gasPrice": 1000000000000,
"gas": 10000,
"to": "",
"value": 0,
"data": "<KEY>", # noqa: E501
"signed": "<KEY>" # noqa: E501
},
{
"chainId": 1,
"key": "0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318",
"nonce": 0,
"gasPrice": 234567897654321,
"gas": 2000000,
"to": "0xF0109fC8DF283027b6285cc889F5aA624EaC1F55",
"value": 1000000000,
"data": "",
"signed": "0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428", # noqa: E501
},
]
# Hand-built for 2930
TYPED_TRANSACTION_FIXTURES = [
{
"chainId": 1,
"nonce": 3,
"gasPrice": 1,
"gas": 25000,
"to": "b94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"value": 10,
"data": "5544",
"access_list": [
[b'\xf0' * 20, [b'\0' * 32, b'\xff' * 32]],
],
"key": (b'\0' * 31) + b'\x01',
"sender": b'~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf',
"intrinsic_gas": 21000 + 32 + 2400 + 1900 * 2,
"for_signing": '01f87a0103018261a894b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a825544f85994f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f842a00000000000000000000000000000000000000000000000000000000000000000a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff', # noqa: E501
"signed": '01f8bf0103018261a894b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a825544f85bf85994f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f842a00000000000000000000000000000000000000000000000000000000000000000a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80a017047e844eef895a876778a828731a33b67863aea7b9591a0001651ee47322faa043b4d0e8d59e8663c813ffa1bb99f020278a139f07c47f3858653071b3cec6b3', # noqa: E501
"hash": "13ab8b6371d8873405db20104705d7fecee2f9083f247250519e4b4c568b17fb",
}
]
@pytest.fixture(params=range(len(TRANSACTION_FIXTURES)))
def txn_fixture(request):
return TRANSACTION_FIXTURES[request.param]
@pytest.fixture(params=range(len(TYPED_TRANSACTION_FIXTURES)))
def typed_txn_fixture(request):
return TYPED_TRANSACTION_FIXTURES[request.param]
|
tools/convert_datasets/lasot/lasot2coco.py
|
BigBen0519/mmtracking
| 2,226 |
58188
|
<reponame>BigBen0519/mmtracking<filename>tools/convert_datasets/lasot/lasot2coco.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(
description='LaSOT test dataset to COCO Video format')
parser.add_argument(
'-i',
'--input',
help='root directory of LaSOT test dataset',
)
parser.add_argument(
'-o',
'--output',
help='directory to save coco formatted label file',
)
parser.add_argument(
'--split',
help='the split set of lasot, all denotes the whole dataset',
choices=['train', 'test', 'all'],
default='all')
return parser.parse_args()
def convert_lasot(ann_dir, save_dir, split='test'):
"""Convert lasot dataset to COCO style.
Args:
ann_dir (str): The path of lasot dataset
save_dir (str): The path to save `lasot`.
split (str): the split ('train' or 'test') of dataset.
"""
assert split in ['train', 'test'], f'split [{split}] does not exist'
lasot = defaultdict(list)
records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1)
lasot['categories'] = [dict(id=0, name=0)]
videos_list = mmcv.list_from_file(
osp.join(osp.dirname(__file__), 'testing_set.txt'))
if split == 'train':
train_videos_list = []
for video_class in os.listdir(ann_dir):
for video_id in os.listdir(osp.join(ann_dir, video_class)):
if video_id not in videos_list:
train_videos_list.append(video_id)
videos_list = train_videos_list
for video_name in tqdm(videos_list, desc=split):
video_class = video_name.split('-')[0]
video_path = osp.join(ann_dir, video_class, video_name)
video = dict(id=records['vid_id'], name=video_name)
lasot['videos'].append(video)
gt_bboxes = mmcv.list_from_file(
osp.join(video_path, 'groundtruth.txt'))
full_occlusion = mmcv.list_from_file(
osp.join(video_path, 'full_occlusion.txt'))
full_occlusion = full_occlusion[0].split(',')
out_of_view = mmcv.list_from_file(
osp.join(video_path, 'out_of_view.txt'))
out_of_view = out_of_view[0].split(',')
img = mmcv.imread(osp.join(video_path, 'img/00000001.jpg'))
height, width, _ = img.shape
for frame_id, gt_bbox in enumerate(gt_bboxes):
file_name = '%08d' % (frame_id + 1) + '.jpg'
file_name = osp.join(video_class, video_name, 'img', file_name)
image = dict(
file_name=file_name,
height=height,
width=width,
id=records['img_id'],
frame_id=frame_id,
video_id=records['vid_id'])
lasot['images'].append(image)
x1, y1, w, h = gt_bbox.split(',')
ann = dict(
id=records['ann_id'],
video_id=records['vid_id'],
image_id=records['img_id'],
instance_id=records['global_instance_id'],
category_id=0,
bbox=[int(x1), int(y1), int(w),
int(h)],
area=int(w) * int(h),
full_occlusion=full_occlusion[frame_id] == '1',
out_of_view=out_of_view[frame_id] == '1')
lasot['annotations'].append(ann)
records['ann_id'] += 1
records['img_id'] += 1
records['global_instance_id'] += 1
records['vid_id'] += 1
if not osp.isdir(save_dir):
os.makedirs(save_dir)
mmcv.dump(lasot, osp.join(save_dir, f'lasot_{split}.json'))
print(f'-----LaSOT {split} Dataset------')
print(f'{records["vid_id"]- 1} videos')
print(f'{records["global_instance_id"]- 1} instances')
print(f'{records["img_id"]- 1} images')
print(f'{records["ann_id"] - 1} objects')
print('-----------------------------')
def main():
args = parse_args()
if args.split == 'all':
for split in ['train', 'test']:
convert_lasot(args.input, args.output, split=split)
else:
convert_lasot(args.input, args.output, split=args.split)
if __name__ == '__main__':
main()
|
semantic_seg_synthia/synthia_dataset_chained_flow.py
|
HarmoniaLeo/meteornet
| 127 |
58218
|
<reponame>HarmoniaLeo/meteornet
'''
Provider for duck dataset from <NAME>
'''
import os
import os.path
import json
import numpy as np
import sys
import pickle
import copy
import psutil
from pyquaternion import Quaternion
import class_mapping
class SegDataset():
def __init__(self, root='processed_pc', \
chained_flow_root='chained_flow',
filelist_name='data_prep/train_raw.txt', \
labelweight_filename = 'data_prep/labelweights.npz', \
npoints = 16384, num_frames=1, train=True):
self.npoints = npoints
self.train = train
self.root = root
self.chained_flow_root = chained_flow_root
self.num_frames = num_frames
self.num_max_nonkey = num_frames - 1
self.labelweights = np.load(labelweight_filename)['labelweights']
filenames = []
raw_txt_file = open(filelist_name, 'r')
l = raw_txt_file.readline()
while len(l) > 0:
l = l.split(' ')[0]
l = l.split('/')
sequence_name = l[0]
frame_id = int(l[-1].split('.')[0])
filenames.append([sequence_name, frame_id])
l = raw_txt_file.readline()
filenames.sort()
self.filenames = filenames
##### debug
# self.filenames = [f for f in self.filenames if 'SYNTHIA-SEQS-01-DAWN' in f[0]]
self.cache = {}
self.cache_mem_usage = 0.95
def read_data(self, sequence_name, frame_id):
if sequence_name in self.cache:
if frame_id in self.cache[sequence_name]:
pc, rgb, semantic, chained_flowed, center = self.cache[sequence_name][frame_id]
return pc, rgb, semantic, chained_flowed, center
fn = os.path.join(self.root, sequence_name + '-' + str(frame_id).zfill(6) + '.npz')
if os.path.exists(fn):
data = np.load(fn)
pc = data['pc']
rgb = data['rgb']
semantic = data['semantic']
center = data['center']
chained_flow = []
##### read flow
basename_split = os.path.basename(fn).split('.npz')[0].split('-')
for f in range(-self.num_max_nonkey, self.num_max_nonkey+1):
if f != 0:
new_basename = '-'.join(basename_split + [str(int(basename_split[-1]) + f).zfill(6)]) + '.npz'
chained_flow_fn = os.path.join(self.chained_flow_root, new_basename)
if os.path.exists(chained_flow_fn):
chained_flow_data = np.load(chained_flow_fn)['chained_flow']
else:
chained_flow_data = None
else:
chained_flow_data = pc
chained_flow.append(chained_flow_data)
for i in range(self.num_max_nonkey+1, self.num_max_nonkey*2 + 1):
if chained_flow[i] is None:
chained_flow[i] = chained_flow[i-1]
else:
chained_flow[i] = chained_flow[i-1] + chained_flow[i]
for i in range(self.num_max_nonkey-1, -1, -1):
if chained_flow[i] is None:
chained_flow[i] = chained_flow[i+1]
else:
chained_flow[i] = chained_flow[i+1] + chained_flow[i]
chained_flowed = np.stack(chained_flow, axis=-2)
semantic = semantic.astype('uint8')
else:
pc, rgb, semantic, chained_flowed, center = None, None, None, None, None
mem = psutil.virtual_memory()
if (mem.used / mem.total) < self.cache_mem_usage:
if sequence_name not in self.cache:
self.cache[sequence_name] = {}
self.cache[sequence_name][frame_id] = (pc, rgb, semantic, chained_flowed, center)
return pc, rgb, semantic, chained_flowed, center
def read_training_data_point(self, index):
sequence_name, frame_id = self.filenames[index]
pcs = []
rgbs = []
semantics = []
chained_floweds_raw = []
center_0 = None
exist_frame_id = []
most_recent_success = -1
for diff in range(0, self.num_frames):
##### combination of (sequence_name, frame_id) is guaranteed to exist, therefore diff=0 will not return none
pc, rgb, semantic, chained_flowed, center = self.read_data(sequence_name, frame_id - diff)
if pc is None:
pc, rgb, semantic, chained_flowed, center = self.read_data(sequence_name, most_recent_success)
else:
most_recent_success = frame_id - diff
exist_frame_id.append(most_recent_success)
if diff == 0:
center_0 = center
pcs.append(pc)
rgbs.append(rgb)
semantics.append(semantic)
chained_floweds_raw.append(chained_flowed)
exist_frame_id.reverse()
##### resolve the cases for repeated frames, at the start of the sequence in the dataset
chained_floweds_list = []
for f_dest in range(self.num_frames):
chained_floweds = []
for f_src in range(self.num_frames):
f_diff = exist_frame_id[f_dest] - exist_frame_id[f_src]
chained_floweds.append(chained_floweds_raw[f_dest][:, f_diff + self.num_max_nonkey])
chained_floweds = np.stack(chained_floweds, axis=-2)
chained_floweds_list.append(chained_floweds)
pc = np.stack(pcs, axis=0)
rgb = np.stack(rgbs, axis=0)
semantic = np.stack(semantics, axis=0)
chained_flowed = np.stack(chained_floweds_list, axis=0)
return pc, rgb, semantic, chained_flowed, center_0
def half_crop_w_context(self, half, context, pc, rgb, semantic, chained_flowed, center):
num_frames = pc.shape[0]
all_idx = np.arange(pc.shape[1])
sample_indicies_half_w_context = []
if half == 0:
for f in range(num_frames):
sample_idx_half_w_context = all_idx[pc[f, :, 2] > (center[2] - context)]
sample_indicies_half_w_context.append(sample_idx_half_w_context)
else:
for f in range(num_frames):
sample_idx_half_w_context = all_idx[pc[f, :, 2] < (center[2] + context)]
sample_indicies_half_w_context.append(sample_idx_half_w_context)
pc_half_w_context = [pc[f, s] for f, s in enumerate(sample_indicies_half_w_context)]
rgb_half_w_context = [rgb[f, s] for f, s in enumerate(sample_indicies_half_w_context)]
semantic_half_w_context = [semantic[f, s] for f, s in enumerate(sample_indicies_half_w_context)]
chained_flowed_half_w_context = [chained_flowed[f, s] for f, s in enumerate(sample_indicies_half_w_context)]
if half == 0:
loss_masks = [p[:, 2] > center[2] for p in pc_half_w_context]
else:
loss_masks = [p[:, 2] < center[2] for p in pc_half_w_context]
valid_pred_idx_in_full = sample_indicies_half_w_context
return pc_half_w_context, rgb_half_w_context, semantic_half_w_context, chained_flowed_half_w_context, \
loss_masks, valid_pred_idx_in_full
def augment(self, pc, chained_flowed, center):
flip = np.random.uniform(0, 1) > 0.5
if flip:
pc = (pc - center)
pc[:, 0] *= -1
pc += center
chained_flowed = (chained_flowed - center)
chained_flowed[:, :, 0] *= -1
chained_flowed += center
scale = np.random.uniform(0.8, 1.2)
pc = (pc - center) * scale + center
chained_flowed = (chained_flowed - center) * scale + center
rot_axis = np.array([0, 1, 0])
rot_angle = np.random.uniform(np.pi * 2)
q = Quaternion(axis=rot_axis, angle=rot_angle)
R = q.rotation_matrix
pc = np.dot(pc - center, R) + center
chained_flowed = np.dot(chained_flowed - center, R) + center
return pc, chained_flowed
def mask_and_label_conversion(self, semantic, loss_mask):
labels = []
loss_masks = []
for i, s in enumerate(semantic):
sem = s.astype('int32')
label = class_mapping.index_to_label_vec_func(sem)
loss_mask_ = (label != 12) * loss_mask[i]
label[label == 12] = 0
labels.append(label)
loss_masks.append(loss_mask_)
return labels, loss_masks
def choice_to_num_points(self, pc, rgb, label, chained_flowed, loss_mask, valid_pred_idx_in_full):
# shuffle idx to change point order (change FPS behavior)
for f in range(self.num_frames):
idx = np.arange(pc[f].shape[0])
choice_num = self.npoints
if pc[f].shape[0] > choice_num:
shuffle_idx = np.random.choice(idx, choice_num, replace=False)
else:
shuffle_idx = np.concatenate([np.random.choice(idx, choice_num - idx.shape[0]), \
np.arange(idx.shape[0])])
pc[f] = pc[f][shuffle_idx]
rgb[f] = rgb[f][shuffle_idx]
chained_flowed[f] = chained_flowed[f][shuffle_idx]
label[f] = label[f][shuffle_idx]
loss_mask[f] = loss_mask[f][shuffle_idx]
valid_pred_idx_in_full[f] = valid_pred_idx_in_full[f][shuffle_idx]
pc = np.concatenate(pc, axis=0)
rgb = np.concatenate(rgb, axis=0)
label = np.concatenate(label, axis=0)
chained_flowed = np.concatenate(chained_flowed, axis=0)
loss_mask = np.concatenate(loss_mask, axis=0)
valid_pred_idx_in_full = np.concatenate(valid_pred_idx_in_full, axis=0)
return pc, rgb, label, chained_flowed, loss_mask, valid_pred_idx_in_full
def get(self, index, half=0, context=1.):
pc, rgb, semantic, chained_flowed, center = self.read_training_data_point(index)
pc, rgb, semantic, chained_flowed, loss_mask, valid_pred_idx_in_full = \
self.half_crop_w_context(half, context, pc, rgb, semantic, chained_flowed, center)
label, loss_mask = self.mask_and_label_conversion(semantic, loss_mask)
pc, rgb, label, chained_flowed, loss_mask, valid_pred_idx_in_full = \
self.choice_to_num_points(pc, rgb, label, chained_flowed, loss_mask, valid_pred_idx_in_full)
if self.train:
pc, chained_flowed = self.augment(pc, chained_flowed, center)
if self.train:
labelweights = 1/np.log(1.2 + self.labelweights)
# labelweights = 1 / self.labelweights
labelweights = labelweights / labelweights.min()
else:
labelweights = np.ones_like(self.labelweights)
return pc, rgb, label, chained_flowed, labelweights, loss_mask, valid_pred_idx_in_full
def __len__(self):
return len(self.filenames)
if __name__ == '__main__':
import mayavi.mlab as mlab
import class_mapping
NUM_POINT = 8192
num_frames = 3
d = SegDataset(root='processed_pc', chained_flow_root='chained_flow', npoints=NUM_POINT, train=True, num_frames=num_frames)
print(len(d))
import time
tic = time.time()
point_size = 0.2
for idx in range(200, len(d)):
for half in [0, 1]:
print(d.filenames[idx])
batch_data = np.zeros((NUM_POINT * num_frames, 3 + 3))
batch_chained_flowed = np.zeros((NUM_POINT * num_frames, 3))
batch_label = np.zeros((NUM_POINT * num_frames), dtype='int32')
batch_mask = np.zeros((NUM_POINT * num_frames), dtype=np.bool)
pc, rgb, label, chained_flowed, labelweights, loss_mask, valid_pred_idx_in_full = d.get(idx, half)
batch_data[:, :3] = pc
batch_data[:, 3:] = rgb
batch_chained_flowed = chained_flowed
batch_label = label
batch_mask = loss_mask
print(batch_data[0*NUM_POINT:1*NUM_POINT, :3] - batch_chained_flowed[0*NUM_POINT:1*NUM_POINT, 0])
print(batch_data[1*NUM_POINT:2*NUM_POINT, :3] - batch_chained_flowed[1*NUM_POINT:2*NUM_POINT, 1])
print(batch_data[2*NUM_POINT:3*NUM_POINT, :3] - batch_chained_flowed[2*NUM_POINT:3*NUM_POINT, 2])
batch_labelweights = labelweights[batch_label]
##### select only the first frame, for viz
batch_data = batch_data[:NUM_POINT]
batch_label = batch_label[:NUM_POINT]
batch_chained_flowed = batch_chained_flowed[:NUM_POINT]
batch_mask = batch_mask[:NUM_POINT]
batch_labelweights = batch_labelweights[:NUM_POINT]
##### mlab viz, with semantic
mlab.figure(bgcolor=(1,1,1))
pc_valid = batch_data[:, :3][batch_mask]
rgb_valid = batch_data[:, 3:][batch_mask]
label_valid = batch_label[batch_mask]
chained_flowed_valid = batch_chained_flowed[batch_mask]
for i in range(12):
pc_sem = pc_valid[label_valid == i]
color = class_mapping.index_to_color[class_mapping.label_to_index[i]]
mlab.points3d(pc_sem[:,0], pc_sem[:,1], pc_sem[:,2], scale_factor=point_size, color=(color[0]/255,color[1]/255,color[2]/255))
pc_non_valid = batch_data[:, :3][np.logical_not(batch_mask)]
mlab.points3d(pc_non_valid[:,0], pc_non_valid[:,1], pc_non_valid[:,2], scale_factor=point_size, color=(0, 0, 0))
color = np.array([[1,0,0], [1,1,0], [0,1,0], [0,1,1], [0,0,1]])
fwrite = open('view.pts', 'w')
for i in range(batch_data.shape[0]):
# p = batch_data[i, :3]
for f in range(0, num_frames):
p = batch_chained_flowed[i,f]
fwrite.write('{} {} {} {} {} {}\n'.format(p[0], p[1], p[2], color[f,0], color[f,1], color[f,2]))
input()
print(time.time() - tic)
|
smr/test/integrated/test_mem_disk_change.py
|
lynix94/nbase-arc
| 176 |
58285
|
<reponame>lynix94/nbase-arc
#
# Copyright 2015 Naver Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import time
import Util, Conf, Cm, Pg, Pgs, Smr, Client, Log
class TestMemDiskChange (unittest.TestCase):
BASE_PORT = 1900
BE_PORT = 1909
def make_some_logs(self, runtime=10):
# make some logs
clients = []
num_clients = 20
for i in range (0, num_clients):
C = Client.Client()
clients.append(C)
C.slotid = i
C.add(C.slotid, 'localhost', self.BE_PORT)
C.size(64*1024, C.slotid)
C.tps(100, C.slotid)
for C in clients:
C.start(C.slotid)
runtime_limit = 10
runtime = 0
while runtime < runtime_limit:
time.sleep(1)
runtime = runtime + 1
for C in clients:
C.stop(C.slotid)
def test_mem_disk_change (self):
saved = Conf.USE_MEM_LOG
Conf.USE_MEM_LOG = True
cm = None
pgs = None
expected_seqs = {}
try:
cm = Cm.CM("tmpTestMemDiskChange")
cm.create_workspace()
pg = Pg.PG(0)
pgs = Pgs.PGS(0, 'localhost', self.BASE_PORT, cm.dir)
expected_seqs['min'] = 0
expected_seqs['max'] = 0
for i in range (0, 5):
if i % 2 == 0:
Log.createlog(pgs.dir)
else:
Log.syncdeletelog(pgs.dir)
# start replicator
pgs.start_smr()
pgs.smr.wait_role(Smr.SMR.NONE)
# check seqs
seqs = pgs.smr.getseq_log()
print "expected =======>", expected_seqs
print "seqs =======>", seqs
assert seqs['min'] == expected_seqs['min']
assert seqs['max'] == expected_seqs['max']
# pgs -> master
pgs.start_be()
pgs.smr.wait_role(Smr.SMR.LCONN)
pg.join(pgs)
pgs.smr.wait_role(Smr.SMR.MASTER)
# kill be
self.make_some_logs(runtime=3)
# wait for be to apply all logs
time.sleep(1)
# checkpoint
pgs.be.ckpt()
# remember original sequences
expected_seqs = pgs.smr.getseq_log()
# kill pgs
pg.leave(pgs.id, kill=True)
finally:
#Util.tstop('Check output!')
if pgs is not None:
pgs.kill_smr()
pgs.kill_be()
if cm is not None:
cm.remove_workspace()
Conf.USE_MEM_LOG = saved
def test_log_hole(self):
''' make randome hole in log files and check '''
pass
if __name__ == '__main__':
unittest.main()
|
src/hg/encode/encodeCharts/encodeStatus.py
|
andypohl/kent
| 171 |
58319
|
#!/hive/groups/recon/local/bin/python
# Requires Python 2.6, current default python on hgwdev is 2.4
"""CGI script that outputs the ENCODE status based upon a specified
field.
"""
import cgi, cgitb
import datetime
import json
import sys
# Import local modules found in "/hive/groups/encode/dcc/charts"
sys.path.append("/hive/groups/encode/dcc/charts")
import encodeReportLib
import gviz_api
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "1.0.0"
cgitb.enable()
# Parse report file and return result in the proper format
# for the Google Visualization API
def processReportFile (reportFile, statusLabel, keyIndex, norelease, species):
hash = {}
f = open(reportFile, "r")
for line in f:
line = line.rstrip()
if line.startswith('Project'):
continue
splitArray = line.split('\t')
keyLabel = splitArray[keyIndex]
status = splitArray[8]
assembly = splitArray[9]
if status == 'revoked' or status == 'replaced':
continue
if norelease == 1 and status == 'released':
continue
if keyIndex == 5:
keyLabel=encodeReportLib.parseFreezeLabel(keyLabel)
if species == 'all':
pass
else:
if species == 'human' and assembly.startswith('hg'):
pass
elif species == 'mouse' and assembly.startswith('mm'):
pass
elif species == assembly:
pass
else:
continue
if not keyLabel in hash:
hash[keyLabel] = {}
for i in statusLabel:
hash[keyLabel][i] = 0
hash[keyLabel][status] += 1
f.close()
if keyIndex == 5:
sortKey = encodeReportLib.orderFreezeDateLabels(hash.keys())
else:
sortKey = sorted(hash)
# Populate dataArray with the contents of the matrix
dataArray = []
for labKey in sortKey:
array = []
array.append(labKey)
for statusKey in statusLabel:
array.append(hash[labKey][statusKey])
dataArray.append(array)
return dataArray
def main():
form = cgi.FieldStorage()
# CGI Variables
# key = project, lab, data, freeze, or species
# Display data based on the key variable
# norelease = 0 or 1
# 0 = Output all data
# 1 = Output only unreleased data
# species = human, mouse, all
# human = Output only human data
# mouse = Output only mouse data
# all = Output all data
keyField = form.getvalue('key')
if keyField == None:
keyField = 'project'
norelease = form.getvalue('norelease')
if norelease == None:
norelease = 0
norelease = int(norelease)
species = form.getvalue('species')
if species == None:
species = 'all'
switch = {'project':0, 'lab':1, 'data':2, 'freeze':5, 'status':8}
titleTag = {'project':"Project", 'lab':"Lab", 'data':"Data_Type",
'freeze':"Freeze", 'status':"Status"}
if keyField not in switch:
keyField = 'project'
keyIndex = switch[keyField]
# Headers for the columns in the data matrix
description = [(titleTag[keyField], "string")]
fullLabel = ['released', 'reviewing', 'approved', 'displayed', 'downloads', 'loaded']
statusLabel = []
for label in fullLabel:
if label == 'released' and norelease == 1:
continue
tmpDesc = [(label, 'number')]
description += tmpDesc
statusLabel.append(label)
reportFile, currentDate = encodeReportLib.getRecentReport()
matrix = processReportFile(reportFile, statusLabel, keyIndex, norelease,
species)
# Create the data table
data_table = gviz_api.DataTable(description)
data_table.LoadData(matrix)
# Convert to JavaScript code
jscode = data_table.ToJSCode("jscode_data")
# Set variables for HTML output
template_vars = {}
template_vars['jscode'] = jscode
template_vars['dateStamp'] = encodeReportLib.dateIntToDateStr(currentDate)
template_vars['title'] = "ENCODE (%s) Status by %s" % (species,
titleTag[keyField])
template_vars['packageName'] = 'columnchart'
template_vars['visClass'] = 'ColumnChart'
template_vars['style'] = ""
template_vars['species'] = species
template_vars['keyField'] = keyField
template_vars['norelease']= norelease
# Set the chart specific configuration options
chart_config = {}
chart_config['isStacked'] = 'true'
chart_config['legendFontSize'] = 16
chart_config['width'] = 854
chart_config['height'] = 480
chart_config['titleX'] = titleTag[keyField]
chart_config['titleY'] = "# of Submissions"
chart_config['tooltipFontSize'] = 16
chart_config['enableTooltip'] = 'true'
colors = encodeReportLib.getColorArray(len(statusLabel))
colors.reverse()
chart_config['colors'] = colors
template_vars['chart_config'] = json.dumps(chart_config)
encodeReportLib.renderHtml(template_vars, 0, 1)
return
if __name__ == '__main__':
main()
sys.exit(0)
|
amadeus/namespaces/_reference_data.py
|
minjikarin/amadeus-python
| 125 |
58391
|
<filename>amadeus/namespaces/_reference_data.py<gh_stars>100-1000
from amadeus.client.decorator import Decorator
from amadeus.reference_data._urls import Urls
from amadeus.reference_data._location import Location
from amadeus.reference_data._locations import Locations
from amadeus.reference_data._airlines import Airlines
from amadeus.reference_data._recommended_locations import RecommendedLocations
class ReferenceData(Decorator, object):
def __init__(self, client):
Decorator.__init__(self, client)
self.urls = Urls(client)
self.locations = Locations(client)
self.airlines = Airlines(client)
self.recommended_locations = RecommendedLocations(client)
def location(self, location_id):
return Location(self.client, location_id)
|
tiny_benchmark/maskrcnn_benchmark/layers/sigmoid_focal_loss.py
|
ucas-vg/TinyBenchmark
| 495 |
58455
|
<reponame>ucas-vg/TinyBenchmark
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from maskrcnn_benchmark import _C
# TODO: Use JIT to replace CUDA implementation in the future.
class _SigmoidFocalLoss(Function):
@staticmethod
def forward(ctx, logits, targets, gamma, alpha):
ctx.save_for_backward(logits, targets)
num_classes = logits.shape[1]
ctx.num_classes = num_classes
ctx.gamma = gamma
ctx.alpha = alpha
losses = _C.sigmoid_focalloss_forward(
logits, targets, num_classes, gamma, alpha
)
return losses
@staticmethod
@once_differentiable
def backward(ctx, d_loss):
logits, targets = ctx.saved_tensors
num_classes = ctx.num_classes
gamma = ctx.gamma
alpha = ctx.alpha
d_loss = d_loss.contiguous()
d_logits = _C.sigmoid_focalloss_backward(
logits, targets, d_loss, num_classes, gamma, alpha
)
return d_logits, None, None, None, None
sigmoid_focal_loss_cuda = _SigmoidFocalLoss.apply
def sigmoid_focal_loss_cpu(logits, targets, gamma, alpha):
num_classes = logits.shape[1]
gamma = gamma[0]
alpha = alpha[0]
dtype = targets.dtype
device = targets.device
class_range = torch.arange(1, num_classes+1, dtype=dtype, device=device).unsqueeze(0)
t = targets.unsqueeze(1)
p = torch.sigmoid(logits)
term1 = (1 - p) ** gamma * torch.log(p)
term2 = p ** gamma * torch.log(1 - p)
return -(t == class_range).float() * term1 * alpha - ((t != class_range) * (t >= 0)).float() * term2 * (1 - alpha)
class SigmoidFocalLoss(nn.Module):
def __init__(self, gamma, alpha):
super(SigmoidFocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, logits, targets):
device = logits.device
if logits.is_cuda:
loss_func = sigmoid_focal_loss_cuda
else:
loss_func = sigmoid_focal_loss_cpu
loss = loss_func(logits, targets, self.gamma, self.alpha)
return loss.sum()
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "gamma=" + str(self.gamma)
tmpstr += ", alpha=" + str(self.alpha)
tmpstr += ")"
return tmpstr
from maskrcnn_benchmark.modeling.rpn.gaussian_net.gau_label_infer import three_points_solve
class FixedIOULoss(nn.Module):
def three_point_solve(self, li, lj, lk, a, b, eps=1e-6):
lkj, lji = lk - lj, lj - li
inverse_w2 = (lkj / b - lji / a) / (a + b)
dx = -(w2 * lji / a + a) / 2
# dx = (lkj * a * a + lji * b * b) / (lji*b - lkj * a) / 2
return w2, dx
def cross_points_set_solve_3d(self, L, points, a, b, step=1, solver=1):
# points_set: (N, 3), # (c, y, x)
"""
L[cj, yj-a, xj]
L[cj, yj, xj-a] L[cj, yj, xj] L[cj, yj, xj + b]
L[cj, yj+b, xj]
"""
cj, yj, xj = points[:, 0], points[:, 1], points[:, 2]
idx = torch.arange(len(points))
lx = L[cj, yj] # (N, W)
lxi, lxj, lxk = lx[idx, xj - a], lx[idx, xj], lx[idx, xj + b]
ly = L[cj, :, xj] # (N, H) not (H, N)
lyi, lyj, lyk = ly[idx, yj - a], lxj, ly[idx, yj + b]
li = torch.cat([lxi, lyi], dim=0)
lj = torch.cat([lxj, lyj], dim=0)
lk = torch.cat([lxk, lyk], dim=0)
s, d = self.three_point_solve(li, lj, lk, a, b)
n = len(s) // 2
w, h = s[:n], s[n:]
dx, dy = d[:n], d[n:]
# cx = xj.float() + dx # 1/2 cause use center point
# cy = yj.float() + dy
# x1 = cx - (w-1/step) / 2 # notice here
# y1 = cy - (h-1/step) / 2
# return torch.stack([x1 * step, y1 * step, w * step, h * step, lxj], dim=1) # lxj == lyj
return dx, dy, w, h
def forward(self, bbox, target, sf=0.125):
def center2corner(dx, dy, w, h):
l = w / 2 - dx
r = w / 2 + dx
t = h / 2 - dy
b = h / 2 + dy
return l, t, r, b
pred_l, pred_t, pred_r, pred_b = center2corner(*bbox)
targ_l, targ_t, targ_r, targ_b = center2corner(*target)
l_range = (0, 4)
pred_l = pred_l.clamp(*l_range)
pred_r = pred_r.clamp(*l_range)
pred_t = pred_t.clamp(*l_range)
pred_b = pred_b.clamp(*l_range)
target_aera = target[2] * target[3]
pred_aera = (pred_l + pred_r) * (pred_t + pred_b)
w_intersect = torch.min(pred_l, targ_l) + torch.min(pred_r, targ_r)
h_intersect = torch.min(pred_b, targ_b) + torch.min(pred_t, targ_t)
area_intersect = w_intersect * h_intersect
area_union = target_aera + pred_aera - area_intersect
# iou_losses = -torch.log((area_intersect.clamp(0) + 1.0) / (area_union.clamp(0) + 1.0))
iou_losses = -torch.log(((area_intersect.clamp(0) + 1.0) / (area_union.clamp(0) + 1.0)).clamp(0.1))
# if iou_losses.max() > 10:
# print("ok")
# targ_w, targ_h = target[2], target[3]
# l1_losses = 0.
# for p, t, s in zip([pred_l, pred_t, pred_r, pred_b],
# [targ_l, targ_t, targ_r, targ_b],
# [targ_w, targ_h, targ_w, targ_h]):
# l1_losses += torch.log(1 + 3 * smooth_l1((p - t) / s))
# l1_losses /= 4 # cause loss from 4 sub-loss: l, t, r, b
# valid = ((bbox[2] > 0) & (bbox[3] > 0) & (pred_l > 0) & (pred_r > 0) & (pred_t > 0) & (pred_b > 0)).float()
# assert (targ_h <= 0).sum() == 0 and (targ_w <= 0).sum() == 0 and (targ_l <= 0).sum() == 0 and (targ_r <= 0).sum() == 0 \
# and (targ_t <= 0).sum() == 0 and (targ_b <= 0).sum() == 0, ""
# return iou_losses * valid, l1_losses * (1 - valid)
return iou_losses * 0, iou_losses * 0
def smooth_l1(error, beta=1. / 9):
"""
very similar to the smooth_l1_loss from pytorch, but with
the extra beta parameter
"""
n = torch.abs(error)
cond = n < beta
loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
class FixSigmoidFocalLoss(nn.Module):
def __init__(self, gamma, alpha, sigma, fpn_strides, c, EPS=1e-6):
super(FixSigmoidFocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.sigma = sigma
self.EPS = EPS
self.fpn_strides = fpn_strides
self.c = c # (0.5, 2, 1, 2)
print("c1, c2, c3, c4 for pos loss:", self.c)
self.g_mul_p = False
self.iou_loss = FixedIOULoss()
def forward(self, cls_logits, gau_logits, targets, valid=None):
"""
:param logits: shape=(B, H, W, C)
:param targets: shape=(B, H, W, C)
:return:
"""
gamma = self.gamma
alpha = self.alpha
eps = self.EPS
c1, c2, c3, c4, c5 = self.c
# num_classes = logits.shape[1]
# dtype = targets.dtype
# device = targets.device
# # class_range = torch.arange(1, num_classes + 1, dtype=dtype, device=device).unsqueeze(0)
q = targets
p = torch.sigmoid(cls_logits)
g = torch.sigmoid(gau_logits)
# if self.g_mul_p: g = g * p
# loss = -(q - p) ** gamma * (torch.log(p) * alpha + torch.log(1-p) * (1 - alpha)) # origin
# loss = -(q - p) ** gamma * (q * torch.log(p) * alpha + (1 - q) * torch.log(1-p) * (1 - alpha)) # correct 1
# loss = -(q - p) ** gamma * (q * torch.log(p/(q+eps)) * alpha + (1 - q) * torch.log((1-p)/(1-q+eps)) * (1 - alpha)) # correct 2
# correct 3
# loss = -(q - p) ** gamma * (q * torch.log(p/(q+eps)) + (1 - q) * torch.log((1-p)/(1-q+eps)))
# neg_loss = (1-alpha) * (q <= eps).float() * loss
# pos_loss = alpha * (q > eps).float() * loss
# correct 4
# loss = -(q - p) ** gamma * (q * torch.log(p/(q+eps)) + (1 - q) * torch.log((1-p)/(1-q+eps)))
# neg_loss = (1-alpha) * (q <= eps).float() * loss
# pos_loss = 4 * alpha * (q > eps).float() * loss
# correct 5
# loss = - (q * torch.log(p) + (1 - q) * torch.log(1-p)) # correct 1-2
# neg_loss = (q <= eps).float() * (- torch.log(1 - p)) * (1 - alpha) * ((q - p) ** gamma)
# q * |log(p) - log(q)|^2, cause inference need -log(p), so use log L2 Loss, q to weight like centerness.
# pos_loss = q * (torch.log(p / (q + eps)) ** 2) * alpha # * (q > eps).float()
# loss 1
# loss = (- q * torch.log(p) - (1 - q) * torch.log(1 - p)) * ((q - p) ** gamma)
# neg_loss = (q <= eps).float() * loss * (1 - alpha)
# pos_loss = (q > eps).float() * loss * alpha
# loss 1, FL
# loss = (- q * torch.log(p / (q + eps)) - (1 - q) * torch.log((1 - p)/(1 - q + eps))) * ((q - p) ** gamma)
# neg_loss = (q <= eps).float() * loss * (1 - alpha)
# pos_loss = (q > eps).float() * loss * alpha
# print((q > eps).sum(), (q <= eps).sum())
# # loss 2, log loss
# neg_loss = (q <= eps).float() * (- torch.log(1 - p) * (p ** gamma)) * (1 - alpha) # FL
# pos_loss = (q * smooth_l1(torch.log(p / (q + eps)))) * alpha # smoothl1([ln(p) - ln(q)]) # should be (p + eps) / (q+ eps)
# # loss3, log diff loss
# # use p
# neg_loss = (q <= eps).float() * (1 - alpha) * (- p ** gamma * torch.log(1 - p))
# pos_loss = (q > eps).float() * alpha * (- (1 - p) ** gamma * torch.log(p))
#
# # use g
# gau_neg_loss = (q <= eps).float() * (1 - alpha) * (- g ** gamma * torch.log(1 - g)) * c5
# fpn_stride, object_range, out_factor = self.fpn_strides[0], torch.Tensor([32, 64]), 2 # out_factor==2 means accept object range is [min/2, max*2]
# # object_range[1] *= out_factor
# # object_range[0] /= out_factor
# # w**2=2/L * s**2(fpn_stride) in [32**2, 64**2], L in [2*(s/32)**2, 2*(s/64)**2], L*sf=[0.5, 2]
# sf = object_range[0] / fpn_stride * object_range[1] / fpn_stride / 2 # 1/2 * (O1 * O2) / S**2=16, make 1/d2(log_q) to (0.5, 2)
# factor = self.sigma * self.sigma * sf # 1/diff2(log_q) in (8, 32), log_q*16 make it in (0.5, 2)
#
# log_p = -torch.log(g + eps) * factor
# log_q = -torch.log(q + eps) * factor
# center_log_p, center_log_q = log_p[:, 1:-1, 1:-1, :], log_q[:, 1:-1, 1:-1, :]
# # qx_diff1, qy_diff1 = (center_log_q - log_q[:, :-2, 1:-1, :]), (center_log_q - log_q[:, 1:-1, :-2, :])
# # px_diff1, py_diff1 = (center_log_p - log_p[:, :-2, 1:-1, :]), (center_log_p - log_p[:, 1:-1, :-2, :])
# left, right = lambda x: x[:, 1:-1, :-2, :], lambda x: x[:, 1:-1, 2:, :]
# top, bottom = lambda x: x[:, :-2, 1:-1, :], lambda x: x[:, 2:, 1:-1, :]
# qx_diff1 = center_log_q - left(log_q)
# qy_diff1 = center_log_q - top(log_q)
# px_diff1 = center_log_p - left(log_p)
# py_diff1 = center_log_p - top(log_p)
# qx_diff2 = left(log_q) + right(log_q) - 2 * center_log_q
# qy_diff2 = top(log_q) + bottom(log_q) - 2 * center_log_q
# px_diff2 = left(log_p) + right(log_p) - 2 * center_log_p
# py_diff2 = top(log_p) + bottom(log_p) - 2 * center_log_p
# # print('px_diff', px_diff1.max(), px_diff1[qx_diff1 > 0].mean())
# # print('qy_diff', qy_diff1.max(), qy_diff1[qy_diff1 > 0].mean())
# # valid_x = (q[:, :-2, 1:-1, :] > eps) & (q[:, 2:, 1:-1, :] > eps)
# # valid_y = (q[:, 1:-1, :-2, :] > eps) & (q[:, 1:-1, 2:, :] > eps)
#
# # abs(dx) = s/8/2, (32, 64) -> t in (2, 4), (-tf/2, tf/2)
# tf = (object_range[1] / fpn_stride)
# dqx = -((qx_diff1+eps) / (qx_diff2+eps) + 0.5)[valid] / tf
# dqy = -((qy_diff1+eps) / (qy_diff2+eps) + 0.5)[valid] / tf
# dpx = -((px_diff1+eps) / (qx_diff2+eps) + 0.5)[valid] / tf # use qx_diff2, not px_diff2 to get smooth grad.
# dpy = -((py_diff1+eps) / (qy_diff2+eps) + 0.5)[valid] / tf
# x_loss = torch.log(1 + 3 * (dqx - dpx).clamp(-1, 1).abs())
# y_loss = torch.log(1 + 3 * (dqy - dpy).clamp(-1, 1).abs())
# xy_loss = (smooth_l1(x_loss, beta=0.25) + smooth_l1(y_loss, beta=0.25))
#
# d2_range = 1./2/out_factor, 2 * out_factor
# px_diff2 = px_diff2.clamp(*d2_range)[valid]
# py_diff2 = py_diff2.clamp(*d2_range)[valid]
# qx_diff2 = qx_diff2.clamp(*d2_range)[valid]
# qy_diff2 = qy_diff2.clamp(*d2_range)[valid]
#
# gau_loss = (q[:, 1:-1, 1:-1, :] > 0).float() * smooth_l1(center_log_p - center_log_q)
# wh_loss = (smooth_l1(c3 * torch.log(qx_diff2/px_diff2), beta=0.25) +
# smooth_l1(c3 * torch.log(qy_diff2/py_diff2), beta=0.25))
#
# # def ri(x): return round(x.item(), 3)
# # print("neg_loss", ri(neg_loss.max()), ri(neg_loss.mean()), end=';')
# #
# # def ri(x): return round(x.item(), 3) if valid.sum() > 0 else 0
# # print('gau_loss', ri(gau_loss.max()), ri(gau_loss.mean()), end=";")
# # print('wh_loss', ri(wh_loss.max()), ri(wh_loss.mean()), end=';')
# # print('xy_loss', ri(xy_loss.max()), ri(xy_loss.mean()), )
# valid_q = q[:, 1:-1, 1:-1, :][valid]
# gau_loss = q[:, 1:-1, 1:-1, :] * (c1*gau_loss)
# wh_loss = valid_q * (c2*wh_loss)
# xy_loss = valid_q * (c4*xy_loss)
# return neg_loss.sum(), pos_loss.sum(), gau_neg_loss.sum() * 0, gau_loss.sum(), wh_loss.sum(), xy_loss.sum()
# loss4, IOU
neg_loss = (q <= eps).float() * (1 - alpha) * (- p ** gamma * torch.log(1 - p))
pos_loss = (q > eps).float() * alpha * (- (1 - p) ** gamma * torch.log(p))
g = g.permute((0, 3, 1, 2))
q = q.permute((0, 3, 1, 2))
valid = valid.permute((0, 3, 1, 2))
factor = self.sigma * self.sigma
log_p = -torch.log(g + eps) * factor
log_q = -torch.log(q + eps) * factor
fpn_stride, object_range, out_factor = self.fpn_strides[0], torch.Tensor([32, 64]), 2
sf = 1 / ((object_range[0] / fpn_stride * object_range[1] / fpn_stride) ** 0.5)
iou_losses = 0.
l1_losses = 0.
for b in range(len(valid)):
idx = torch.nonzero(valid[b])
if len(idx) == 0: continue
idx[:, 1:] += 1
p_bboxes = self.iou_loss.cross_points_set_solve_3d(log_p[b], idx, 1, 1, step=1, solver=1)
q_bboxes = self.iou_loss.cross_points_set_solve_3d(log_q[b], idx, 1, 1, step=1, solver=1)
iou_loss, l1_loss = self.iou_loss(p_bboxes, q_bboxes, sf)
valid_q = q[b, :, 1:-1, 1:-1][valid[b]]
iou_losses += (valid_q * iou_loss).sum()
l1_losses += (valid_q * l1_loss).sum()
def ri(x): return round(x.item(), 3)
print("neg_loss", ri(neg_loss.max()), ri(neg_loss.mean()), end=';')
print(iou_losses, l1_losses)
return neg_loss.sum(), pos_loss.sum(), iou_losses * 0, l1_losses * 0
class L2LossWithLogit(nn.Module):
def __init__(self):
super(L2LossWithLogit, self).__init__()
self.mse = nn.MSELoss(reduction='sum')
def forward(self, logits, targets):
p = torch.sigmoid(logits)
return self.mse(p, targets)
|
reddit2telegram/channels/~inactive/r_hqdesi/app.py
|
mainyordle/reddit2telegram
| 187 |
58471
|
<reponame>mainyordle/reddit2telegram
#encoding:utf-8
subreddit = 'HQDesi'
t_channel = '@r_HqDesi'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
setup.py
|
Mahima-ai/ALEPython
| 107 |
58516
|
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
with open("README.md") as f:
long_description = f.read()
with open("requirements.txt", "r") as f:
required = f.read().splitlines()
setup(
name="alepython",
description="Python Accumulated Local Effects (ALE) package.",
author="<NAME>",
author_email="<EMAIL>",
license="Apache 2",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MaximeJumelle/alepython/",
install_requires=required,
extras_require={"test": ["pytest>=5.4", "pytest-cov>=2.8"]},
setup_requires=["setuptools-scm"],
python_requires=">=3.5",
use_scm_version=dict(write_to="src/alepython/_version.py"),
keywords="alepython",
package_dir={"": "src"},
packages=find_packages(where="src"),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approched :: Apache 2",
"Operating System :: OS Independent",
],
)
|
tproxy/__init__.py
|
romabysen/tproxy
| 170 |
58535
|
<reponame>romabysen/tproxy<filename>tproxy/__init__.py
# -*- coding: utf-8 -
#
# This file is part of tproxy released under the MIT license.
# See the NOTICE for more information.
version_info = (0, 5, 4)
__version__ = ".".join(map(str, version_info))
|
env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_histogram.py
|
acrucetta/Chicago_COVI_WebApp
| 11,750 |
58545
|
<reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>1000+
from plotly.graph_objs import Histogram
|
tests/model/create_model_repr_test.py
|
Timothyyung/bravado-core
| 122 |
58589
|
<reponame>Timothyyung/bravado-core
# -*- coding: utf-8 -*-
import pytest
import six
def test_success(user):
expected = "User(email=None, firstName=None, id=None, lastName=None, password=<PASSWORD>, phone=None, userStatus=None, username=None)" # noqa
assert expected == repr(user)
def test_allOf(cat, cat_spec, cat_swagger_spec):
expected = "Cat(category=None, id=None, name=None, neutered=None, photoUrls=None, tags=None)" # noqa
assert expected == repr(cat)
@pytest.mark.skipif(six.PY3, reason="py2 has ascii default strings")
def test_unicode_py2(user):
user.firstName = 'Ümlaut'
expected = r"User(email=None, firstName='\xc3\x9cmlaut', id=None, lastName=None, password=<PASSWORD>, phone=None, userStatus=None, username=None)" # noqa
assert expected == repr(user)
@pytest.mark.skipif(six.PY2, reason="py3 has unicode default strings")
def test_unicode_py3(user):
user.firstName = 'Ümlaut'
expected = "User(email=None, firstName='Ümlaut', id=None, lastName=None, password=<PASSWORD>, phone=None, userStatus=None, username=None)" # noqa
assert expected == repr(user)
|
py-polars/polars/eager/__init__.py
|
elferherrera/polars
| 1,595 |
58591
|
<reponame>elferherrera/polars<gh_stars>1000+
# flake8: noqa
from . import frame, series
from .frame import *
from .series import *
__all__ = frame.__all__ + series.__all__
|
pydlm/dlm.py
|
onnheimm/pydlm
| 423 |
58603
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
===============================================================================
The code for the class dlm
===============================================================================
This is the main class of the Bayeisan dynamic linear model.
It provides the modeling, filtering, forecasting and smoothing function
of a dlm. The dlm use the @builder to construct the @baseModel based
on user supplied @components and then run @kalmanFilter to filter the result.
Example:
>>> # randomly generate fake data on 1000 days
>>> import numpy as np
>>> data = np.random.random((1, 1000))
>>> # construct the dlm of a linear trend and a 7-day seasonality
>>> from pydlm import dlm, trend, seasonality
>>> myDlm = dlm(data) + trend(degree = 2, 0.98) + seasonality(period = 7, 0.98)
>>> # filter the result
>>> myDlm.fitForwardFilter()
>>> # extract the filtered result
>>> myDlm.getFilteredObs()
"""
# This is the major class for fitting time series data using the
# dynamic linear model. dlm is a subclass of builder, with adding the
# Kalman filter functionality for filtering the data
from copy import deepcopy
from numpy import matrix
from pydlm.predict.dlmPredictMod import dlmPredictModule
from pydlm.access.dlmAccessMod import dlmAccessModule
from pydlm.tuner.dlmTuneMod import dlmTuneModule
from pydlm.plot.dlmPlotMod import dlmPlotModule
class dlm(dlmPlotModule, dlmPredictModule, dlmAccessModule, dlmTuneModule):
""" The main class of the dynamic linear model.
This is the main class of the Bayeisan dynamic linear model.
It provides the modeling, filtering, forecasting and smoothing
function of a dlm.
The dlm use the @builder to construct the @baseModel based on user supplied
@components and then run @kalmanFilter to filter the result.
Example 1:
>>> # randomly generate fake data on 1000 days
>>> import numpy as np
>>> data = np.random.random((1, 1000))
>>> # construct the dlm of a linear trend and a 7-day seasonality
>>> myDlm = dlm(data) + trend(degree = 2, 0.98) + seasonality(period = 7, 0.98)
>>> # filter the result
>>> myDlm.fitForwardFilter()
>>> # extract the filtered result
>>> myDlm.getFilteredObs()
Example 2 (fit a linear regression):
>>> from pydlm import dynamic
>>> data = np.random.random((1, 100))
>>> mydlm = dlm(data) + trend(degree=1, 0.98, name='a') +
dynamic(features=[[i] for i in range(100)], 1, name='b')
>>> mydlm.fit()
>>> coef_a = mydlm.getLatentState('a')
>>> coef_b = mydlm.getLatentState('b')
Attributes:
data: a list of doubles of the raw time series data.
It could be either the python's built-in list of
doubles or numpy 1d array.
"""
# define the basic members
# initialize the result
def __init__(self, data, **options):
super(dlm, self).__init__(data, **options)
# This model is used for prediction. Prediction functions
# will change the model status to forecast at a particular
# date. Using a copied model will help the main model from
# being changed and behaving abnormally.
self._predictModel = None
def exportModel(self):
""" Export the dlm builder. Currently the method only support dlm without
dynamic components.
"""
if length(self.builder.dynamicComponents) > 0:
raise ValueError('Cannot export dlm builder with dynamic components.')
if not self.initialized:
raise ValueError('Cannot export dlm before the model was initilized.')
return deepcopy(self.builder)
def buildFromModel(self, model):
""" Construct the dlm with exported model from other DLM with status.
Args:
model: The exported model from other dlm. Must be the return from
dlm.exportModel()
"""
self._initializeFromBuilder(exported_builder=model)
# ===================== modeling components =====================
# add component
def add(self, component):
""" Add new modeling component to the dlm.
Currently support: trend, seasonality, autoregression
and dynamic component.
Args:
component: the modeling component, could be either one
of the following:\n
trend, seasonality, dynamic, autoReg.
Returns:
A dlm object with added component.
"""
self.__add__(component)
def __add__(self, component):
self.builder.__add__(component)
self.initialized = False
return self
# list all components
def ls(self):
""" List out all existing components
"""
self.builder.ls()
# delete one component
def delete(self, name):
""" Delete model component by its name
Args:
name: the name of the component.
"""
self.builder.delete(name)
self.initialized = False
# ========================== model training component =======================
def fitForwardFilter(self, useRollingWindow=False, windowLength=3):
""" Fit forward filter on the available data.
User can choose use rolling windowFront
or not. If user choose not to use the rolling window,
then the filtering will be based on all the previous data.
If rolling window is used, then the filtering for a particular
date will only consider previous dates that are
within the rolling window length.
Args:
useRollingWindow: indicate whether rolling window should be used.
windowLength: the length of the rolling window if used.
"""
# check if the feature size matches the data size
self._checkFeatureSize()
# see if the model has been initialized
if not self.initialized:
self._initialize()
if self._printInfo:
print('Starting forward filtering...')
if not useRollingWindow:
# we start from the last step of previous filtering
if self.result.filteredType == 'non-rolling':
start = self.result.filteredSteps[1] + 1
else:
start = 0
# because we refit the forward filter, we need to reset the
# backward smoother as well.
self.result.smoothedSteps = [0, -1]
# determine whether renew should be used
self._forwardFilter(start=start,
end=self.n - 1,
renew=self.options.stable)
self.result.filteredType = 'non-rolling'
else:
if self.result.filteredType == 'rolling':
windowFront = self.result.filteredSteps[1] + 1
else:
windowFront = 0
# because we refit the forward filter, we need to reset the
# backward smoother as well.
self.result.smoothedSteps = [0, -1]
self.result.filteredType = 'rolling'
# if end is still within (0, windowLength - 1), we should run the
# usual ff from
if windowFront < windowLength:
self._forwardFilter(start=self.result.filteredSteps[1] + 1,
end=min(windowLength - 1, self.n - 1))
# for the remaining date, we use a rolling window
for today in range(max(windowFront, windowLength), self.n):
self._forwardFilter(start=today - windowLength + 1,
end=today,
save=today,
ForgetPrevious=True)
self.result.filteredSteps = [0, self.n - 1]
self.turnOn('filtered plot')
self.turnOn('predict plot')
if self._printInfo:
print('Forward filtering completed.')
def fitBackwardSmoother(self, backLength=None):
""" Fit backward smoothing on the data. Starting from the last observed date.
Args:
backLength: integer, indicating how many days the backward smoother
should go, starting from the last date.
"""
# see if the model has been initialized
if not self.initialized:
raise NameError('Backward Smoother has to be run after' +
' forward filter')
if self.result.filteredSteps[1] != self.n - 1:
raise NameError('Forward Fiter needs to run on full data before' +
'using backward Smoother')
# default value for backLength
if backLength is None:
backLength = self.n
if self._printInfo:
print('Starting backward smoothing...')
# if the smoothed dates has already been done, we do nothing
if self.result.smoothedSteps[1] == self.n - 1 and \
self.result.smoothedSteps[0] <= self.n - 1 - backLength + 1:
return None
# if the smoothed dates start from n - 1, we just need to continue
elif self.result.smoothedSteps[1] == self.n - 1:
self._backwardSmoother(start=self.result.smoothedSteps[0] - 1,
days=backLength)
# if the smoothed dates are even earlier,
# we need to start from the beginning
elif self.result.smoothedSteps[1] < self.n - 1:
self._backwardSmoother(start=self.n - 1, days=backLength)
self.result.smoothedSteps = [self.n - backLength, self.n - 1]
self.turnOn('smoothed plot')
if self._printInfo:
print('Backward smoothing completed.')
def fit(self):
""" An easy caller for fitting both the forward filter and backward smoother.
"""
self.fitForwardFilter()
self.fitBackwardSmoother()
# ======================= data appending, popping and altering ===============
# Append new data or features to the dlm
def append(self, data, component='main'):
""" Append the new data to the main data or the components (new feature data)
Args:
data: the new data
component: the name of which the new data to be added to.\n
'main': the main time series data\n
other omponent name: add new feature data to other
component.
"""
# initialize the model to ease the modification
if not self.initialized:
self._initialize()
# if we are adding new data to the time series
if component == 'main':
# add the data to the self.data
self.data.extend(list(data))
# update the length
self.n += len(data)
self.result._appendResult(len(data))
# update the automatic components as well
for component in self.builder.automaticComponents:
comp = self.builder.automaticComponents[component]
comp.appendNewData(data)
# give a warning to remind to append dynamic components
if len(self.builder.dynamicComponents) > 0:
print('Remember to append the new features for the' +
' dynamic components as well')
# if we are adding new data to the features of dynamic components
elif component in self.builder.dynamicComponents:
comp = self.builder.dynamicComponents[component]
comp.appendNewData(data)
else:
raise NameError('Such dynamic component does not exist.')
# pop the data of a specific date out
def popout(self, date):
""" Pop out the data for a given date
Args:
date: the index indicates which date to be popped out.
"""
if date < 0 or date > self.n - 1:
raise NameError('The date should be between 0 and ' +
str(self.n - 1))
# initialize the model to ease the modification
if not self.initialized:
self._initialize()
# pop out the data at date
self.data.pop(date)
self.n -= 1
# pop out the feature at date
for name in self.builder.dynamicComponents:
comp = self.builder.dynamicComponents[name]
comp.popout(date)
# pop out the results at date
self.result._popout(date)
# update the filtered and the smoothed steps
self.result.filteredSteps[1] = date - 1
self.result.smoothedSteps[1] = date - 1
if self.result.filteredSteps[0] > self.result.filteredSteps[1]:
self.result.filteredSteps = [0, -1]
self.result.smoothedSteps = [0, -1]
elif self.result.smoothedSteps[0] > self.result.smoothedSteps[1]:
self.result.smoothedSteps = [0, -1]
# alter the data of a specific days
def alter(self, date, data, component='main'):
""" To alter the data for a specific date and a specific component.
Args:
date: the date of the altering data
data: the new data. data must be a numeric value for main time
series and must be a list of numerical values for dynamic
components.
component: the component for which the new data need to be
supplied to.\n
'main': the main time series data\n
other component name: other component feature data
"""
if date < 0 or date > self.n - 1:
raise NameError('The date should be between 0 and ' +
str(self.n - 1))
# initialize the model to ease the modification
if not self.initialized:
self._initialize()
# to alter the data for the observed chain
if component == 'main':
self.data[date] = data
# we also automatically alter all the automatic components
for component in self.builder.automaticComponents:
comp = self.builder.automaticComponents[component]
comp.alter(date, data)
# to alter the feature of a component
elif component in self.builder.dynamicComponents:
comp = self.builder.dynamicComponents[component]
comp.alter(date, data)
else:
raise NameError('Such dynamic component does not exist.')
# update the filtered and the smoothed steps
self.result.filteredSteps[1] = date - 1
self.result.smoothedSteps[1] = date - 1
if self.result.filteredSteps[0] > self.result.filteredSteps[1]:
self.result.filteredSteps = [0, -1]
self.result.smoothedSteps = [0, -1]
elif self.result.smoothedSteps[0] > self.result.smoothedSteps[1]:
self.result.smoothedSteps = [0, -1]
# ignore the data of a given date
def ignore(self, date):
""" Ignore the data for a specific day. treat it as missing data
Args:
date: the date to ignore.
"""
if date < 0 or date > self.n - 1:
raise NameError('The date should be between 0 and ' +
str(self.n - 1))
self.alter(date=date, data=None, component='main')
# ================================ control options =========================
def showOptions(self):
""" Print out all the option values
"""
allItems = vars(self.options)
for item in allItems:
print(item + ': ' + str(allItems[item]))
def stableMode(self, use=True):
""" Turn on the stable mode, i.e., using the renewal strategy.
Indicate whether the renew strategy should be used to add numerical
stability. When the filter goes over certain steps,
the information contribution of the previous data has decayed
to minimum. In the stable mode, We then ignore those days and
refit the time series starting from current - renewTerm, where
renewTerm is computed according to the discount. Thus,
the effective sample size of the dlm is twice
renewTerm. When discount = 1, there will be no renewTerm,
since all the information will be passed along.
"""
# if option changes, reset everything
if self.options.stable != use:
self.initialized = False
if use is True:
self.options.stable = True
elif use is False:
self.options.stable = False
else:
raise NameError('Incorrect option input')
def evolveMode(self, evoType='dependent'):
""" Control whether different component evolve indpendently. If true,
then the innovation will only be added on each component but not the
correlation between the components, so that for component with discount
equals to 1, the smoothed results will always be constant.
Args:
evoType: If set to 'independent', then each component will evolve
independently. If set to 'dependent', then the components
will proceed jointly. Default to 'independent'. Switch to
'dependent' if efficiency is a concern.
Returns:
a dlm object (for chaining purpose)
"""
# if option changes, reset everything
if (self.options.innovationType == 'whole' and
evoType == 'independent') or \
(self.options.innovationType == 'component' and
evoType == 'dependent'):
self.initialized = False
if evoType == 'independent':
self.options.innovationType = 'component'
elif evoType == 'dependent':
self.options.innovationType = 'whole'
else:
raise NameError('Incorrect option input')
# for chaining
return self
def noisePrior(self, prior=0):
""" To set the prior for the observational noise. Calling with empty
argument will enable the auto noise intializer (currently, the min of 1
and the variance of time series).
Args:
prior: the prior of the observational noise.
Returns:
A dlm object (for chaining purpose)
"""
if prior > 0:
self.options.noise=prior
self.initialized = False
else:
self.options.useAutoNoise = True
self.initialized = False
# for chaining
return self
|
Calibration/HcalAlCaRecoProducers/python/alcaiterphisym_cfi.py
|
malbouis/cmssw
| 852 |
58606
|
<reponame>malbouis/cmssw
import FWCore.ParameterSet.Config as cms
# producer for alcaiterativephisym (HCAL Iterative Phi Symmetry)
import Calibration.HcalAlCaRecoProducers.alcaEcalHcalReadoutsProducer_cfi
IterativePhiSymProd = Calibration.HcalAlCaRecoProducers.alcaEcalHcalReadoutsProducer_cfi.alcaEcalHcalReadoutsProducer.clone()
|
testsuite/databases/pgsql/utils.py
|
okutane/yandex-taxi-testsuite
| 128 |
58615
|
import pathlib
import typing
import urllib.parse
_PLUGIN_DIR = pathlib.Path(__file__).parent
PLUGIN_DIR = str(_PLUGIN_DIR)
CONFIGS_DIR = str(_PLUGIN_DIR.joinpath('configs'))
SCRIPTS_DIR = str(_PLUGIN_DIR.joinpath('scripts'))
def scan_sql_directory(root: str) -> typing.List[pathlib.Path]:
return [
path
for path in sorted(pathlib.Path(root).iterdir())
if path.is_file() and path.suffix == '.sql'
]
def connstr_replace_dbname(connstr: str, dbname: str) -> str:
"""Replace dbname in existing connection string."""
if connstr.endswith(' dbname='):
return connstr + dbname
if connstr.startswith('postgresql://'):
url = urllib.parse.urlparse(connstr)
url = url._replace(path=dbname) # pylint: disable=protected-access
return url.geturl()
raise RuntimeError(
f'Unsupported PostgreSQL connection string format {connstr!r}',
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.