max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars)
---|---|---|---|---|
Python3/480.py | rakhi2001/ecom7 | 854 | 106911 |
__________________________________________________________________________________________________
sample 120 ms submission
import bisect
from typing import List
class Solution:
def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
window = sorted(nums[:k])
res = []
if k % 2 == 0:
res.append((window[k // 2] + window[k // 2 - 1]) / 2)
else:
res.append(window[k // 2])
for i in range(k, len(nums)):
bisect.insort(window, nums[i])
index = bisect.bisect_left(window, nums[i - k])
window.pop(index)
if k % 2 == 0:
res.append((window[k // 2] + window[k // 2 - 1]) / 2)
else:
res.append(window[k // 2])
return res
__________________________________________________________________________________________________
sample 14492 kb submission
from typing import List
def sortedInsert(arr, ele):
pos = len(arr)
for i, n in enumerate(arr):
if ele < n:
pos = i
break
arr.insert(pos, ele)
def populate(nums, k):
low = 0
high = k - 1
window = sorted(nums[:k])
return low, high, window
class Solution:
def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
low, high, window = populate(nums, k)
result = []
while high < len(nums) - 1:
if k % 2 == 1:
result.append(float(window[int(k/2)]))
else:
a = window[int(k/2)]
b = window[int(k/2) - 1]
result.append(float((a+b)/2))
window.remove(nums[low])
low += 1
high += 1
sortedInsert(window, nums[high])
if k % 2 == 1:
result.append(float(window[int(k/2)]))
else:
a = window[int(k/2)]
b = window[int(k/2) - 1]
result.append(float((a+b)/2))
return result
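# Editor's note: a minimal, hypothetical local check, not part of either
# submission above; it assumes one of the Solution classes is in scope and
# uses an illustrative input only.
nums, k = [1, 3, -1, -3, 5, 3, 6, 7], 3
print(Solution().medianSlidingWindow(nums, k))  # sliding-window medians: 1, -1, -1, 3, 5, 6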
__________________________________________________________________________________________________
|
inside/Driver/Driver_Manage.py | kangzai228/learning-power | 318 | 106925 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Author : lisztomania
# @Date : 2021/1/16
# @Software : Pycharm
# @Version : Python 3.8.5
# @File : Driver_Manage.py
# @Function : Driver management
import os
from selenium.webdriver.chrome.webdriver import WebDriver
from inside.Config.Api import API
from inside.Config.Path import PATH
from inside.Config.System import SYSTEM
from inside.Driver.Driver_Check import DRIVER_CHECK
from inside.Driver.Driver_Download import DRIVER_DOWNLOAD
from inside.Driver.Driver_Init import DRIVER_INIT
from inside.Template.Meta_Singleton import SINGLETON
__all__ = ['DRIVER_MANAGE']
class DRIVER_MANAGE(metaclass=SINGLETON):
"""Driver management class"""
def __init__(self):
"""
Options are requested and set during initialization
"""
self.__Driver_Check()
@classmethod
def __Check_Dir(cls) -> bool:
"""
Check_Dir() -> bool
Check whether the driver directory exists; if it does not, create it automatically
:return: bool
"""
if not DRIVER_CHECK().Dir:
print(f"Driver directory has not been created\n"
f"Creating it automatically")
os.mkdir(PATH().Driver)
print(f"Driver directory is {PATH().Driver}")
return False
return True
def __Driver_Check(self) -> None:
"""
__Driver_Check() -> None
Driver check: make sure the driver can be used normally
:return: None
"""
temp = False
if not temp and not self.__Check_Dir():
temp = True
if not temp and not DRIVER_CHECK().File:
print(f"Driver has not been downloaded")
temp = True
if not temp and not DRIVER_CHECK().Driver_Chrome_Version(
system=SYSTEM()):
print(f"Driver does not support the local Chrome version")
temp = True
if temp:
print(f"Downloading the driver automatically")
size = DRIVER_DOWNLOAD().Download(
link=API().Driver.Download(system=SYSTEM()))
print(f"Driver download finished\n"
f"File size: {size / 1024 / 1024}MB")
if not DRIVER_CHECK().Execute_Permission:
print(f"Driver lacks execute permission\n"
f"Adding it automatically")
DRIVER_CHECK().Add_Execute_Permission(system=SYSTEM())
print(f"Execute permission added")
@property
def Task(self) -> WebDriver:
"""
Task -> WebDriver
Task browser WebDriver; when closing it, be sure to use Task_Quit
:return: WebDriver
"""
return DRIVER_INIT().Task_Driver
@property
def Task_Quit(self) -> str:
"""
Task_Quit -> str
Closes the task browser WebDriver
:return: str
"""
return DRIVER_INIT().Task_Quit
@property
def QR(self) -> WebDriver:
"""
QR -> WebDriver
QR-code browser WebDriver; when closing it, be sure to use QR_Quit
:return: WebDriver
"""
return DRIVER_INIT().QR_Driver
@property
def QR_Quit(self) -> str:
"""
QR_Quit -> str
Closes the QR-code browser WebDriver
:return: str
Success: quit succeeded
Nonexistence: the browser does not exist
"""
return DRIVER_INIT().QR_Quit
|
rls/envs/unity/wrappers/wrappers.py | StepNeverStop/RLs | 371 | 106949 |
#!/usr/bin/env python3
# encoding: utf-8
from collections import defaultdict
from copy import deepcopy
from typing import Dict, List
import numpy as np
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.side_channel.engine_configuration_channel import \
EngineConfigurationChannel
from mlagents_envs.side_channel.environment_parameters_channel import \
EnvironmentParametersChannel
from rls.common.data import Data
from rls.common.specs import EnvAgentSpec, SensorSpec
from rls.common.yaml_ops import load_config
from rls.envs.unity.wrappers.core import ObservationWrapper
from rls.utils.np_utils import get_discrete_action_list
class BasicUnityEnvironment(object):
def __init__(self,
worker_id=0,
file_name=None,
port=5005,
render=False,
seed=42,
timeout_wait=60,
env_copies=12,
env_name='3DBall',
real_done=True,
initialize_config={},
engine_config={
'width': 84,
'height': 84,
'quality_level': 5,
'time_scale': 20,
'target_frame_rate': -1,
'capture_frame_rate': 60
},
**kwargs):
self._n_copies = env_copies
self._real_done = real_done
self._side_channels = self.initialize_all_side_channels(
initialize_config, engine_config)
env_kwargs = dict(seed=seed,
worker_id=worker_id,
timeout_wait=timeout_wait,
side_channels=list(self._side_channels.values()))  # register all initialized side channels
if file_name is not None:
env_dict = load_config('rls/configs/unity/env_dict.yaml')
env_kwargs.update(file_name=file_name,
base_port=port,
no_graphics=not render,
additional_args=[
'--scene', str(env_dict.get(env_name, 'None'))
])
self.env = UnityEnvironment(**env_kwargs)
self.env.reset()
self.initialize_environment()
def initialize_all_side_channels(self, initialize_config, engine_config):
"""
Initialize all side channels
"""
engine_configuration_channel = EngineConfigurationChannel()
engine_configuration_channel.set_configuration_parameters(**engine_config)
float_properties_channel = EnvironmentParametersChannel()
float_properties_channel.set_float_parameter('env_copies', self._n_copies)
for k, v in initialize_config.items():
float_properties_channel.set_float_parameter(k, v)
return dict(engine_configuration_channel=engine_configuration_channel,
float_properties_channel=float_properties_channel)
def initialize_environment(self):
"""
Initialize the environment and fetch the necessary information, such as state and action dimensions
"""
self.behavior_names = list(self.env.behavior_specs.keys())
self._vector_idxs = defaultdict(list)
self._vector_dims = defaultdict(list)
self._visual_idxs = defaultdict(list)
self._visual_dims = defaultdict(list)
self._a_dim = defaultdict(int)
self._discrete_action_lists = {}
self._is_continuous = {}
self._actiontuples = {}
self.env.reset()
for bn, spec in self.env.behavior_specs.items():
for i, obs_spec in enumerate(spec.observation_specs): # TODO: optimize
if len(obs_spec.shape) == 1:
self._vector_idxs[bn].append(i)
self._vector_dims[bn].append(obs_spec.shape[0])
elif len(obs_spec.shape) == 3:
self._visual_idxs[bn].append(i)
self._visual_dims[bn].append(list(obs_spec.shape))
else:
raise ValueError(
"shape of observation cannot be understood.")
action_spec = spec.action_spec
if action_spec.is_continuous():
self._a_dim[bn] = action_spec.continuous_size
self._discrete_action_lists[bn] = None
self._is_continuous[bn] = True
elif action_spec.is_discrete():
self._a_dim[bn] = int(np.asarray(
action_spec.discrete_branches).prod())
self._discrete_action_lists[bn] = get_discrete_action_list(
action_spec.discrete_branches)
self._is_continuous[bn] = False
else:
raise NotImplementedError(
"doesn't support continuous and discrete actions simultaneously for now.")
self._actiontuples[bn] = action_spec.empty_action(
n_agents=self._n_copies)
def reset(self, reset_config):
for k, v in reset_config.items():
self._side_channels['float_properties_channel'].set_float_parameter(
k, v)
self.env.reset()
return self.get_obs(only_obs=True)
def step(self, actions, step_config):
"""
params: actions, of type dict or np.ndarray; if actions is not a dict,
the actions are applied to the first behavior controller.
"""
for k, v in step_config.items():
self._side_channels['float_properties_channel'].set_float_parameter(
k, v)
actions = deepcopy(actions)
# TODO: fix this
for bn in self.behavior_names:
if self._is_continuous[bn]:
self._actiontuples[bn].add_continuous(actions[bn])
else:
self._actiontuples[bn].add_discrete(
self._discrete_action_lists[bn][actions[bn]].reshape(self._n_copies, -1))
self.env.set_actions(bn, self._actiontuples[bn])
self.env.step()
return self.get_obs()
@property
def AgentSpecs(self):
ret = {}
for bn in self.behavior_names:
ret[bn] = EnvAgentSpec(
obs_spec=SensorSpec(
vector_dims=self._vector_dims[bn],
visual_dims=self._visual_dims[bn]),
a_dim=self._a_dim[bn],
is_continuous=self._is_continuous[bn]
)
return ret
@property
def StateSpec(self) -> SensorSpec:
return SensorSpec()
@property
def agent_ids(self) -> List[str]:
return self.behavior_names
def get_obs(self, behavior_names=None, only_obs=False):
"""
Parse the information returned by the environment and split it into four parts: vectors, images, rewards, and done signals
"""
behavior_names = behavior_names or self.behavior_names
whole_done = np.full(self._n_copies, False)
whole_info_max_step = np.full(self._n_copies, False)
all_obs_fa, all_obs_fs = {}, {}
all_reward = {}
for bn in behavior_names:
ps = []
# TODO: optimize
while True:
ds, ts = self.env.get_steps(bn)
if len(ts):
ps.append(ts)
if len(ds) == self._n_copies:
break
elif len(ds) == 0:
self.env.step()  # some environments are done, but others are not
else:
raise ValueError(
f'agents number error. Expected 0 or {self._n_copies}, received {len(ds)}')
obs_fs, reward = ds.obs, ds.reward
obs_fa = deepcopy(obs_fs)
done = np.full(self._n_copies, False)
begin_mask = np.full(self._n_copies, False)
info_max_step = np.full(self._n_copies, False)
info_real_done = np.full(self._n_copies, False)
for ts in ps:  # TODO: needs optimization
_ids = ts.agent_id
reward[_ids] = ts.reward
info_max_step[_ids] = ts.interrupted  # terminated because the episode hit its max step count
# drop the "done" caused by max_step; only record "done" caused by failure/success
info_real_done[_ids[~ts.interrupted]] = True
done[_ids] = True
begin_mask[_ids] = True
# zip: vector, visual, ...
for _obs, _tobs in zip(obs_fa, ts.obs):
_obs[_ids] = _tobs
if self._real_done:
done = np.array(info_real_done)
_obs_fa = Data()
_obs_fs = Data()
if len(self._vector_idxs[bn]) > 0:
_obs_fa.update(vector={f'vector_{i}': obs_fa[vi] for i, vi in enumerate(self._vector_idxs[bn])})
_obs_fs.update(vector={f'vector_{i}': obs_fs[vi] for i, vi in enumerate(self._vector_idxs[bn])})
if len(self._visual_idxs[bn]) > 0:
_obs_fa.update(visual={f'visual_{i}': obs_fa[vi] for i, vi in enumerate(self._visual_idxs[bn])})
_obs_fs.update(visual={f'visual_{i}': obs_fs[vi] for i, vi in enumerate(self._visual_idxs[bn])})
all_obs_fa[bn] = _obs_fa
all_obs_fs[bn] = _obs_fs
all_reward[bn] = reward
whole_done = np.logical_or(whole_done, done)
whole_info_max_step = np.logical_or(whole_info_max_step, info_max_step)
if only_obs:
all_obs_fa.update(
{'global': Data(begin_mask=np.full((self._n_copies, 1), True))})
return all_obs_fa
else:
rets = {}
for bn in self.behavior_names:
rets[bn] = Data(obs_fa=all_obs_fa[bn],
obs_fs=all_obs_fs[bn],
reward=all_reward[bn],
done=whole_done,
info=dict(max_step=whole_info_max_step))
rets.update(
{'global': Data(begin_mask=begin_mask[:, np.newaxis])}) # [B, 1]
return rets
def __getattr__(self, name):
"""
Accessing attributes of BasicUnityEnvironment that start with '_' is not allowed
"""
if name.startswith('_'):
raise AttributeError(
"attempted to get missing private attribute '{}'".format(name))
return getattr(self.env, name)
class ScaleVisualWrapper(ObservationWrapper):
def observation(self, observation: Dict[str, Data]):
def func(x): return np.asarray(x * 255).astype(np.uint8)
for k in observation.keys():
observation[k].obs.visual.convert_(func)
observation[k].obs_.visual.convert_(func)
return observation
|
co2meter/__init__.py | alpxp/co2meter | 232 | 106953 |
<filename>co2meter/__init__.py
# Top level __init__ file
from .co2meter import *
from ._version import __version__
|
models/wideresnet_noise_conditional.py | addisand/NSCN | 533 | 106956 |
# Code adapted from https://github.com/google-research/google-research/tree/master/flax_models/cifar
# Original copyright statement:
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wide Resnet Model.
Reference:
Wide Residual Networks, <NAME>, <NAME>
https://arxiv.org/abs/1605.07146
Initially forked from
github.com/google/flax/blob/master/examples/cifar10/models/wideresnet.py
This implementation mimics the one from
github.com/tensorflow/models/blob/master/research/autoaugment/wrn.py
that is widely used as a benchmark.
It uses identity + zero padding skip connections, with kaiming normal
initialization for convolutional kernels (mode = fan_out, gain=2.0).
The final dense layer uses a uniform distribution U[-scale, scale] where
scale = 1 / sqrt(num_classes) as per the autoaugment implementation.
Using the default initialization instead gives error rates approximately 0.5%
greater on cifar100, most likely because the parameters used in the literature
were finetuned for this particular initialization.
Finally, the autoaugment implementation adds more residual connections between
the groups (instead of just between the blocks as per the original paper and
most implementations). It is possible to safely remove those connections without
degrading the performance, which we do by default to match the original
wideresnet paper. Setting `use_additional_skip_connections` to True will add
them back and then reproduces exactly the model used in autoaugment.
"""
import numpy as np
import flax
from flax import linen as nn
import jax
import jax.numpy as jnp
from typing import Any, Tuple, Optional
_BATCHNORM_MOMENTUM = 0.9
_BATCHNORM_EPSILON = 1e-5
# Kaiming initialization with fan out mode. Should be used to initialize
# convolutional kernels.
conv_kernel_init_fn = jax.nn.initializers.variance_scaling(
2.0, 'fan_out', 'normal')
def dense_layer_init_fn(key,
shape,
dtype=jnp.float32):
"""Initializer for the final dense layer.
Args:
key: PRNG key to use to sample the weights.
shape: Shape of the tensor to initialize.
dtype: Data type of the tensor to initialize.
Returns:
The initialized tensor.
"""
num_units_out = shape[1]
unif_init_range = 1.0 / (num_units_out) ** (0.5)
return jax.random.uniform(key, shape, dtype, -1) * unif_init_range
def shake_shake_train(xa,
xb,
rng=None):
"""Shake-shake regularization in training mode.
Shake-shake regularization interpolates between inputs A and B
with *different* random uniform (per-sample) interpolation factors
for the forward and backward/gradient passes.
Args:
xa: Input, branch A.
xb: Input, branch B.
rng: PRNG key.
Returns:
Mix of input branches.
"""
if rng is None:
rng = flax.nn.make_rng()
gate_forward_key, gate_backward_key = jax.random.split(rng, num=2)
gate_shape = (len(xa), 1, 1, 1)
# Draw different interpolation factors (gate) for forward and backward pass.
gate_forward = jax.random.uniform(
gate_forward_key, gate_shape, dtype=jnp.float32, minval=0.0, maxval=1.0)
gate_backward = jax.random.uniform(
gate_backward_key, gate_shape, dtype=jnp.float32, minval=0.0, maxval=1.0)
# Compute interpolated x for forward and backward.
x_forward = xa * gate_forward + xb * (1.0 - gate_forward)
x_backward = xa * gate_backward + xb * (1.0 - gate_backward)
# Combine using stop_gradient.
return x_backward + jax.lax.stop_gradient(x_forward - x_backward)
def shake_shake_eval(xa, xb):
"""Shake-shake regularization in testing mode.
Args:
xa: Input, branch A.
xb: Input, branch B.
Returns:
Mix of input branches.
"""
# Blend between inputs A and B 50%-50%.
return (xa + xb) * 0.5
def shake_drop_train(x,
mask_prob,
alpha_min,
alpha_max,
beta_min,
beta_max,
rng=None):
"""ShakeDrop training pass.
See https://arxiv.org/abs/1802.02375
Args:
x: Input to apply ShakeDrop to.
mask_prob: Mask probability.
alpha_min: Alpha range lower.
alpha_max: Alpha range upper.
beta_min: Beta range lower.
beta_max: Beta range upper.
rng: PRNG key (if `None`, uses `flax.nn.make_rng`).
Returns:
The regularized tensor.
"""
if rng is None:
rng = flax.nn.make_rng()
bern_key, alpha_key, beta_key = jax.random.split(rng, num=3)
rnd_shape = (len(x), 1, 1, 1)
# Bernoulli variable b_l in Eqn 6, https://arxiv.org/abs/1802.02375.
mask = jax.random.bernoulli(bern_key, mask_prob, rnd_shape)
mask = mask.astype(jnp.float32)
alpha_values = jax.random.uniform(
alpha_key,
rnd_shape,
dtype=jnp.float32,
minval=alpha_min,
maxval=alpha_max)
beta_values = jax.random.uniform(
beta_key, rnd_shape, dtype=jnp.float32, minval=beta_min, maxval=beta_max)
# See Eqn 6 in https://arxiv.org/abs/1802.02375.
rand_forward = mask + alpha_values - mask * alpha_values
rand_backward = mask + beta_values - mask * beta_values
return x * rand_backward + jax.lax.stop_gradient(
x * rand_forward - x * rand_backward)
def shake_drop_eval(x,
mask_prob,
alpha_min,
alpha_max):
"""ShakeDrop eval pass.
See https://arxiv.org/abs/1802.02375
Args:
x: Input to apply ShakeDrop to.
mask_prob: Mask probability.
alpha_min: Alpha range lower.
alpha_max: Alpha range upper.
Returns:
The regularized tensor.
"""
expected_alpha = (alpha_max + alpha_min) / 2
# See Eqn 6 in https://arxiv.org/abs/1802.02375.
return (mask_prob + expected_alpha - mask_prob * expected_alpha) * x
def activation(x,
train,
apply_relu=True,
name=''):
x = nn.GroupNorm(name=name, epsilon=1e-5, num_groups=min(x.shape[-1] // 4, 32))(x)
if apply_relu:
x = jax.nn.relu(x)
return x
def _output_add(block_x, orig_x):
"""Add two tensors, padding them with zeros or pooling them if necessary.
Args:
block_x: Output of a resnet block.
orig_x: Residual branch to add to the output of the resnet block.
Returns:
The sum of block_x and orig_x. If necessary, orig_x will be average pooled
or zero padded so that its shape matches block_x.
"""
stride = orig_x.shape[-2] // block_x.shape[-2]
strides = (stride, stride)
if block_x.shape[-1] != orig_x.shape[-1]:
orig_x = nn.avg_pool(orig_x, strides, strides)
channels_to_add = block_x.shape[-1] - orig_x.shape[-1]
orig_x = jnp.pad(orig_x, [(0, 0), (0, 0), (0, 0), (0, channels_to_add)])
return block_x + orig_x
class GaussianFourierProjection(nn.Module):
"""Gaussian Fourier embeddings for noise levels."""
embedding_size: int = 256
scale: float = 1.0
@nn.compact
def __call__(self, x):
W = self.param('W', jax.nn.initializers.normal(stddev=self.scale), (self.embedding_size,))
W = jax.lax.stop_gradient(W)
x_proj = x[:, None] * W[None, :] * 2 * jnp.pi
return jnp.concatenate([jnp.sin(x_proj), jnp.cos(x_proj)], axis=-1)
class WideResnetBlock(nn.Module):
"""Defines a single WideResnetBlock."""
channels: int
strides: Tuple[int] = (1, 1)
activate_before_residual: bool = False
@nn.compact
def __call__(self, x, temb=None, train=True):
if self.activate_before_residual:
x = activation(x, train, name='init_bn')
orig_x = x
else:
orig_x = x
block_x = x
if not self.activate_before_residual:
block_x = activation(block_x, train, name='init_bn')
block_x = nn.Conv(
self.channels, (3, 3),
self.strides,
padding='SAME',
use_bias=False,
kernel_init=conv_kernel_init_fn,
name='conv1')(block_x)
if temb is not None:
block_x += nn.Dense(self.channels)(nn.swish(temb))[:, None, None, :]
block_x = activation(block_x, train=train, name='bn_2')
block_x = nn.Conv(
self.channels, (3, 3),
padding='SAME',
use_bias=False,
kernel_init=conv_kernel_init_fn,
name='conv2')(block_x)
return _output_add(block_x, orig_x)
class WideResnetGroup(nn.Module):
"""Defines a WideResnetGroup."""
blocks_per_group: int
channels: int
strides: Tuple[int] = (1, 1)
activate_before_residual: bool = False
@nn.compact
def __call__(self, x, temb=None, train=True):
for i in range(self.blocks_per_group):
x = WideResnetBlock(self.channels, self.strides if i == 0 else (1, 1),
activate_before_residual=self.activate_before_residual and not i,
)(x, temb, train)
return x
class WideResnet(nn.Module):
"""Defines the WideResnet Model."""
blocks_per_group: int
channel_multiplier: int
num_outputs: int
@nn.compact
def __call__(self, x, sigmas, train=True):
# per image standardization
N = np.prod(x.shape[1:])
x = (x - jnp.mean(x, axis=(1, 2, 3), keepdims=True)) / jnp.maximum(jnp.std(x, axis=(1, 2, 3), keepdims=True),
1. / np.sqrt(N))
temb = GaussianFourierProjection(embedding_size=128, scale=16)(jnp.log(sigmas))
temb = nn.Dense(128 * 4)(temb)
temb = nn.Dense(128 * 4)(nn.swish(temb))
x = nn.Conv(16, (3, 3), padding='SAME', name='init_conv', kernel_init=conv_kernel_init_fn, use_bias=False)(x)
x = WideResnetGroup(self.blocks_per_group, 16 * self.channel_multiplier,
activate_before_residual=True)(x, temb, train)
x = WideResnetGroup(self.blocks_per_group, 32 * self.channel_multiplier, (2, 2))(x, temb, train)
x = WideResnetGroup(self.blocks_per_group, 64 * self.channel_multiplier, (2, 2))(x, temb, train)
x = activation(x, train=train, name='pre-pool-bn')
x = nn.avg_pool(x, x.shape[1:3])
x = x.reshape((x.shape[0], -1))
x = nn.Dense(self.num_outputs, kernel_init=dense_layer_init_fn)(x)
return x
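# Editor's note: a minimal, hypothetical usage sketch, not part of the original
# module; the batch size, image shape and hyperparameters below are illustrative
# assumptions only.
model = WideResnet(blocks_per_group=4, channel_multiplier=10, num_outputs=10)
dummy_x = jnp.ones((2, 32, 32, 3))   # CIFAR-sized dummy batch (NHWC)
dummy_sigmas = jnp.ones((2,))        # one noise level per example
variables = model.init(jax.random.PRNGKey(0), dummy_x, dummy_sigmas, train=False)
out = model.apply(variables, dummy_x, dummy_sigmas, train=False)  # shape (2, 10)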
|
light_cnns/Transformer/__init__.py | murufeng/awesome_lightweight_networks | 318 | 106957 |
<reponame>murufeng/awesome_lightweight_networks
from .mobile_vit import *
from .levit import *
from .ConvNeXt import *
|
pclib/cl/InValRdyRandStallAdapter.py | belang/pymtl | 206 | 106959 |
#=========================================================================
# InValRdyRandStallAdapter
#=========================================================================
# Randomly stalls an input interface.
from copy import deepcopy
from random import Random
from pymtl import *
#-------------------------------------------------------------------------
# InValRdyRandStallAdapter
#-------------------------------------------------------------------------
class InValRdyRandStallAdapter (object):
def __init__( s, in_, stall_prob=0, seed=0x9dd809a6 ):
s.in_ = in_
s.stall_prob = stall_prob
s.data = None
# We keep our own internal random number generator to keep the state
# of this generator completely separate from other generators. This
# ensures that any delays are reproducible.
s.rgen = Random()
s.rgen.seed(seed)
def empty( s ):
return s.data == None
def deq( s ):
assert not s.empty()
item = s.data
s.data = None
s.in_.rdy.next = ( s.rgen.random() > s.stall_prob )
return item
def first( s ):
return s.data
def xtick( s ):
if s.in_.rdy and s.in_.val:
s.data = deepcopy(s.in_.msg)
s.in_.rdy.next = ( s.data == None ) and ( s.rgen.random() > s.stall_prob )
|
data/operator/bbox/spatial/xywh2cxcywh.py | zhangzhengde0225/SwinTrack | 143 | 106976 |
<reponame>zhangzhengde0225/SwinTrack
def bbox_xywh2cxcywh(bbox):
cx = bbox[0] + bbox[2] / 2
cy = bbox[1] + bbox[3] / 2
return (cx, cy, bbox[2], bbox[3])
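# Editor's note: a tiny, hypothetical usage example, not part of the original file.
# An (x, y, w, h) box at (10, 20) with size 100x40 becomes centre-based:
print(bbox_xywh2cxcywh((10, 20, 100, 40)))  # -> (60.0, 40.0, 100, 40)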
|
source/playbooks/AFSBP/ssmdocs/scripts/afsbp_parse_input.py | sybeck2k/aws-security-hub-automated-response-and-remediation | 129 | 106991 |
<filename>source/playbooks/AFSBP/ssmdocs/scripts/afsbp_parse_input.py
#!/usr/bin/python
###############################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not #
# use this file except in compliance with the License. A copy of the License #
# is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0/ #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express #
# or implied. See the License for the specific language governing permis- #
# sions and limitations under the License. #
###############################################################################
import re
def get_control_id_from_arn(finding_id_arn):
check_finding_id = re.match(
'^arn:(?:aws|aws-cn|aws-us-gov):securityhub:(?:[a-z]{2}(?:-gov)?-[a-z]+-\\d):\\d{12}:subscription/aws-foundational-security-best-practices/v/1\\.0\\.0/(.*)/finding/(?i:[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12})$',
finding_id_arn
)
if check_finding_id:
control_id = check_finding_id.group(1)
return control_id
else:
exit(f'ERROR: Finding Id is invalid: {finding_id_arn}')
def parse_event(event, context):
expected_control_id = event['expected_control_id']
parse_id_pattern = event['parse_id_pattern']
resource_id_matches = []
finding = event['Finding']
testmode = bool('testmode' in finding)
finding_id = finding['Id']
account_id = finding.get('AwsAccountId', '')
if not re.match('^\\d{12}$', account_id):
exit(f'ERROR: AwsAccountId is invalid: {account_id}')
control_id = get_control_id_from_arn(finding['Id'])
# ControlId present and valid
if not control_id:
exit(f'ERROR: Finding Id is invalid: {finding_id} - missing Control Id')
# ControlId is the expected value
if control_id not in expected_control_id:
exit(f'ERROR: Control Id from input ({control_id}) does not match {str(expected_control_id)}')
# ProductArn present and valid
product_arn = finding['ProductArn']
if not re.match('^arn:(?:aws|aws-cn|aws-us-gov):securityhub:(?:[a-z]{2}(?:-gov)?-[a-z]+-\\d)::product/aws/securityhub$', product_arn):
exit(f'ERROR: ProductArn is invalid: {product_arn}')
resource = finding['Resources'][0]
# Details
details = finding['Resources'][0].get('Details', {})
# Regex match Id to get remediation-specific identifier
identifier_raw = finding['Resources'][0]['Id']
resource_id = identifier_raw
if parse_id_pattern:
identifier_match = re.match(
parse_id_pattern,
identifier_raw
)
if identifier_match:
for group in range(1, len(identifier_match.groups())+1):
resource_id_matches.append(identifier_match.group(group))
resource_id = identifier_match.group(event.get('resource_index', 1))
else:
exit(f'ERROR: Invalid resource Id {identifier_raw}')
if not resource_id:
exit('ERROR: Resource Id is missing from the finding json Resources (Id)')
affected_object = {'Type': resource['Type'], 'Id': resource_id, 'OutputKey': 'Remediation.Output'}
return {
"account_id": account_id,
"resource_id": resource_id,
"finding_id": finding_id,
"control_id": control_id,
"product_arn": product_arn,
"object": affected_object,
"matches": resource_id_matches,
"details": details,
"testmode": testmode,
"resource": resource
}
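# Editor's note: a hypothetical invocation sketch, not part of the original
# script; the ARNs, account id, control id and bucket name below are made up
# purely to satisfy the regular expressions above.
sample_event = {
    'expected_control_id': ['S3.4'],
    'parse_id_pattern': '^arn:(?:aws|aws-cn|aws-us-gov):s3:::([A-Za-z0-9.-]{3,63})$',
    'Finding': {
        'Id': ('arn:aws:securityhub:us-east-1:111122223333:subscription/'
               'aws-foundational-security-best-practices/v/1.0.0/S3.4/finding/'
               '12345678-1234-1234-1234-123456789012'),
        'AwsAccountId': '111122223333',
        'ProductArn': 'arn:aws:securityhub:us-east-1::product/aws/securityhub',
        'Resources': [{'Type': 'AwsS3Bucket', 'Id': 'arn:aws:s3:::example-bucket'}],
    },
}
print(parse_event(sample_event, None))  # 'resource_id' should come back as 'example-bucket'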
|
corehq/apps/userreports/ui/widgets.py | dimagilg/commcare-hq | 471 | 107027 |
<filename>corehq/apps/userreports/ui/widgets.py
import json
from django import forms
class JsonWidget(forms.Textarea):
def render(self, name, value, attrs=None, renderer=None):
if isinstance(value, str):
# It's probably invalid JSON
return super(JsonWidget, self).render(name, value, attrs, renderer)
return super(JsonWidget, self).render(name, json.dumps(value, indent=2), attrs, renderer)
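# Editor's note: a minimal, hypothetical usage sketch, not part of the original
# module, showing the widget attached to an ordinary Django form field.
spec_field = forms.CharField(widget=JsonWidget(attrs={'rows': 20}))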
|
src/odb/test/python/16-db-read-write-octilinear-def_test.py | erictaur/OpenROAD | 525 | 107028 |
import opendbpy as odb
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
tests_dir = os.path.abspath(os.path.join(current_dir, os.pardir))
opendb_dir = os.path.abspath(os.path.join(tests_dir, os.pardir))
data_dir = os.path.join(tests_dir, "data")
db = odb.dbDatabase.create()
odb.read_lef(db, os.path.join(data_dir, "Nangate45","NangateOpenCellLibrary.mod.lef"))
odb.read_lef(db, os.path.join(data_dir, "ICEWall","dummy_pads.lef"))
odb.read_def(db, os.path.join(data_dir, "ICEWall","octilinear.def"))
chip = db.getChip()
if chip is None:
exit("Read DEF Failed")
result = odb.write_def(chip.getBlock(), os.path.join(opendb_dir, "build","generated_octilinear.def"))
assert result==1, "DEF not written"
db_file = os.path.join(opendb_dir, "build","export_oct.db")
export_result = odb.write_db(db, db_file)
if export_result!=1:
exit("Export DB Failed")
new_db = odb.dbDatabase.create()
new_db = odb.read_db(new_db, db_file)
if odb.db_diff(db, new_db):
exit("Error: Difference found between exported and imported DB")
|
envi/tests/msp430/icmp.py | rnui2k/vivisect | 716 | 107058 |
<reponame>rnui2k/vivisect
from envi.archs.msp430.regs import *
checks = [
# CMP
(
'CMP r14, r15 (src == dst)',
{ 'regs': [(REG_R14, 0xaaaa), (REG_R15, 0xaaaa)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f9e", 'data': "" },
{ 'regs': [(REG_R14, 0xaaaa), (REG_R15, 0xaaaa)], 'flags': [(SR_N, 0), (SR_Z, 1), (SR_C, 1), (SR_V, 0)], 'code': "0f9e", 'data': "" }
),
(
'CMP r14, r15 (src < dst)',
{ 'regs': [(REG_R14, 0x3333), (REG_R15, 0x4444)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f9e", 'data': "" },
{ 'regs': [(REG_R14, 0x3333), (REG_R15, 0x4444)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "0f9e", 'data': "" },
),
(
'CMP r14, r15 (src < dst) result overflow',
{ 'regs': [(REG_R14, 0x1), (REG_R15, 0x8000)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f9e", 'data': "" },
{ 'regs': [(REG_R14, 0x1), (REG_R15, 0x8000)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 1)], 'code': "0f9e", 'data': "" }
),
(
'CMP r14, r15 (src > dst) result carry',
{ 'regs': [(REG_R14, 0xffff), (REG_R15, 0x3333)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f9e", 'data': "" },
{ 'regs': [(REG_R14, 0xffff), (REG_R15, 0x3333)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f9e", 'data': "" }
),
(
'CMP r14, r15 (src > dst) result negative + carry + overflow',
{ 'regs': [(REG_R14, 0xffff), (REG_R15, 0x7fff)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f9e", 'data': "" },
{ 'regs': [(REG_R14, 0xffff), (REG_R15, 0x7fff)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "0f9e", 'data': "" }
),
# CMP.b
(
'CMP.b r14, r15 (src == dst)',
{ 'regs': [(REG_R14, 0x11aa), (REG_R15, 0x12aa)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f9e", 'data': "" },
{ 'regs': [(REG_R14, 0x11aa), (REG_R15, 0x12aa)], 'flags': [(SR_N, 0), (SR_Z, 1), (SR_C, 1), (SR_V, 0)], 'code': "4f9e", 'data': "" }
),
(
'CMP.b r14, r15 (src < dst)',
{ 'regs': [(REG_R14, 0x1133), (REG_R15, 0x0044)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f9e", 'data': "" },
{ 'regs': [(REG_R14, 0x1133), (REG_R15, 0x0044)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "4f9e", 'data': "" },
),
(
'CMP.b r14, r15 (src < dst) result overflow',
{ 'regs': [(REG_R14, 0x1101), (REG_R15, 0x0080)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f9e", 'data': "" },
{ 'regs': [(REG_R14, 0x1101), (REG_R15, 0x0080)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 1)], 'code': "4f9e", 'data': "" }
),
(
'CMP.b r14, r15 (src > dst) result carry',
{ 'regs': [(REG_R14, 0x00ff), (REG_R15, 0x1133)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f9e", 'data': "" },
{ 'regs': [(REG_R14, 0x00ff), (REG_R15, 0x1133)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f9e", 'data': "" }
),
(
'CMP.b r14, r15 (src > dst) result negative + carry + overflow',
{ 'regs': [(REG_R14, 0x00ff), (REG_R15, 0x117f)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f9e", 'data': "" },
{ 'regs': [(REG_R14, 0x00ff), (REG_R15, 0x117f)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "4f9e", 'data': "" }
),
]
|
Supervised Learning with scikit-learn/Chapter 1 - Classification.py | nabeelsana/DataCamp-courses | 464 | 107060 |
<gh_stars>100-1000
#==============================================================================================================================#
#Chapter 1 Classification
#==============================================================================================================================#
#k-Nearest Neighbors: Fit
# Import KNeighborsClassifier from sklearn.neighbors
from sklearn.neighbors import KNeighborsClassifier
# Create arrays for the features and the response variable
y = df['party'].values
X = df.drop('party', axis=1).values
# Create a k-NN classifier with 6 neighbors
knn = KNeighborsClassifier(n_neighbors=6)
# Fit the classifier to the data
knn.fit(X, y)
#==============================================================================================================================#
#k-Nearest Neighbors: Predict
# Import KNeighborsClassifier from sklearn.neighbors
from sklearn.neighbors import KNeighborsClassifier
# Create arrays for the features and the response variable
y = df['party'].values
X = df.drop('party', axis=1).values
# Create a k-NN classifier with 6 neighbors: knn
knn = KNeighborsClassifier(n_neighbors=6)
# Fit the classifier to the data
knn.fit(X, y)
# Predict the labels for the training data X: y_pred
y_pred = knn.predict(X)
# Predict and print the label for the new data point X_new
new_prediction = knn.predict(X_new)
print("Prediction: {}".format(new_prediction))
#==============================================================================================================================#
#The digits recognition dataset
# Import necessary modules
from sklearn import datasets
import matplotlib.pyplot as plt
# Load the digits dataset: digits
digits = datasets.load_digits()
# Print the keys and DESCR of the dataset
print(digits.keys())
print(digits.DESCR)
# Print the shape of the images and data keys
print(digits.images.shape)
print(digits.data.shape)
# Display digit 1010
plt.imshow(digits.images[1010], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
#==============================================================================================================================#
#Train/Test Split + Fit/Predict/Accuracy
# Import necessary modules
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
# Create feature and target arrays
X = digits.data
y = digits.target
# Split into training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42, stratify=y)
# Create a k-NN classifier with 7 neighbors: knn
knn = KNeighborsClassifier(n_neighbors=7)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
# Print the accuracy
print(knn.score(X_test, y_test))
#==============================================================================================================================#
#Overfitting and underfitting
# Setup arrays to store train and test accuracies
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
# Loop over different values of k
for i, k in enumerate(neighbors):
# Setup a k-NN Classifier with k neighbors: knn
knn = KNeighborsClassifier(n_neighbors=k)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
#Compute accuracy on the training set
train_accuracy[i] = knn.score(X_train, y_train)
#Compute accuracy on the testing set
test_accuracy[i] = knn.score(X_test, y_test)
# Generate plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')
plt.plot(neighbors, train_accuracy, label = 'Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show()
#==============================================================================================================================#
#Importing data for supervised learning
# Import numpy and pandas
import numpy as np
import pandas as pd
# Read the CSV file into a DataFrame: df
df = pd.read_csv('gapminder.csv')
# Create arrays for features and target variable
y = df['life'].values
X = df['fertility'].values
# Print the dimensions of X and y before reshaping
print("Dimensions of y before reshaping: {}".format(y.shape))
print("Dimensions of X before reshaping: {}".format(X.shape))
# Reshape X and y
y = y.reshape(-1, 1)
X = X.reshape(-1, 1)
# Print the dimensions of X and y after reshaping
print("Dimensions of y after reshaping: {}".format(y.shape))
print("Dimensions of X after reshaping: {}".format(X.shape))
#==============================================================================================================================#
|
test/visuals/_test_inline.py | colinmford/coldtype | 142 | 107082 |
<reponame>colinmford/coldtype
from test._test_inline2 import * #INLINE
from coldtype import *
@renderable()
def stub(r):
return test_function(r).f(0.3)
return (DATPen()
.oval(r.inset(50))
.f(0.8))
|
examples/cifar10_tensorflow/cifar10.py | jurgisp/xmanager | 392 | 107154 |
# Copyright 2021 The Tensorflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code based on https://www.tensorflow.org/tutorials/images/cnn."""
import os
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras import layers
from tensorflow.keras import models
# When using Vertex TensorBoard, the TensorBoard log directory will be present
# as an environment variable.
LOG_DIR = os.environ.get('AIP_TENSORBOARD_LOG_DIR', '')
FLAGS = flags.FLAGS
flags.DEFINE_integer('epochs', 5, 'epochs')
flags.DEFINE_float('learning_rate', 0.001, 'learning rate')
def main(_):
(train_images, train_labels), (test_images, test_labels) = (
datasets.cifar10.load_data())
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = models.Sequential()
model.add(
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
callbacks = []
if LOG_DIR:
callbacks = [
tf.keras.callbacks.TensorBoard(
log_dir=LOG_DIR,
histogram_freq=1,
),
]
model.fit(
train_images,
train_labels,
epochs=FLAGS.epochs,
validation_data=(test_images, test_labels),
callbacks=callbacks,
verbose=2)
if __name__ == '__main__':
app.run(main)
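# Editor's note (not part of the original example): with the absl flags defined
# above, a typical local run might look like:
#     python cifar10.py --epochs=5 --learning_rate=0.001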
|
rdkit/sping/PS/__init__.py | kazuyaujihara/rdkit | 1,609 | 107158 |
# package
from .pidPS import *
|
flexxamples/howtos/python_side_widget2.py | levinbgu/flexx | 1,662 | 107177 |
<filename>flexxamples/howtos/python_side_widget2.py
from flexx import flx
class UserInput(flx.PyWidget):
def init(self):
with flx.VBox():
self.edit = flx.LineEdit(placeholder_text='Your name')
flx.Widget(flex=1)
@flx.reaction('edit.user_done')
def update_user(self, *events):
new_text = self.root.store.username + "\n" + self.edit.text
self.root.store.set_username(new_text)
self.edit.set_text("")
class SomeInfoWidget(flx.PyWidget):
def init(self):
with flx.FormLayout():
self.label = flx.Label(title='name:')
flx.Widget(flex=1)
@flx.reaction
def update_label(self):
self.label.set_text(self.root.store.username)
class Store(flx.PyComponent):
username = flx.StringProp(settable=True)
class Example(flx.PyWidget):
store = flx.ComponentProp()
def init(self):
# Create our store instance
self._mutate_store(Store())
# Imagine this being a large application with many sub-widgets,
# and the UserInput and SomeInfoWidget being used somewhere inside it.
with flx.HSplit():
UserInput()
flx.Widget(style='background:#eee;')
SomeInfoWidget()
if __name__ == '__main__':
m = flx.launch(Example, 'default-browser', backend='flask')
flx.run()
|
music21/tree/verticality.py | cuthbertLab/music21 | 1,449 | 107183 |
<gh_stars>1000+
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Name: tree/verticality.py
# Purpose: Object for dealing with vertical simultaneities in a
# fast way w/o Chord's overhead
#
# Authors: <NAME>
# <NAME>
#
# Copyright: Copyright © 2013-16 <NAME> and the music21
# Project
# License: BSD, see license.txt
# ----------------------------------------------------------------------------
'''
Object for dealing with vertical simultaneities in a fast way w/o Chord's overhead.
'''
import collections.abc
import copy
import itertools
import unittest
from music21 import chord
from music21 import common
from music21 import environment
from music21 import exceptions21
from music21 import note
from music21 import prebase
from music21 import tie
# from music21 import key
# from music21 import pitch
from music21.tree import spans
environLocal = environment.Environment('tree.verticality')
class VerticalityException(exceptions21.TreeException):
pass
class Verticality(prebase.ProtoM21Object):
r'''
A collection of information about elements that are sounding at a given
offset or just finished at that offset or are continuing from before, etc..
Create a timespan-stream from a score:
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
Find the verticality at offset 6.5, or beat 2.5 of measure 2 (there's a one
beat pickup)
>>> verticality = scoreTree.getVerticalityAt(6.5)
>>> verticality
<music21.tree.verticality.Verticality 6.5 {E3 D4 G#4 B4}>
The representation of a verticality gives the pitches from lowest to
highest (in sounding notes).
A verticality knows its offset, but because elements might end at
different times, it doesn't know its endTime
>>> verticality.offset
6.5
>>> verticality.endTime
Traceback (most recent call last):
AttributeError: 'Verticality' object has no attribute 'endTime'
However, we can find when the next verticality starts by looking at the nextVerticality
>>> nv = verticality.nextVerticality
>>> nv
<music21.tree.verticality.Verticality 7.0 {A2 C#4 E4 A4}>
>>> nv.offset
7.0
Or more simply:
>>> verticality.nextStartOffset
7.0
(There is also a previousVerticality, but not a previousStartOffset)
What we just demonstrated is actually very powerful: a Verticality keeps a
record of exactly where it is in the timespanTree -- scores can be
recreated with this information.
Getting back to the task at hand, we can find all the PitchedTimespans (and
from there the elements) that start at exactly 6.5. There's one, it's a
passing tone D in the tenor and it lasts from offset 6.5 to offset 7.0,
with respect to the beginning of the score, not to the beginning of the
measure. That is to say, it's an eighth note
>>> verticality.startTimespans
(<PitchedTimespan (6.5 to 7.0) <music21.note.Note D>>,)
And we can get all the PitchedTimespans that were already sounding at the
moment (that is to say, the non-passing tones):
>>> verticality.overlapTimespans
(<PitchedTimespan (6.0 to 7.0) <music21.note.Note B>>,
<PitchedTimespan (6.0 to 7.0) <music21.note.Note G#>>,
<PitchedTimespan (6.0 to 7.0) <music21.note.Note E>>)
And we can get all the things that stop right at this moment. It's the E
in the tenor preceding the passing tone D:
>>> verticality.stopTimespans
(<PitchedTimespan (6.0 to 6.5) <music21.note.Note E>>,)
'''
# CLASS VARIABLES #
__slots__ = (
'timespanTree',
'overlapTimespans',
'startTimespans',
'offset',
'stopTimespans',
)
_DOC_ATTR = {
'timespanTree': r'''
Returns the timespanTree initially set.
''',
'overlapTimespans': r'''
Gets timespans overlapping the start offset of a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(0.5)
>>> verticality
<music21.tree.verticality.Verticality 0.5 {G#3 B3 E4 B4}>
>>> verticality.overlapTimespans
(<PitchedTimespan (0.0 to 1.0) <music21.note.Note E>>,)
''',
'startTimespans': r'''
Gets the timespans starting at a verticality's start offset.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
>>> for timespan in verticality.startTimespans:
... timespan
...
<PitchedTimespan (1.0 to 2.0) <music21.note.Note A>>
<PitchedTimespan (1.0 to 2.0) <music21.note.Note F#>>
<PitchedTimespan (1.0 to 2.0) <music21.note.Note C#>>
<PitchedTimespan (1.0 to 2.0) <music21.note.Note F#>>
''',
'offset': r'''
Gets the start offset of a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
>>> verticality.offset
1.0
''',
'stopTimespans': r'''
Gets the timespans stopping at a verticality's start offset.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
Note that none of the elements in the stopTimespans are listed in
the repr for the Verticality
>>> for timespan in verticality.stopTimespans:
... timespan
...
<PitchedTimespan (0.0 to 1.0) <music21.note.Note E>>
<PitchedTimespan (0.5 to 1.0) <music21.note.Note B>>
<PitchedTimespan (0.5 to 1.0) <music21.note.Note B>>
<PitchedTimespan (0.5 to 1.0) <music21.note.Note G#>>
''',
}
# INITIALIZER #
def __init__(self,
offset=None,
overlapTimespans=None,
startTimespans=None,
stopTimespans=None,
timespanTree=None,
):
from music21.tree import trees
if timespanTree is not None and not isinstance(timespanTree, trees.OffsetTree):
raise VerticalityException(
f'timespanTree {timespanTree!r} is not a OffsetTree or None')
self.timespanTree = timespanTree
self.offset = offset
if not isinstance(startTimespans, tuple):
raise VerticalityException(f'startTimespans must be a tuple, not {startTimespans!r}')
if not isinstance(stopTimespans, (tuple, type(None))):
raise VerticalityException(
f'stopTimespans must be a tuple or None, not {stopTimespans!r}')
if not isinstance(overlapTimespans, (tuple, type(None))):
raise VerticalityException(
f'overlapTimespans must be a tuple or None, not {overlapTimespans!r}')
self.startTimespans = startTimespans
self.stopTimespans = stopTimespans
self.overlapTimespans = overlapTimespans
# SPECIAL METHODS #
def _reprInternal(self):
sortedPitches = sorted(self.pitchSet)
enclosedNames = '{' + ' '.join(x.nameWithOctave for x in sortedPitches) + '}'
return f'{self.offset} {enclosedNames}'
# PUBLIC PROPERTIES #
@property
def bassTimespan(self):
r'''
Gets the bass timespan in this verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
>>> verticality.bassTimespan
<PitchedTimespan (1.0 to 2.0) <music21.note.Note F#>>
'''
overallLowestPitch = None
lowestTimespan = None
for ts in self.startAndOverlapTimespans:
if not hasattr(ts, 'pitches'):
continue
tsPitches = ts.pitches
if not tsPitches:
continue
lowestPitch = sorted(tsPitches)[0]
if overallLowestPitch is None:
overallLowestPitch = lowestPitch
lowestTimespan = ts
if lowestPitch <= overallLowestPitch:
overallLowestPitch = lowestPitch
lowestTimespan = ts
return lowestTimespan
@property
def beatStrength(self):
r'''
Gets the beat strength of a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality.beatStrength
1.0
Note that it will return None if there are no startTimespans at this point:
>>> verticality = scoreTree.getVerticalityAt(1.25)
>>> verticality
<music21.tree.verticality.Verticality 1.25 {F#3 C#4 F#4 A4}>
>>> verticality.startTimespans
()
>>> verticality.beatStrength is None
True
'''
try:
thisTimespan = self.startTimespans[0]
except IndexError:
return None
return thisTimespan.element.beatStrength
def toChord(self):
'''
creates a chord.Chord object of default length (1.0 or
the duration of some note object) from the verticality.
Does nothing about ties, etc. -- a very dumb chord, but useful
for querying consonance, etc. See makeElement() for the smart version.
It may be a zero- or one-pitch chord.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = score.asTimespans()
>>> verticality = scoreTree.getVerticalityAt(4.0)
>>> verticality.toChord()
<music21.chord.Chord G#3 B3 E4 E5>
'''
c = chord.Chord(sorted(self.pitchSet))
return c
@property
def measureNumber(self):
r'''
Gets the measure number of the verticality's starting elements.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(7.0)
>>> verticality.measureNumber
2
'''
return self.startTimespans[0].measureNumber
@property
def nextStartOffset(self):
r'''
Gets the next start-offset in the verticality's offset-tree.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality.nextStartOffset
2.0
If a verticality has no tree attached, then it will return None
'''
tree = self.timespanTree
if tree is None:
return None
offset = tree.getPositionAfter(self.offset)
return offset
@property
def nextVerticality(self):
r'''
Gets the next verticality after a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> print(verticality)
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
>>> nextVerticality = verticality.nextVerticality
>>> print(nextVerticality)
<music21.tree.verticality.Verticality 2.0 {G#3 B3 E4 B4}>
Verticality objects created by an offset-tree hold a reference back to
that offset-tree. This means that they determine their next or previous
verticality dynamically based on the state of the offset-tree only when
asked. Because of this, it is safe to mutate the offset-tree by
inserting or removing timespans while iterating over it.
>>> scoreTree.removeTimespanList(nextVerticality.startTimespans)
>>> verticality.nextVerticality
<music21.tree.verticality.Verticality 3.0 {A3 E4 C#5}>
'''
tree = self.timespanTree
if tree is None:
return None
offset = tree.getPositionAfter(self.offset)
if offset is None:
return None
return tree.getVerticalityAt(offset)
@property
def pitchSet(self):
r'''
Gets the pitch set of all elements in a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> for pitch in sorted(verticality.pitchSet):
... pitch
...
<music21.pitch.Pitch F#3>
<music21.pitch.Pitch C#4>
<music21.pitch.Pitch F#4>
<music21.pitch.Pitch A4>
'''
pitchNameSet = set()
pitchSet = set()
for timespan in self.startAndOverlapTimespans:
if not hasattr(timespan, 'pitches'):
continue
for p in timespan.pitches:
pName = p.nameWithOctave
if pName in pitchNameSet:
continue
pitchNameSet.add(pName)
pitchSet.add(p)
return pitchSet
@property
def pitchClassSet(self):
r'''
Gets a set of all pitches in a verticality with distinct pitchClasses
>>> n1 = note.Note('C4')
>>> n2 = note.Note('B#5')
>>> s = stream.Stream()
>>> s.insert(4.0, n1)
>>> s.insert(4.0, n2)
>>> scoreTree = s.asTimespans()
>>> verticality = scoreTree.getVerticalityAt(4.0)
>>> pitchSet = verticality.pitchSet
>>> list(sorted(pitchSet))
[<music21.pitch.Pitch C4>, <music21.pitch.Pitch B#5>]
PitchClassSet will return only one pitch. Which of these
is returned is arbitrary.
>>> pitchClassSet = verticality.pitchClassSet
>>> #_DOCS_SHOW list(sorted(pitchClassSet))
>>> print('[<music21.pitch.Pitch B#5>]') #_DOCS_HIDE
[<music21.pitch.Pitch B#5>]
'''
outPitchSet = set()
pitchClassSet = set()
for currentPitch in self.pitchSet:
pitchClass = currentPitch.pitchClass
if pitchClass in pitchClassSet:
continue
pitchClassSet.add(pitchClass)
outPitchSet.add(currentPitch)
return outPitchSet
@property
def previousVerticality(self):
r'''
Gets the previous verticality before a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> print(verticality)
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
>>> previousVerticality = verticality.previousVerticality
>>> print(previousVerticality)
<music21.tree.verticality.Verticality 0.5 {G#3 B3 E4 B4}>
Continue it:
>>> v = scoreTree.getVerticalityAt(1.0)
>>> while v is not None:
... print(v)
... v = v.previousVerticality
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
<music21.tree.verticality.Verticality 0.5 {G#3 B3 E4 B4}>
<music21.tree.verticality.Verticality 0.0 {A3 E4 C#5}>
Verticality objects created by an offset-tree hold a reference back to
that offset-tree. This means that they determine their next or previous
verticality dynamically based on the state of the offset-tree only when
asked. Because of this, it is safe to mutate the offset-tree by
inserting or removing timespans while iterating over it.
>>> scoreTree.removeTimespanList(previousVerticality.startTimespans)
>>> verticality.previousVerticality
<music21.tree.verticality.Verticality 0.0 {A3 E4 C#5}>
'''
tree = self.timespanTree
if tree is None:
return None
offset = tree.getPositionBefore(self.offset)
if offset is None:
return None
return tree.getVerticalityAt(offset)
@property
def startAndOverlapTimespans(self):
'''
Return a tuple adding the start and overlap timespans into one.
>>> n1 = note.Note('C4')
>>> n2 = note.Note('D4')
>>> s = stream.Stream()
>>> s.insert(4.0, n1)
>>> s.insert(4.5, n2)
>>> scoreTree = s.asTimespans()
>>> verticality = scoreTree.getVerticalityAt(4.5)
>>> verticality.startTimespans
(<PitchedTimespan (4.5 to 5.5) <music21.note.Note D>>,)
>>> verticality.overlapTimespans
(<PitchedTimespan (4.0 to 5.0) <music21.note.Note C>>,)
>>> verticality.startAndOverlapTimespans
(<PitchedTimespan (4.5 to 5.5) <music21.note.Note D>>,
<PitchedTimespan (4.0 to 5.0) <music21.note.Note C>>)
>>> verticality = scoreTree.getVerticalityAt(4.0)
>>> verticality.startAndOverlapTimespans
(<PitchedTimespan (4.0 to 5.0) <music21.note.Note C>>,)
'''
if self.overlapTimespans is None:
return tuple(self.startTimespans)
return tuple(self.startTimespans[:] + self.overlapTimespans[:])
# makeElement
def makeElement(self,
quarterLength=1.0,
*,
addTies=True,
addPartIdAsGroup=False,
removeRedundantPitches=True,
gatherArticulations='single',
gatherExpressions='single',
copyPitches=True,
):
r'''
Makes a Chord or Rest from this verticality and quarterLength.
>>> score = tree.makeExampleScore()
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(4.0)
>>> verticality
<music21.tree.verticality.Verticality 4.0 {E#3 G3}>
>>> verticality.startTimespans
(<PitchedTimespan (4.0 to 5.0) <music21.note.Note G>>,
<PitchedTimespan (4.0 to 6.0) <music21.note.Note E#>>)
>>> el = verticality.makeElement(2.0)
>>> el
<music21.chord.Chord E#3 G3>
>>> el.duration.quarterLength
2.0
>>> el.duration.type
'half'
If there is nothing there, then a Rest is created
>>> verticality = scoreTree.getVerticalityAt(400.0)
>>> verticality
<music21.tree.verticality.Verticality 400.0 {}>
>>> el = verticality.makeElement(1/3)
>>> el
<music21.note.Rest 1/3ql>
>>> el.duration.fullName
'Eighth Triplet (1/3 QL)'
>>> n1 = note.Note('C4')
>>> n2 = note.Note('C4')
>>> s = stream.Score()
>>> s.insert(0, n1)
>>> s.insert(0.5, n2)
>>> scoreTree = s.asTimespans()
>>> verticality = scoreTree.getVerticalityAt(0.5)
>>> c = verticality.makeElement(0.5)
>>> c
<music21.chord.Chord C4>
>>> c = verticality.makeElement(0.5, removeRedundantPitches=False)
>>> c
<music21.chord.Chord C4 C4>
Generally the pitches of the new element are not connected to the original pitch:
>>> c[0].pitch.name = 'E'
>>> c[1].pitch.name = 'F'
>>> (n1.name, n2.name)
('C', 'C')
But if `copyPitches` is False then the original pitch will be used:
>>> n1.name = 'D'
>>> n2.name = 'E'
>>> c = verticality.makeElement(0.5, removeRedundantPitches=False, copyPitches=False)
>>> c
<music21.chord.Chord D4 E4>
>>> c[0].pitch.name = 'F'
>>> c[1].pitch.name = 'G'
>>> (n1.name, n2.name)
('F', 'G')
gatherArticulations and gatherExpressions can be True, False, or (default) 'single'.
* If False, no articulations (or expressions) are transferred to the chord.
* If True, all articulations are transferred to the chord.
* If 'single', then no more than one articulation of each class (chosen from the lowest
note) will be added. This way, the chord does not get 4 fermatas, etc.
>>> n1 = note.Note('C4')
>>> n2 = note.Note('D4')
>>> s = stream.Stream()
>>> s.insert(0, n1)
>>> s.insert(0.5, n2)
>>> class AllAttachArticulation(articulations.Articulation):
... def __init__(self):
... super().__init__()
... self.tieAttach = 'all'
>>> class OtherAllAttachArticulation(articulations.Articulation):
... def __init__(self):
... super().__init__()
... self.tieAttach = 'all'
>>> n1.articulations.append(articulations.Accent())
>>> n1.articulations.append(AllAttachArticulation())
>>> n1.expressions.append(expressions.Fermata())
>>> n2.articulations.append(articulations.Staccato())
>>> n2.articulations.append(AllAttachArticulation())
>>> n2.articulations.append(OtherAllAttachArticulation())
>>> n2.expressions.append(expressions.Fermata())
>>> scoreTree = s.asTimespans()
>>> verticality = scoreTree.getVerticalityAt(0.0)
>>> c = verticality.makeElement(1.0)
>>> c.expressions
[<music21.expressions.Fermata>]
>>> c.articulations
[<music21.articulations.Accent>, <...AllAttachArticulation>]
>>> verticality = scoreTree.getVerticalityAt(0.5)
Here there will be no expressions, because there is no note ending
at 0.75 and Fermatas attach to the last note:
>>> c = verticality.makeElement(0.25)
>>> c.expressions
[]
>>> c = verticality.makeElement(0.5)
>>> c.expressions
[<music21.expressions.Fermata>]
Only two articulations, since accent attaches to beginning and staccato attaches to last
and we are beginning after the start of the first note (with an accent)
and cutting right through the second note (with a staccato)
>>> c.articulations
[<...AllAttachArticulation>,
<...OtherAllAttachArticulation>]
>>> c = verticality.makeElement(0.5, gatherArticulations=True)
>>> c.articulations
[<...AllAttachArticulation>,
<...AllAttachArticulation>,
<...OtherAllAttachArticulation>]
>>> c = verticality.makeElement(0.5, gatherArticulations=False)
>>> c.articulations
[]
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> c = verticality.makeElement(0.5)
>>> c.expressions
[<music21.expressions.Fermata>]
>>> c.articulations
[<music21.articulations.Staccato>,
<...AllAttachArticulation>,
<...OtherAllAttachArticulation>]
Added in v6.3: copyPitches option
OMIT_FROM_DOCS
Test that copyPitches works with expressions:
>>> c = verticality.makeElement(0.5, copyPitches=False)
>>> c
<music21.chord.Chord D4>
>>> c.pitches[0].accidental = pitch.Accidental('sharp')
>>> n2
<music21.note.Note D#>
'''
if not self.pitchSet:
r = note.Rest()
r.duration.quarterLength = common.opFrac(quarterLength)
return r
# easy stuff done, time to get to the hard stuff...
c = chord.Chord()
c.duration.quarterLength = common.opFrac(quarterLength)
dur = c.duration
seenPitches = set()
notesToAdd = {}
startStopSet = {'start', 'stop'}
pitchBust = 0 # used if removeRedundantPitches is False.
def newNote(ts, n):
'''
Make a copy of the note and clear some settings
'''
nNew = copy.deepcopy(n)
nNew.duration = dur
if not copyPitches:
nNew.pitch = n.pitch
if nNew.stemDirection != 'noStem':
nNew.stemDirection = None
if not addTies:
return nNew
offsetDifference = common.opFrac(self.offset - ts.offset)
endTimeDifference = common.opFrac(ts.endTime - (self.offset + quarterLength))
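            # Tie decision: a timespan that began before this verticality either
            # continues or stops here; one that ends after the new element's span
            # starts (or continues) a tie; a timespan fully covered by the new
            # element needs no tie at all.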
if offsetDifference == 0 and endTimeDifference <= 0:
addTie = None
elif offsetDifference > 0:
if endTimeDifference > 0:
addTie = 'continue'
else:
addTie = 'stop'
elif endTimeDifference > 0:
addTie = 'start'
else:
raise VerticalityException('What possibility was missed?',
offsetDifference, endTimeDifference, ts, self)
if nNew.tie is not None and {nNew.tie.type, addTie} == startStopSet:
nNew.tie.type = 'continue'
elif nNew.tie is not None and nNew.tie.type == 'continue':
nNew.tie.placement = None
elif addTie is None and nNew.tie is not None:
nNew.tie.placement = None
elif addTie:
nNew.tie = tie.Tie(addTie)
return nNew
def conditionalAdd(ts, n):
'''
Add an element only if it is not already in the chord.
If it has more tie information than the previously
added note, then remove the previously added note and add it
'''
nonlocal pitchBust # love Py3!!!
p = n.pitch
pitchKey = p.nameWithOctave
pitchGroup = None
if addPartIdAsGroup:
partContext = n.getContextByClass('Part')
if partContext is not None:
pidStr = str(partContext.id)
pitchGroup = pidStr.replace(' ', '_') # spaces are not allowed as group names
n.pitch.groups.append(pitchGroup)
n.groups.append(pitchGroup)
if pitchKey not in seenPitches:
seenPitches.add(pitchKey)
notesToAdd[pitchKey] = newNote(ts, n)
return
elif not removeRedundantPitches:
notesToAdd[pitchKey + str(pitchBust)] = newNote(ts, n)
pitchBust += 1
return
elif addPartIdAsGroup:
notesToAdd[pitchKey].groups.append(pitchGroup)
notesToAdd[pitchKey].pitch.groups.append(pitchGroup)
if not addTies:
return
# else add derivation once multiple derivations are allowed.
oldNoteTie = notesToAdd[pitchKey].tie
if oldNoteTie is not None and oldNoteTie.type == 'continue':
return # previous note was as good or better
possibleNewNote = newNote(ts, n)
possibleNewNote.groups = notesToAdd[pitchKey].groups
if possibleNewNote.tie is None:
return # do nothing
elif oldNoteTie is None:
notesToAdd[pitchKey] = possibleNewNote # a better note to add
elif {oldNoteTie.type, possibleNewNote.tie.type} == startStopSet:
notesToAdd[pitchKey].tie.type = 'continue'
elif possibleNewNote.tie.type == 'continue':
notesToAdd[pitchKey] = possibleNewNote # a better note to add
elif possibleNewNote.tie.type == oldNoteTie.type:
return
else:
raise VerticalityException('Did I miss one? ', possibleNewNote.tie, oldNoteTie)
for ts in self.startAndOverlapTimespans:
if not isinstance(ts, spans.PitchedTimespan):
continue
el = ts.element
if isinstance(el, chord.Chord):
if len(el) == 0: # pylint: disable=len-as-condition
continue
if el.articulations or el.expressions:
firstSubEl = copy.deepcopy(el[0]) # this makes an additional deepcopy
firstSubEl.articulations += el.articulations
firstSubEl.expressions += el.expressions
if not copyPitches:
firstSubEl.pitch = el[0].pitch
else:
firstSubEl = el[0]
conditionalAdd(ts, firstSubEl)
if len(el) > 1:
for subEl in list(el)[1:]:
conditionalAdd(ts, subEl)
else:
conditionalAdd(ts, el)
seenArticulations = set()
seenExpressions = set()
# pylint: disable=unidiomatic-typecheck
for n in sorted(notesToAdd.values(), key=lambda x: x.pitch.ps):
c.add(n)
if gatherArticulations:
for art in n.articulations:
if art.tieAttach == 'first' and n.tie is not None and n.tie.type != 'start':
continue
if art.tieAttach == 'last' and n.tie is not None and n.tie.type != 'stop':
continue
if gatherArticulations == 'single' and type(art) in seenArticulations:
continue
c.articulations.append(art)
seenArticulations.add(type(art))
if gatherExpressions:
for exp in n.expressions:
if exp.tieAttach == 'first' and n.tie is not None and n.tie.type != 'start':
continue
if exp.tieAttach == 'last' and n.tie is not None and n.tie.type != 'stop':
continue
if gatherExpressions == 'single' and type(exp) in seenExpressions:
continue
c.expressions.append(exp)
seenExpressions.add(type(exp))
return c
# Analysis type things...
def getAllVoiceLeadingQuartets(self, includeRests=True, includeOblique=True,
includeNoMotion=False, returnObjects=True,
partPairNumbers=None):
'''
>>> c = corpus.parse('luca/gloria').measures(1, 8)
>>> tsCol = tree.fromStream.asTimespans(c, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality22 = tsCol.getVerticalityAt(22.0)
>>> from pprint import pprint as pp
>>> for vlq in verticality22.getAllVoiceLeadingQuartets():
... pp(vlq)
<music21.voiceLeading.VoiceLeadingQuartet
v1n1=G4, v1n2=C4, v2n1=E4, v2n2=F4>
<music21.voiceLeading.VoiceLeadingQuartet
v1n1=G4, v1n2=C4, v2n1=A3, v2n2=A3>
<music21.voiceLeading.VoiceLeadingQuartet
v1n1=E4, v1n2=F4, v2n1=A3, v2n2=A3>
>>> for vlq in verticality22.getAllVoiceLeadingQuartets(includeRests=False):
... pp(vlq)
<music21.voiceLeading.VoiceLeadingQuartet
v1n1=E4, v1n2=F4, v2n1=A3, v2n2=A3>
>>> for vlq in verticality22.getAllVoiceLeadingQuartets(includeOblique=False):
... pp(vlq)
<music21.voiceLeading.VoiceLeadingQuartet
v1n1=G4, v1n2=C4, v2n1=E4, v2n2=F4>
>>> verticality22.getAllVoiceLeadingQuartets(includeOblique=False, includeRests=False)
[]
Raw output
>>> for vlqRaw in verticality22.getAllVoiceLeadingQuartets(returnObjects=False):
... pp(vlqRaw)
((<PitchedTimespan (19.5 to 21.0) <music21.note.Note G>>,
<PitchedTimespan (22.0 to 22.5) <music21.note.Note C>>),
(<PitchedTimespan (21.0 to 22.0) <music21.note.Note E>>,
<PitchedTimespan (22.0 to 23.0) <music21.note.Note F>>))
((<PitchedTimespan (19.5 to 21.0) <music21.note.Note G>>,
<PitchedTimespan (22.0 to 22.5) <music21.note.Note C>>),
(<PitchedTimespan (21.5 to 22.5) <music21.note.Note A>>,
<PitchedTimespan (21.5 to 22.5) <music21.note.Note A>>))
((<PitchedTimespan (21.0 to 22.0) <music21.note.Note E>>,
<PitchedTimespan (22.0 to 23.0) <music21.note.Note F>>),
(<PitchedTimespan (21.5 to 22.5) <music21.note.Note A>>,
<PitchedTimespan (21.5 to 22.5) <music21.note.Note A>>))
>>> for vlq in verticality22.getAllVoiceLeadingQuartets(partPairNumbers=[(0, 1)]):
... pp(vlq)
<music21.voiceLeading.VoiceLeadingQuartet
v1n1=G4, v1n2=C4, v2n1=E4, v2n2=F4>
>>> for vlq in verticality22.getAllVoiceLeadingQuartets(partPairNumbers=[(0, 2), (1, 2)]):
... pp(vlq)
<music21.voiceLeading.VoiceLeadingQuartet
v1n1=G4, v1n2=C4, v2n1=A3, v2n2=A3>
<music21.voiceLeading.VoiceLeadingQuartet
v1n1=E4, v1n2=F4, v2n1=A3, v2n2=A3>
'''
from music21.voiceLeading import VoiceLeadingQuartet
pairedMotionList = self.getPairedMotion(includeRests=includeRests,
includeOblique=includeOblique)
allQuartets = itertools.combinations(pairedMotionList, 2)
filteredList = []
verticalityStreamParts = self.timespanTree.source.parts
for thisQuartet in allQuartets:
if includeNoMotion is False:
if (thisQuartet[0][0].pitches == thisQuartet[0][1].pitches
and thisQuartet[1][0].pitches == thisQuartet[1][1].pitches):
continue
if partPairNumbers is not None:
isAppropriate = False
for pp in partPairNumbers:
thisQuartetTopPart = thisQuartet[0][0].part
thisQuartetBottomPart = thisQuartet[1][0].part
if ((verticalityStreamParts[pp[0]] == thisQuartetTopPart
or verticalityStreamParts[pp[0]] == thisQuartetBottomPart)
and (verticalityStreamParts[pp[1]] == thisQuartetTopPart
or verticalityStreamParts[pp[1]] == thisQuartetBottomPart)):
isAppropriate = True
break
if not isAppropriate:
continue
if returnObjects is False:
filteredList.append(thisQuartet)
else:
n11 = thisQuartet[0][0].element
n12 = thisQuartet[0][1].element
n21 = thisQuartet[1][0].element
n22 = thisQuartet[1][1].element
if (n11 is not None
and n12 is not None
and n21 is not None
and n22 is not None):
vlq = VoiceLeadingQuartet(n11, n12, n21, n22)
filteredList.append(vlq)
return filteredList
def getPairedMotion(self, includeRests=True, includeOblique=True):
'''
Get a list of two-element tuples that are in the same part [TODO: or containing stream??]
and which move here.
>>> c = corpus.parse('luca/gloria').measures(1, 8)
>>> tsCol = tree.fromStream.asTimespans(c, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality22 = tsCol.getVerticalityAt(22.0)
>>> for pm in verticality22.getPairedMotion():
... print(pm)
(<PitchedTimespan (19.5 to 21.0) <music21.note.Note G>>,
<PitchedTimespan (22.0 to 22.5) <music21.note.Note C>>)
(<PitchedTimespan (21.0 to 22.0) <music21.note.Note E>>,
<PitchedTimespan (22.0 to 23.0) <music21.note.Note F>>)
(<PitchedTimespan (21.5 to 22.5) <music21.note.Note A>>,
<PitchedTimespan (21.5 to 22.5) <music21.note.Note A>>)
Note that the second one contains a one-beat rest at 21.0-22.0; so includeRests = False will
get rid of that:
>>> for pm in verticality22.getPairedMotion(includeRests=False):
... print(pm)
(<PitchedTimespan (21.0 to 22.0) <music21.note.Note E>>,
<PitchedTimespan (22.0 to 23.0) <music21.note.Note F>>)
(<PitchedTimespan (21.5 to 22.5) <music21.note.Note A>>,
<PitchedTimespan (21.5 to 22.5) <music21.note.Note A>>)
Oblique here means a pair that does not move (it could be called noMotion,
because there's no motion
here in a two-note pair, but we still call it includeOblique so it's consistent with
getAllVoiceLeadingQuartets).
>>> for pm in verticality22.getPairedMotion(includeOblique=False):
... print(pm)
(<PitchedTimespan (19.5 to 21.0) <music21.note.Note G>>,
<PitchedTimespan (22.0 to 22.5) <music21.note.Note C>>)
(<PitchedTimespan (21.0 to 22.0) <music21.note.Note E>>,
<PitchedTimespan (22.0 to 23.0) <music21.note.Note F>>)
>>> for pm in verticality22.getPairedMotion(includeOblique=False, includeRests=False):
... print(pm)
(<PitchedTimespan (21.0 to 22.0) <music21.note.Note E>>,
<PitchedTimespan (22.0 to 23.0) <music21.note.Note F>>)
'''
stopTss = self.stopTimespans
startTss = self.startTimespans
overlapTss = self.overlapTimespans
allPairedMotions = []
for startingTs in startTss:
previousTs = self.timespanTree.findPreviousPitchedTimespanInSameStreamByClass(
startingTs)
if previousTs is None:
continue # first not in piece in this part...
if includeRests is False:
if previousTs not in stopTss:
continue
if includeOblique is False and startingTs.pitches == previousTs.pitches:
continue
tsTuple = (previousTs, startingTs)
allPairedMotions.append(tsTuple)
if includeOblique is True:
for overlapTs in overlapTss:
tsTuple = (overlapTs, overlapTs)
allPairedMotions.append(tsTuple)
return allPairedMotions
# -----------------------------------------------------------------------------
class VerticalitySequence(prebase.ProtoM21Object, collections.abc.Sequence):
r'''
A segment of verticalities.
'''
# INITIALIZER #
def __init__(self, verticalities):
self._verticalities = tuple(verticalities)
# SPECIAL METHODS #
def __getitem__(self, item):
return self._verticalities[item]
def __len__(self):
return len(self._verticalities)
# noinspection PyProtectedMember
def _reprInternal(self):
internalRepr = ',\n\t'.join('(' + x._reprInternal() + ')' for x in self)
out = f'[\n\t{internalRepr}\n\t]'
return out
# PUBLIC METHODS #
def unwrap(self):
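        '''
        Group every timespan sounding in this sequence by Part and wrap each
        group in a tree.analysis.Horizontality, returning a dictionary that
        maps each Part to its Horizontality.
        '''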
from music21.tree.analysis import Horizontality
unwrapped = {}
for timespan in self[0].overlapTimespans:
if timespan.part not in unwrapped:
unwrapped[timespan.part] = []
unwrapped[timespan.part].append(timespan)
for timespan in self[0].startTimespans:
if timespan.part not in unwrapped:
unwrapped[timespan.part] = []
unwrapped[timespan.part].append(timespan)
for verticality in self[1:]:
for timespan in verticality.startTimespans:
if timespan.part not in unwrapped:
unwrapped[timespan.part] = []
unwrapped[timespan.part].append(timespan)
for part, timespans in unwrapped.items():
horizontality = Horizontality(timespans=timespans)
unwrapped[part] = horizontality
return unwrapped
# -----------------------------------------------------------------------------
class Test(unittest.TestCase):
pass
# -----------------------------------------------------------------------------
_DOC_ORDER = (Verticality, VerticalitySequence)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
import music21
music21.mainTest(Test)
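# Illustrative sketch: a tiny chordified reduction built from verticalities,
# using only calls demonstrated in the doctests above (corpus.parse,
# tree.fromStream.asTimespans, getVerticalityAt, makeElement); the offsets and
# the fixed quarterLength of 1.0 are arbitrary example values.
#     from music21 import corpus, note, chord, stream, tree
#     score = corpus.parse('bwv66.6')
#     scoreTree = tree.fromStream.asTimespans(
#         score, flatten=True, classList=(note.Note, chord.Chord))
#     reduction = stream.Part()
#     for offset in (0.0, 1.0, 2.0, 3.0):
#         v = scoreTree.getVerticalityAt(offset)
#         reduction.insert(offset, v.makeElement(1.0))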
|
vendor/cuda/configure.bzl
|
redoclag/plaidml
| 4,535 |
107186
|
<filename>vendor/cuda/configure.bzl
_CUDA_TOOLKIT_PATH = "CUDA_TOOLKIT_PATH"
_VAI_CUDA_REPO_VERSION = "VAI_CUDA_REPO_VERSION"
_VAI_NEED_CUDA = "VAI_NEED_CUDA"
_DEFAULT_CUDA_TOOLKIT_PATH = "/usr/local/cuda"
# Lookup paths for CUDA / cuDNN libraries, relative to the install directories.
#
# Paths will be tried out in the order listed below. The first successful path
# will be used. For example, when looking for the cudart libraries, the first
# attempt will be lib64/cudart inside the CUDA toolkit.
CUDA_LIB_PATHS = [
"lib64/",
"lib64/stubs/",
"lib/x86_64-linux-gnu/",
"lib/x64/",
"lib/",
"",
]
# Lookup paths for CUDA headers (cuda.h) relative to the CUDA toolkit directory.
CUDA_INCLUDE_PATHS = [
"include/",
"include/cuda/",
]
def get_cpu_value(ctx):
os_name = ctx.os.name.lower()
if os_name.startswith("mac os"):
return "Darwin"
if os_name.find("windows") != -1:
return "Windows"
result = ctx.execute(["uname", "-s"])
return result.stdout.strip()
def _is_windows(ctx):
"""Returns true if the host operating system is windows."""
return get_cpu_value(ctx) == "Windows"
def _lib_name(lib, cpu_value, version = "", static = False):
"""Constructs the platform-specific name of a library.
Args:
lib: The name of the library, such as "cudart"
cpu_value: The name of the host operating system.
version: The version of the library.
      static: True if the library is static, or False if it is a shared object.
Returns:
The platform-specific name of the library.
"""
if cpu_value in ("Linux", "FreeBSD"):
if static:
return "lib%s.a" % lib
else:
if version:
version = ".%s" % version
return "lib%s.so%s" % (lib, version)
if cpu_value == "Windows":
return "%s.lib" % lib
if cpu_value == "Darwin":
if static:
return "lib%s.a" % lib
if version:
version = ".%s" % version
return "lib%s%s.dylib" % (lib, version)
fail("Invalid cpu_value: %s" % cpu_value)
def _tpl(ctx, tpl, substitutions = {}, out = None):
if not out:
out = tpl.replace(":", "/")
ctx.template(
out,
Label("@com_intel_plaidml//vendor/cuda:%s.tpl" % tpl),
substitutions,
)
def _cuda_toolkit_path(ctx):
path = ctx.os.environ.get(_CUDA_TOOLKIT_PATH, _DEFAULT_CUDA_TOOLKIT_PATH)
if not ctx.path(path).exists:
fail("Cannot find CUDA toolkit path.")
return str(ctx.path(path).realpath)
def _get_cuda_config(ctx):
"""Detects and returns information about the CUDA installation on the system.
Args:
ctx: The repository context.
Returns:
A struct containing the following fields:
cuda_toolkit_path: The CUDA toolkit installation directory.
compute_capabilities: A list of the system's CUDA compute capabilities.
cpu_value: The name of the host operating system.
"""
cpu_value = get_cpu_value(ctx)
cuda_toolkit_path = _cuda_toolkit_path(ctx)
return struct(
cuda_toolkit_path = cuda_toolkit_path,
# compute_capabilities = _compute_capabilities(ctx),
cpu_value = cpu_value,
)
def _find_cuda_include_path(ctx, cuda_config):
"""Returns the path to the directory containing cuda.h
Args:
ctx: The repository context.
cuda_config: The CUDA config as returned by _get_cuda_config
Returns:
The path of the directory containing the CUDA headers.
"""
cuda_toolkit_path = cuda_config.cuda_toolkit_path
for relative_path in CUDA_INCLUDE_PATHS:
if ctx.path("%s/%scuda.h" % (cuda_toolkit_path, relative_path)).exists:
return ("%s/%s" % (cuda_toolkit_path, relative_path))[:-1]
fail("Cannot find cuda.h under %s" % cuda_toolkit_path)
def _create_dummy_repository(ctx):
cpu_value = get_cpu_value(ctx)
_tpl(ctx, "build_defs.bzl", {
"%{cuda_is_configured}": "False",
})
_tpl(ctx, "BUILD", {
"%{cuda_driver_lib}": _lib_name("cuda", cpu_value),
"%{nvrtc_lib}": _lib_name("nvrtc", cpu_value),
"%{nvrtc_builtins_lib}": _lib_name("nvrtc-builtins", cpu_value),
"%{cuda_include_genrules}": "",
"%{cuda_headers}": "",
})
ctx.file("include/cuda.h", "")
ctx.file("lib/%s" % _lib_name("cuda", cpu_value))
def _find_cuda_lib(lib, ctx, cpu_value, basedir, version = "", static = False):
"""Finds the given CUDA or cuDNN library on the system.
Args:
lib: The name of the library, such as "cudart"
ctx: The repository context.
cpu_value: The name of the host operating system.
basedir: The install directory of CUDA or cuDNN.
version: The version of the library.
static: True if static library, False if shared object.
Returns:
Returns a struct with the following fields:
file_name: The basename of the library found on the system.
path: The full path to the library.
"""
file_name = _lib_name(lib, cpu_value, version, static)
for relative_path in CUDA_LIB_PATHS:
path = ctx.path("%s/%s%s" % (basedir, relative_path, file_name))
if path.exists:
return struct(file_name = file_name, path = str(path.realpath))
fail("Cannot find cuda library %s" % file_name)
def _find_libs(ctx, cuda_config):
"""Returns the CUDA and cuDNN libraries on the system.
Args:
ctx: The repository context.
cuda_config: The CUDA config as returned by _get_cuda_config
Returns:
Map of library names to structs of filename and path.
"""
cpu_value = cuda_config.cpu_value
return {
"cuda": _find_cuda_lib("cuda", ctx, cpu_value, cuda_config.cuda_toolkit_path),
"nvrtc": _find_cuda_lib("nvrtc", ctx, cpu_value, cuda_config.cuda_toolkit_path),
"nvrtc_builtins": _find_cuda_lib("nvrtc-builtins", ctx, cpu_value, cuda_config.cuda_toolkit_path),
}
def _execute(ctx, cmdline, error_msg = None, error_details = None, empty_stdout_fine = False):
"""Executes an arbitrary shell command.
Args:
ctx: The repository context.
cmdline: list of strings, the command to execute
error_msg: string, a summary of the error if the command fails
error_details: string, details about the error or steps to fix it
empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise
it's an error
Return:
the result of ctx.execute(cmdline)
"""
result = ctx.execute(cmdline)
if result.stderr or not (empty_stdout_fine or result.stdout):
fail(
"\n".join([
error_msg.strip() if error_msg else "Repository command failed",
result.stderr.strip(),
error_details if error_details else "",
]),
)
return result
def _read_dir(ctx, src_dir):
"""Returns a string with all files in a directory.
Finds all files inside a directory, traversing subfolders and following
symlinks. The returned string contains the full path of all files
separated by line breaks.
"""
if _is_windows(ctx):
src_dir = src_dir.replace("/", "\\")
find_result = _execute(
ctx,
["cmd.exe", "/c", "dir", src_dir, "/b", "/s", "/a-d"],
empty_stdout_fine = True,
)
# src_files will be used in genrule.outs where the paths must
# use forward slashes.
result = find_result.stdout.replace("\\", "/")
else:
find_result = _execute(
ctx,
["find", src_dir, "-follow", "-type", "f"],
empty_stdout_fine = True,
)
result = find_result.stdout
return result
def _norm_path(path):
"""Returns a path with '/' and remove the trailing slash."""
path = path.replace("\\", "/")
if path[-1] == "/":
path = path[:-1]
return path
def symlink_genrule_for_dir(ctx, src_dir, dest_dir, genrule_name, src_files = [], dest_files = []):
"""Returns a genrule to symlink(or copy if on Windows) a set of files.
If src_dir is passed, files will be read from the given directory; otherwise
we assume files are in src_files and dest_files
"""
if src_dir != None:
src_dir = _norm_path(src_dir)
dest_dir = _norm_path(dest_dir)
files = "\n".join(sorted(_read_dir(ctx, src_dir).splitlines()))
# Create a list with the src_dir stripped to use for outputs.
dest_files = files.replace(src_dir, "").splitlines()
src_files = files.splitlines()
command = []
# We clear folders that might have been generated previously to avoid undesired inclusions
if genrule_name == "cuda-include":
command.append('if [ -d "$(@D)/cuda/include" ]; then rm -rf $(@D)/cuda/include; fi')
elif genrule_name == "cuda-lib":
command.append('if [ -d "$(@D)/cuda/lib" ]; then rm -rf $(@D)/cuda/lib; fi')
outs = []
for i in range(len(dest_files)):
if dest_files[i] != "":
# If we have only one file to link we do not want to use the dest_dir, as
# $(@D) will include the full path to the file.
dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i]
command.append("mkdir -p $$(dirname {})".format(dest))
command.append('cp -f "{}" "{}"'.format(src_files[i], dest))
outs.append(' "{}{}",'.format(dest_dir, dest_files[i]))
return _genrule(src_dir, genrule_name, command, outs)
def _genrule(src_dir, genrule_name, command, outs):
"""Returns a string with a genrule.
Genrule executes the given command and produces the given outputs.
"""
return "\n".join([
"genrule(",
' name = "{}",'.format(genrule_name),
" outs = [",
] + outs + [
" ],",
' cmd = """',
] + command + [
' """,',
")",
])
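# Illustrative sketch (hypothetical paths, output shown only approximately): a
# call such as
#     symlink_genrule_for_dir(ctx, None, "", "cuda-lib",
#                             ["/usr/local/cuda/lib64/stubs/libcuda.so"],
#                             ["cuda/lib/libcuda.so"])
# produces roughly the following genrule text for the generated BUILD file:
#     genrule(
#         name = "cuda-lib",
#         outs = [
#             "cuda/lib/libcuda.so",
#         ],
#         cmd = """
#     if [ -d "$(@D)/cuda/lib" ]; then rm -rf $(@D)/cuda/lib; fi
#     mkdir -p $$(dirname $(@D)/cuda/lib/libcuda.so)
#     cp -f "/usr/local/cuda/lib64/stubs/libcuda.so" "$(@D)/cuda/lib/libcuda.so"
#     """,
#     )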
def _create_cuda_repository(ctx):
cpu_value = get_cpu_value(ctx)
_tpl(ctx, "build_defs.bzl", {
"%{cuda_is_configured}": "True",
})
cuda_config = _get_cuda_config(ctx)
cuda_include_path = _find_cuda_include_path(ctx, cuda_config)
cuda_libs = _find_libs(ctx, cuda_config)
cuda_lib_src = []
cuda_lib_dest = []
for lib in cuda_libs.values():
cuda_lib_src.append(lib.path)
cuda_lib_dest.append("cuda/lib/" + lib.file_name)
genrules = [
symlink_genrule_for_dir(ctx, cuda_include_path, "cuda/include", "cuda-include"),
symlink_genrule_for_dir(ctx, None, "", "cuda-lib", cuda_lib_src, cuda_lib_dest),
]
_tpl(ctx, "BUILD", {
"%{cuda_driver_lib}": cuda_libs["cuda"].file_name,
"%{nvrtc_lib}": cuda_libs["nvrtc"].file_name,
"%{nvrtc_builtins_lib}": cuda_libs["nvrtc_builtins"].file_name,
"%{cuda_include_genrules}": "\n".join(genrules),
"%{cuda_headers}": '":cuda-include",',
})
def _configure_cuda_impl(ctx):
enable_cuda = ctx.os.environ.get(_VAI_NEED_CUDA, "0").strip()
if enable_cuda == "1":
_create_cuda_repository(ctx)
else:
_create_dummy_repository(ctx)
configure_cuda = repository_rule(
environ = [
_CUDA_TOOLKIT_PATH,
_VAI_CUDA_REPO_VERSION,
_VAI_NEED_CUDA,
],
implementation = _configure_cuda_impl,
)
|
ML/nlp/reuters_analysis.py
|
saneravi/ML_Stuff
| 209 |
107210
|
#!/usr/bin/env python
from collections import defaultdict
import numpy as np
from nltk.corpus import reuters
def analyze_data_distribution(cat2count):
i = 1
most_frequent_words = sorted(cat2count.items(),
key=lambda n: n[1]['train'],
reverse=True)
for el in most_frequent_words:
cat = el[0]
print("\t{:>2}: {:<20}: {:>4}\t{:>4}\t{:0.1f}"
.format(i, cat,
cat2count[cat]['train'],
cat2count[cat]['test'],
np.array(cat2count[cat]['words']).mean()))
i += 1
def analyze_vocabulary(corpus):
word2count = defaultdict(int)
for word in corpus:
word2count[word] += 1
most_freq = sorted(word2count.items(), key=lambda n: n[1], reverse=True)
for i, el in enumerate(most_freq[:10]):
print("{}. frequent word is {} ({} occurences)"
.format(i, el[0], el[1]))
# Create vocabulary
min_occurences = 20
max_occurences = 50
vocabulary = [word[0]
for word in word2count.items()
if word[1] >= min_occurences and word[1] <= max_occurences]
# Design decision: Should there be a pseudo-word OOV
# (out of vocabulary)?
with_oov = True
if with_oov:
word2wid = {'<OOV>': 0}
else:
word2wid = {}
vocabulary = list(vocabulary)
for wid, word in enumerate(vocabulary, start=len(word2wid)):
word2wid[word] = wid
print("Created word2wid")
# Analyze the vocabulary
print("total vocabulary = {}".format(len(word2count)))
print("vocabulary size = {} (min_occ={}, max_occ={})"
.format(len(word2wid), min_occurences, max_occurences))
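# Note (illustrative): the word2wid mapping built above would typically be used
# to encode a document, with unknown words falling back to the <OOV> id 0, e.g.
#     encoded = [word2wid.get(word, 0) for word in document]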
def main(categories, document_ids, verbose=False):
print(f"categories: {categories}")
print("number of categories: {}".format(len(categories)))
cat2catid = {}
for catid, cat in enumerate(sorted(categories)):
cat2catid[cat] = catid
documents = document_ids
test = [d for d in documents if d.startswith('test/')]
train = [d for d in documents if d.startswith('training/')]
print("train documents: {}".format(len(train)))
print("test documents: {}".format(len(test)))
# make it easy to map data to label
# gather simple statistics
id2cats = defaultdict(list)
cat2count = {}
for cat in categories:
for fid in reuters.fileids(cat):
id2cats[fid].append(cat)
if cat not in cat2count:
cat2count[cat] = {'train': 0, 'test': 0, 'words': []}
if fid in train:
cat2count[cat]['train'] += 1
else:
cat2count[cat]['test'] += 1
cat2count[cat]['words'].append(len(reuters.words(fid)))
print("How many labels do documents usually have?")
labelcount2doccount = defaultdict(int)
for _, cats in id2cats.items():
labelcount2doccount[len(cats)] += 1
s = sorted(labelcount2doccount.items(), reverse=True, key=lambda n: n[1])
for labelcount, documentcount in s:
print("\tlabelcount={:>3}, documentcount={:>3}"
.format(labelcount, documentcount))
# Analyze data distribution to classes
analyze_data_distribution(cat2count)
# Build corpus
corpus = []
for document_id in train:
corpus += list(reuters.words(document_id))
analyze_vocabulary(corpus)
def find_class_predictors(ys):
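    """For ordered pairs of the 90 Reuters classes (i, j), estimate how often a
    document's binary label for class i agrees with its label for class j.
    `ys` is expected to be an iterable of 90-element binary label vectors.
    """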
class_pred_corr = [[0.0 for _ in range(90)] for _ in range(90)]
class_pred_total = [[0.0 for _ in range(90)] for _ in range(90)]
for document_cats in ys:
for take_i in range(90):
for predict_i in range(90):
if take_i == 0:
continue
class_pred_total[take_i][predict_i] += 1
if document_cats[take_i] == document_cats[predict_i]:
class_pred_corr[take_i][predict_i] += 1
acc = []
for i in range(90):
line = []
for j in range(90):
if class_pred_total[i][j] == 0.0:
score = 0.0
else:
score = class_pred_corr[i][j] / class_pred_total[i][j]
line.append(score)
acc.append(line)
return acc
def print_class_predictors(acc):
score_list = []
for take_i in range(90):
for predict_i in range(90):
score_list.append({'take': take_i,
'pred': predict_i,
'acc': acc[take_i][predict_i]})
score_list = sorted(score_list, key=lambda n: n['acc'], reverse=True)
for el in score_list:
if el['take'] == el['pred']:
continue
take = reuters.labels[el['take']]
pred = reuters.labels[el['pred']]
print("{} => {} ({})".format(take, pred, el['acc']))
if __name__ == '__main__':
# main(reuters.categories(), reuters.fileids())
import reuters
acc = find_class_predictors(reuters.load_data()['y_train'])
print_class_predictors(acc)
|
lineage/query_history_stats.py
|
yu-iskw/elementary
| 282 |
107223
|
<reponame>yu-iskw/elementary
from collections import defaultdict
from lineage.query_context import QueryContext
class QueryHistoryStats(object):
def __init__(self):
self._query_type_stats = defaultdict(lambda: 0)
self._roles = set()
self._users = set()
def update_stats(self, query_context: QueryContext):
if query_context.query_type is not None:
self._query_type_stats[query_context.query_type] += 1
if query_context.user_name is not None:
self._users.add(query_context.user_name)
if query_context.role_name is not None:
self._roles.add(query_context.role_name)
def to_dict(self):
query_history_stats_dict = self._query_type_stats.copy()
query_history_stats_dict['user_count'] = len(self._users)
query_history_stats_dict['role_count'] = len(self._roles)
return query_history_stats_dict
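# Illustrative usage sketch (the iterable of QueryContext objects is assumed,
# not part of this module):
#     stats = QueryHistoryStats()
#     for query_context in parsed_query_history:
#         stats.update_stats(query_context)
#     stats.to_dict()  # per-query-type counts plus 'user_count' and 'role_count'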
|
preprocess-nmt.py
|
iYUYUE/struct-attn
| 261 |
107232
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create the data for the LSTM.
"""
import os
import sys
import argparse
import numpy as np
import h5py
import itertools
from collections import defaultdict
class Indexer:
def __init__(self, symbols = ["<blank>","<unk>","<s>","</s>"]):
self.vocab = defaultdict(int)
self.PAD = symbols[0]
self.UNK = symbols[1]
self.BOS = symbols[2]
self.EOS = symbols[3]
self.d = {self.PAD: 1, self.UNK: 2, self.BOS: 3, self.EOS: 4}
def add_w(self, ws):
for w in ws:
if w not in self.d:
self.d[w] = len(self.d) + 1
def convert(self, w):
return self.d[w] if w in self.d else self.d[self.UNK]
def convert_sequence(self, ls):
return [self.convert(l) for l in ls]
def clean(self, s):
s = s.replace(self.PAD, "")
s = s.replace(self.BOS, "")
s = s.replace(self.EOS, "")
return s
def write(self, outfile):
out = open(outfile, "w")
items = [(v, k) for k, v in self.d.iteritems()]
items.sort()
for v, k in items:
print >>out, k, v
out.close()
def prune_vocab(self, k, cnt = False):
vocab_list = [(word, count) for word, count in self.vocab.iteritems()]
if cnt:
self.pruned_vocab = {pair[0]: pair[1] for pair in vocab_list if pair[1] > k}
else:
vocab_list.sort(key = lambda x: x[1], reverse=True)
k = min(k, len(vocab_list))
self.pruned_vocab = {pair[0]:pair[1] for pair in vocab_list[:k]}
for word in self.pruned_vocab:
if word not in self.d:
self.d[word] = len(self.d) + 1
def load_vocab(self, vocab_file):
self.d = {}
for line in open(vocab_file, 'r'):
v, k = line.strip().split()
self.d[v] = int(k)
def pad(ls, length, symbol):
if len(ls) >= length:
return ls[:length]
return ls + [symbol] * (length -len(ls))
def get_data(args):
src_indexer = Indexer(["<blank>","<unk>","<s>","</s>"])
target_indexer = Indexer(["<blank>","<unk>","<s>","</s>"])
def make_vocab(srcfile, targetfile, srcseqlength, targetseqlength, train=1):
num_sents = 0
for _, (src_orig, targ_orig) in \
enumerate(itertools.izip(open(srcfile,'r'), open(targetfile,'r'))):
src_orig = src_indexer.clean(src_orig.strip())
targ_orig = target_indexer.clean(targ_orig.strip())
targ = targ_orig.strip().split()
src = src_orig.strip().split()
if len(targ) > targetseqlength or len(src) > srcseqlength or len(targ) < 1 or len(src) < 1:
continue
num_sents += 1
if train == 1:
for word in targ:
target_indexer.vocab[word] += 1
for word in src:
src_indexer.vocab[word] += 1
return num_sents
def convert(srcfile, targetfile, batchsize, srcseqlength, targetseqlength, outfile, num_sents,
max_sent_l=0, shuffle=0):
newsrcseqlength = srcseqlength + 2 #add 2 for EOS and BOS
newtargetseqlength = targetseqlength + 2
targets = np.zeros((num_sents, newtargetseqlength), dtype=int)
target_output = np.zeros((num_sents, newtargetseqlength), dtype=int)
sources = np.zeros((num_sents, newsrcseqlength), dtype=int)
source_lengths = np.zeros((num_sents,), dtype=int)
target_lengths = np.zeros((num_sents,), dtype=int)
dropped = 0
sent_id = 0
for _, (src_orig, targ_orig) in \
enumerate(itertools.izip(open(srcfile,'r'), open(targetfile,'r'))):
src_orig = src_indexer.clean(src_orig.strip())
targ_orig = target_indexer.clean(targ_orig.strip())
targ = [target_indexer.BOS] + targ_orig.strip().split() + [target_indexer.EOS]
src = [src_indexer.BOS] + src_orig.strip().split() + [src_indexer.EOS]
max_sent_l = max(len(targ), len(src), max_sent_l)
if len(targ) > newtargetseqlength or len(src) > newsrcseqlength or len(targ) < 3 or len(src) < 3:
dropped += 1
continue
targ = pad(targ, newtargetseqlength+1, target_indexer.PAD)
targ = target_indexer.convert_sequence(targ)
targ = np.array(targ, dtype=int)
src = pad(src, newsrcseqlength, src_indexer.PAD)
src = src_indexer.convert_sequence(src)
src = np.array(src, dtype=int)
targets[sent_id] = np.array(targ[:-1],dtype=int)
target_lengths[sent_id] = (targets[sent_id] != 1).sum()
target_output[sent_id] = np.array(targ[1:],dtype=int)
sources[sent_id] = np.array(src, dtype=int)
source_lengths[sent_id] = (sources[sent_id] != 1).sum()
sent_id += 1
if sent_id % 100000 == 0:
print("{}/{} sentences processed".format(sent_id, num_sents))
print(sent_id, num_sents)
if shuffle == 1:
rand_idx = np.random.permutation(sent_id)
targets = targets[rand_idx]
target_output = target_output[rand_idx]
sources = sources[rand_idx]
source_lengths = source_lengths[rand_idx]
target_lengths = target_lengths[rand_idx]
#break up batches based on source lengths
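        # Sorting by source length and slicing batches within each equal-length
        # group means every minibatch holds sentences of identical source length,
        # so no extra padding is needed inside a batch.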
source_lengths = source_lengths[:sent_id]
source_sort = np.argsort(source_lengths)
sources = sources[source_sort]
targets = targets[source_sort]
target_output = target_output[source_sort]
target_l = target_lengths[source_sort]
source_l = source_lengths[source_sort]
curr_l = 0
l_location = [] #idx where sent length changes
for j,i in enumerate(source_sort):
if source_lengths[i] > curr_l:
curr_l = source_lengths[i]
l_location.append(j+1)
l_location.append(len(sources))
#get batch sizes
curr_idx = 1
batch_idx = [1]
nonzeros = []
batch_l = []
batch_w = []
target_l_max = []
for i in range(len(l_location)-1):
while curr_idx < l_location[i+1]:
curr_idx = min(curr_idx + batchsize, l_location[i+1])
batch_idx.append(curr_idx)
for i in range(len(batch_idx)-1):
batch_l.append(batch_idx[i+1] - batch_idx[i])
batch_w.append(source_l[batch_idx[i]-1])
nonzeros.append((target_output[batch_idx[i]-1:batch_idx[i+1]-1] != 1).sum().sum())
target_l_max.append(max(target_l[batch_idx[i]-1:batch_idx[i+1]-1]))
# Write output
f = h5py.File(outfile, "w")
f["source"] = sources
f["target"] = targets
f["target_output"] = target_output
f["target_l"] = np.array(target_l_max, dtype=int)
f["target_l_all"] = target_l
f["batch_l"] = np.array(batch_l, dtype=int)
f["batch_w"] = np.array(batch_w, dtype=int)
f["batch_idx"] = np.array(batch_idx[:-1], dtype=int)
f["target_nonzeros"] = np.array(nonzeros, dtype=int)
f["source_size"] = np.array([len(src_indexer.d)])
f["target_size"] = np.array([len(target_indexer.d)])
print("Saved {} sentences (dropped {} due to length)".format(len(f["source"]), dropped))
f.close()
return max_sent_l
print("First pass through data to get vocab...")
num_sents_train = make_vocab(args.srcfile, args.targetfile,
args.srcseqlength, args.targetseqlength)
print("Number of sentences in training: {}".format(num_sents_train))
num_sents_valid = make_vocab(args.srcvalfile, args.targetvalfile,
args.srcseqlength, args.targetseqlength, 0)
print("Number of sentences in valid: {}".format(num_sents_valid))
#prune and write vocab
src_indexer.prune_vocab(args.srcvocabminfreq, True)
target_indexer.prune_vocab(args.targetvocabminfreq, True)
if args.srcvocabfile != '':
print('Loading pre-specified source vocab from ' + args.srcvocabfile)
src_indexer.load_vocab(args.srcvocabfile)
if args.targetvocabfile != '':
print('Loading pre-specified target vocab from ' + args.targetvocabfile)
target_indexer.load_vocab(args.targetvocabfile)
src_indexer.write(args.outputfile + ".src.dict")
target_indexer.write(args.outputfile + ".targ.dict")
print("Source vocab size: Original = {}, Pruned = {}".format(len(src_indexer.vocab),
len(src_indexer.d)))
print("Target vocab size: Original = {}, Pruned = {}".format(len(target_indexer.vocab),
len(target_indexer.d)))
max_sent_l = 0
max_sent_l = convert(args.srcvalfile, args.targetvalfile, args.batchsize, args.srcseqlength,
args.targetseqlength, args.outputfile + "-val.hdf5", num_sents_valid,
max_sent_l, args.shuffle)
max_sent_l = convert(args.srcfile, args.targetfile, args.batchsize, args.srcseqlength,
args.targetseqlength,
args.outputfile + "-train.hdf5", num_sents_train,
max_sent_l, args.shuffle)
print("Max sent length (before dropping): {}".format(max_sent_l))
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--srcvocabminfreq', help="Source vocab count limit. All words that occurred "
"less than this amount are replaced with UNK.",
type=int, default=10)
    parser.add_argument('--targetvocabminfreq', help="Target vocab count limit. All words that occurred "
"less than this amount are replaced with UNK.",
type=int, default=10)
parser.add_argument('--srcfile', help="Path to source training data, "
"where each line represents a single "
"source/target sequence.", required=True)
parser.add_argument('--targetfile', help="Path to target training data, "
"where each line represents a single "
"source/target sequence.", required=True)
parser.add_argument('--srcvalfile', help="Path to source validation data.", required=True)
parser.add_argument('--targetvalfile', help="Path to target validation data.", required=True)
parser.add_argument('--batchsize', help="Size of each minibatch.", type=int, default=128)
parser.add_argument('--srcseqlength', help="Maximum source sequence length. Sequences longer "
"than this are dropped.", type=int, default=50)
parser.add_argument('--targetseqlength', help="Maximum target sequence length. Sequences longer "
"than this are dropped.", type=int, default=50)
parser.add_argument('--outputfile', help="Prefix of the output file names. ", type=str, required=True)
parser.add_argument('--srcvocabfile', help="If working with a preset vocab, "
"then including this will ignore srcvocabsize and use the"
"vocab provided here.",
type = str, default='')
parser.add_argument('--targetvocabfile', help="If working with a preset vocab, "
"then including this will ignore targetvocabsize and "
"use the vocab provided here.",
type = str, default='')
parser.add_argument('--shuffle', help="If = 1, shuffle sentences before sorting (based on "
"source length).",
type = int, default = 0)
args = parser.parse_args(arguments)
get_data(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
035_BodyPix/saved_model_to_coreml.py
|
IgiArdiyanto/PINTO_model_zoo
| 1,529 |
107281
|
import coremltools as ct
def model_convert(model_name, stride_num, H, W):
saved_model_path = f'{model_name}/{stride_num}/saved_model_{H}x{W}'
input = ct.TensorType(name='sub_2', shape=(1, H, W, 3))
mlmodel = ct.convert(saved_model_path, inputs=[input], source='tensorflow')
mlmodel.save(f'{saved_model_path}/model_coreml_float32.mlmodel')
model_name = 'mobilenet050'
stride_num = 'stride8'
H = 240
W = 320
model_convert(model_name, stride_num, H, W)
H = 480
W = 640
model_convert(model_name, stride_num, H, W)
model_name = 'mobilenet050'
stride_num = 'stride16'
H = 240
W = 320
model_convert(model_name, stride_num, H, W)
H = 480
W = 640
model_convert(model_name, stride_num, H, W)
model_name = 'mobilenet050'
stride_num = 'stride8'
H = 240
W = 320
model_convert(model_name, stride_num, H, W)
H = 480
W = 640
model_convert(model_name, stride_num, H, W)
#=====================================================
model_name = 'mobilenet075'
stride_num = 'stride8'
H = 240
W = 320
model_convert(model_name, stride_num, H, W)
H = 480
W = 640
model_convert(model_name, stride_num, H, W)
model_name = 'mobilenet075'
stride_num = 'stride16'
H = 240
W = 320
model_convert(model_name, stride_num, H, W)
H = 480
W = 640
model_convert(model_name, stride_num, H, W)
#=====================================================
model_name = 'mobilenet100'
stride_num = 'stride8'
H = 240
W = 320
model_convert(model_name, stride_num, H, W)
H = 480
W = 640
model_convert(model_name, stride_num, H, W)
model_name = 'mobilenet100'
stride_num = 'stride16'
H = 240
W = 320
model_convert(model_name, stride_num, H, W)
H = 480
W = 640
model_convert(model_name, stride_num, H, W)
#=====================================================
model_name = 'resnet50'
stride_num = 'stride16'
H = 240
W = 320
model_convert(model_name, stride_num, H, W)
H = 480
W = 640
model_convert(model_name, stride_num, H, W)
model_name = 'resnet50'
stride_num = 'stride32'
H = 240
W = 320
model_convert(model_name, stride_num, H, W)
H = 480
W = 640
model_convert(model_name, stride_num, H, W)
|
admin/internet_archive/views.py
|
gaybro8777/osf.io
| 628 |
107302
|
<filename>admin/internet_archive/views.py
from django.views.generic import TemplateView, View, FormView
from django.contrib import messages
from osf.management.commands.archive_registrations_on_IA import (
archive_registrations_on_IA,
)
from osf.management.commands.populate_internet_archives_collections import (
populate_internet_archives_collections,
)
from osf.management.commands.check_ia_metadata import (
check_ia_metadata,
IAMetadataError
)
from osf.management.commands.sync_ia_metadata import (
sync_ia_metadata,
)
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from admin.base.forms import ArchiveRegistrationWithPigeonForm
from website import settings
from django.contrib.auth.mixins import PermissionRequiredMixin
class InternetArchiveView(TemplateView, PermissionRequiredMixin):
"""Basic form to trigger various management commands"""
template_name = 'internet_archive/internet_archive.html'
permission_required = 'osf.change_node'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['ia_collection'] = settings.IA_ROOT_COLLECTION
context['ia_id_version'] = settings.ID_VERSION
context['osf_pigeon_url'] = settings.OSF_PIGEON_URL
return context
class SendToPigeon(FormView, PermissionRequiredMixin):
form_class = ArchiveRegistrationWithPigeonForm
raise_exception = True
permission_required = 'osf.change_node'
def form_valid(self, form):
guids = form.cleaned_data['guid_to_archive']
guids = [guid.strip() for guid in guids.split(',') if guid]
archive_registrations_on_IA(guids=guids)
messages.success(self.request, f'{" ,".join(guids) if guids else "the job"} has begun archiving.')
return super().form_valid(form)
def get_success_url(self, *args, **kwargs):
return reverse('internet_archive:internet_archive')
class CreateIASubcollections(View, PermissionRequiredMixin):
def post(self, request, *args, **kwargs):
populate_internet_archives_collections(settings.ID_VERSION)
messages.success(
request,
f'Subcollections with ids of {settings.ID_VERSION} are being created',
)
return redirect(reverse('internet_archive:internet_archive'))
class CheckIAMetadata(FormView, PermissionRequiredMixin):
form_class = ArchiveRegistrationWithPigeonForm
raise_exception = True
permission_required = 'osf.change_node'
def form_valid(self, form):
guids = form.cleaned_data['guid_to_archive']
guids = [guid.strip() for guid in guids.split(',') if guid]
try:
check_ia_metadata(guids=guids)
messages.success(self.request, 'All IA items are synced')
except IAMetadataError as e:
messages.error(self.request, e.message)
if e.fields:
for ai_url, data in e.fields.items():
messages.error(self.request, f'{ai_url}: {", ".join(data["fields"])}')
return super().form_valid(form)
def get_success_url(self, *args, **kwargs):
return reverse('internet_archive:internet_archive')
class SyncIAMetadata(FormView, PermissionRequiredMixin):
form_class = ArchiveRegistrationWithPigeonForm
raise_exception = True
permission_required = 'osf.change_node'
def form_valid(self, form):
guids = form.cleaned_data['guid_to_archive']
guids = [guid.strip() for guid in guids.split(',') if guid]
sync_ia_metadata(guids=guids)
messages.success(self.request, f'{", ".join(guids)} match IA items.')
return super().form_valid(form)
def get_success_url(self, *args, **kwargs):
return reverse('internet_archive:internet_archive')
|
data/transcoder_evaluation_gfg/python/SPLIT_ARRAY_ADD_FIRST_PART_END.py
|
mxl1n/CodeGen
| 241 |
107383
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( arr , n , k ) :
for i in range ( 0 , k ) :
x = arr [ 0 ]
for j in range ( 0 , n - 1 ) :
arr [ j ] = arr [ j + 1 ]
arr [ n - 1 ] = x
#TOFILL
if __name__ == '__main__':
param = [
([75],0,0,),
([-58, -60, -38, 48, -2, 32, -48, -46, 90, -54, -18, 28, 72, 86, 0, -2, -74, 12, -58, 90, -30, 10, -88, 2, -14, 82, -82, -46, 2, -74],27,17,),
([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],7,7,),
([45, 51, 26, 36, 10, 62, 62, 56, 61, 67, 86, 97, 31, 93, 32, 1, 14, 25, 24, 30, 1, 44, 7, 98, 56, 68, 53, 59, 30, 90, 79, 22],23,24,),
([-88, -72, -64, -46, -40, -16, -8, 0, 22, 34, 44],6,6,),
([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0],23,30,),
([8, 17, 20, 23, 31, 32, 37, 37, 44, 45, 48, 64, 64, 67, 69, 71, 75, 77, 78, 81, 83, 87, 89, 92, 94],21,20,),
([-8, -88, -68, 48, 8, 50, 30, -88, 74, -16, 6, 74, 36, 32, 22, 96, -2, 70, 40, -46, 98, 34, 2, 94],23,13,),
([0, 0, 0, 0, 1, 1, 1, 1, 1],5,8,),
([80, 14, 35, 25, 60, 86, 45, 95, 32, 29, 94, 6, 63, 66, 38],9,7,)
]
filled_function_param = [
([75],0,0,),
([-58, -60, -38, 48, -2, 32, -48, -46, 90, -54, -18, 28, 72, 86, 0, -2, -74, 12, -58, 90, -30, 10, -88, 2, -14, 82, -82, -46, 2, -74],27,17,),
([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],7,7,),
([45, 51, 26, 36, 10, 62, 62, 56, 61, 67, 86, 97, 31, 93, 32, 1, 14, 25, 24, 30, 1, 44, 7, 98, 56, 68, 53, 59, 30, 90, 79, 22],23,24,),
([-88, -72, -64, -46, -40, -16, -8, 0, 22, 34, 44],6,6,),
([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0],23,30,),
([8, 17, 20, 23, 31, 32, 37, 37, 44, 45, 48, 64, 64, 67, 69, 71, 75, 77, 78, 81, 83, 87, 89, 92, 94],21,20,),
([-8, -88, -68, 48, 8, 50, 30, -88, 74, -16, 6, 74, 36, 32, 22, 96, -2, 70, 40, -46, 98, 34, 2, 94],23,13,),
([0, 0, 0, 0, 1, 1, 1, 1, 1],5,8,),
([80, 14, 35, 25, 60, 86, 45, 95, 32, 29, 94, 6, 63, 66, 38],9,7,)
]
n_success = 0
for i, parameters_set in enumerate(param):
f_filled(*(filled_function_param[i]))
f_gold(*parameters_set)
if parameters_set == filled_function_param[i]:
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
oct2py/ipython/tests/test_octavemagic.py
|
adityaapte/oct2py
| 195 |
107386
|
"""Tests for Octave magics extension."""
import codecs
import unittest
import sys
from IPython.display import SVG
from IPython.testing.globalipapp import get_ipython
import numpy as np
from oct2py.ipython import octavemagic
from oct2py import Oct2PyError
class OctaveMagicTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
'''Set up an IPython session just once.
It'd be safer to set it up for each test, but for now,
I'm mimicking the IPython team's logic.
'''
if not sys.stdin.encoding:
# needed for py.test
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
cls.ip = get_ipython()
# This is just to get a minimally modified version of the changes
# working
cls.ip.magic('load_ext oct2py.ipython')
cls.ip.ex('import numpy as np')
cls.svgs_generated = 0
def test_octave_inline(self):
result = self.ip.run_line_magic('octave', '[1, 2, 3] + 1;')
assert np.allclose(result, [[2, 3, 4]])
def test_octave_roundtrip(self):
ip = self.ip
ip.ex('x = np.arange(3); y = 4.5')
ip.run_line_magic('octave_push', 'x y')
ip.run_line_magic('octave', 'x = x + 1; y = y + 1;')
ip.run_line_magic('octave_pull', 'x y')
assert np.allclose(ip.user_ns['x'], [[1, 2, 3]])
assert np.allclose(ip.user_ns['y'], 5.5)
def test_octave_cell_magic(self):
ip = self.ip
ip.ex('x = 3; y = [1, 2]')
ip.run_cell_magic('octave', '-f png -s 400,400 -i x,y -o z',
'z = x + y;')
assert np.allclose(ip.user_ns['z'], [[4, 5]])
def test_octave_plot(self):
magic = self.ip.find_cell_magic('octave').__self__
magic._display = self._verify_display
self.ip.run_cell_magic('octave', '-f svg -s 400,500',
'plot([1, 2, 3]); figure; plot([4, 5, 6]);')
assert self.svgs_generated == 2
def _verify_display(self, obj):
if isinstance(obj, SVG):
svg = obj.data
assert 'height="500px"' in svg, svg
assert 'width="400px"' in svg, svg
self.svgs_generated += 1
def test_octave_syntax_error(self):
try:
self.ip.run_cell_magic('octave', '', "a='1")
except Oct2PyError:
self.ip.magic('reload_ext oct2py.ipython')
def test_octave_error(self):
self.assertRaises(Oct2PyError, self.ip.run_cell_magic,
'octave', '', 'a = ones2(1)')
|
tests/sequence/test_annotation.py
|
danijoo/biotite
| 208 |
107425
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
import biotite.sequence as seq
from biotite.sequence import Location, Feature, Annotation, AnnotatedSequence
import biotite.sequence.io.genbank as gb
import numpy as np
from os.path import join
from ..util import data_dir
import pytest
def test_annotation_creation():
feature1 = Feature("CDS", [seq.Location(1,2)], qual={"gene" : "test1"})
feature2 = Feature("CDS", [seq.Location(3,4)], qual={"gene" : "test2"})
feature_list = [feature1, feature2]
annotation = Annotation(feature_list)
for feature in annotation:
assert feature.key in [f.key for f in feature_list]
assert feature.qual["gene"] in [
f.qual["gene"] for f in feature_list
]
def test_annotation_concatenation():
feature1 = Feature("CDS", [seq.Location(1,1)], qual={"gene" : "test1"})
feature2 = Feature("CDS", [seq.Location(2,2)], qual={"gene" : "test2"})
annot1 = Annotation([feature1, feature2])
feature3 = Feature("CDS", [seq.Location(3,3)], qual={"gene" : "test3"})
feature4 = Feature("CDS", [seq.Location(4,4)], qual={"gene" : "test4"})
annot2 = Annotation([feature3, feature4])
feature5 = Feature("CDS", [seq.Location(5,5)], qual={"gene" : "test5"})
concat = annot1 + annot2 + feature5
assert set([f.qual["gene"] for f in concat]) \
== set(["test1", "test2", "test3", "test4", "test5"])
def test_annotation_indexing():
feature1 = Feature("CDS", [Location(-10,30 )], qual={"gene" : "test1"})
feature2 = Feature("CDS", [Location(20, 50 )], qual={"gene" : "test2"})
feature3 = Feature("CDS", [Location(100,130)], qual={"gene" : "test3"})
feature4 = Feature("CDS", [Location(150,250)], qual={"gene" : "test4"})
feature5 = Feature("CDS", [Location(-50,200)], qual={"gene" : "test5"})
annotation = Annotation([feature1,feature2,feature3,feature4,feature5])
sub_annot = annotation[40:150]
# Only one location per feature
assert set([list(f.locs)[0].defect for f in sub_annot]) \
== set([Location.Defect.MISS_LEFT, Location.Defect.NONE,
(Location.Defect.MISS_LEFT | Location.Defect.MISS_RIGHT)])
assert set([f.qual["gene"] for f in sub_annot]) \
== set(["test2", "test3", "test5"])
def test_annotated_sequence():
sequence = seq.NucleotideSequence("ATGGCGTACGATTAGAAAAAAA")
feature1 = Feature("misc_feature", [Location(1,2), Location(11,12)],
{"note" : "walker"})
feature2 = Feature("misc_feature", [Location(16,22)], {"note" : "poly-A"})
annotation = Annotation([feature1, feature2])
annot_seq = AnnotatedSequence(annotation, sequence)
assert annot_seq[2] == "T"
assert annot_seq.sequence[2] == "G"
annot_seq2 = annot_seq[:16]
assert annot_seq2.sequence == seq.NucleotideSequence("ATGGCGTACGATTAG")
assert annot_seq[feature1] == seq.NucleotideSequence("ATAT")
assert annot_seq[feature2] == seq.NucleotideSequence("AAAAAAA")
annot_seq[feature1] = seq.NucleotideSequence("CCCC")
assert annot_seq.sequence == seq.NucleotideSequence("CCGGCGTACGCCTAGAAAAAAA")
def test_reverse_complement():
gb_file = gb.GenBankFile.read(join(data_dir("sequence"), "ec_bl21.gb"))
annot_seq = gb.get_annotated_sequence(gb_file)
assert annot_seq == annot_seq.reverse_complement().reverse_complement()
|
src/masonite/stubs/events/Listener.py
|
cercos/masonite
| 1,816 |
107456
|
class __class__:
def handle(self, event):
pass
|
cbpro/__init__.py
|
shokitaka/coinbasepro-python
| 1,023 |
107471
|
<filename>cbpro/__init__.py<gh_stars>1000+
from cbpro.authenticated_client import AuthenticatedClient
from cbpro.public_client import PublicClient
from cbpro.websocket_client import WebsocketClient
from cbpro.order_book import OrderBook
from cbpro.cbpro_auth import CBProAuth
|
src/poliastro/earth/util.py
|
Carlosbogo/poliastro
| 634 |
107481
|
<filename>src/poliastro/earth/util.py<gh_stars>100-1000
import numpy as np
from astropy import units as u
from astropy.coordinates import get_sun
from poliastro import constants
from poliastro.util import wrap_angle
@u.quantity_input(ltan=u.hourangle)
def raan_from_ltan(epoch, ltan=12.0):
"""RAAN angle from LTAN for SSO around the earth
Parameters
----------
epoch : ~astropy.time.Time
Value of time to calculate the RAAN for
ltan: ~astropy.units.Quantity
Decimal hour between 0 and 24
Returns
-------
RAAN: ~astropy.units.Quantity
Right ascension of the ascending node angle in GCRS
Note
----
Calculations of the sun mean longitude and equation of time
follow "Fundamentals of Astrodynamics and Applications"
Fourth edition by Vallado, <NAME>.
"""
T_UT1 = ((epoch.ut1 - constants.J2000).value / 36525.0) * u.deg
T_TDB = ((epoch.tdb - constants.J2000).value / 36525.0) * u.deg
# Apparent sun position
sun_position = get_sun(epoch)
# Calculate the sun apparent local time
salt = sun_position.ra + 12 * u.hourangle
# Use the equation of time to calculate the mean sun local time (fictional sun without anomalies)
# Sun mean anomaly
M_sun = 357.5291092 * u.deg + 35999.05034 * T_TDB
# Sun mean longitude
l_sun = 280.460 * u.deg + 36000.771 * T_UT1
l_ecliptic_part2 = 1.914666471 * u.deg * np.sin(
M_sun
) + 0.019994643 * u.deg * np.sin(2 * M_sun)
l_ecliptic = l_sun + l_ecliptic_part2
eq_time = (
-l_ecliptic_part2
+ 2.466 * u.deg * np.sin(2 * l_ecliptic)
- 0.0053 * u.deg * np.sin(4 * l_ecliptic)
)
# Calculate sun mean local time
smlt = salt + eq_time
# Desired angle between sun and ascending node
alpha = wrap_angle(ltan, 24 * u.hourangle).to(u.rad)
# Use the mean sun local time calculate needed RAAN for given LTAN
raan = smlt + alpha
return raan
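# Illustrative usage (the epoch value is an arbitrary example):
#     from astropy.time import Time
#     from astropy import units as u
#     epoch = Time("2020-03-20 12:00:00", scale="utc")
#     raan = raan_from_ltan(epoch, ltan=6.0 * u.hourangle)  # dawn-dusk SSO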
|
utils_nlp/interpreter/Interpreter.py
|
Anita1017/nlp-recipes
| 4,407 |
107492
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Utilities that enables you to explain every hidden state in your model"""
import torch
from torch import nn
from torch import optim
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
def calculate_regularization(sampled_x, Phi, reduced_axes=None, device=None):
""" Calculate the variance that is used for Interpreter
Args:
sampled_x (list of torch.FloatTensor):
A list of sampled input embeddings $x$, each $x$ is of shape
``[length, dimension]``. All the $x$s can have different length,
but should have the same dimension. Sampled number should be
higher to get a good estimation.
reduced_axes (list of ints, Optional):
The axes that is variable in Phi (e.g., the sentence length axis).
We will reduce these axes by mean along them.
Returns:
torch.FloatTensor: The regularization term calculated
"""
sample_num = len(sampled_x)
sample_s = []
for n in range(sample_num):
x = sampled_x[n]
if device is not None:
x = x.to(device)
s = Phi(x)
if reduced_axes is not None:
for axis in reduced_axes:
assert axis < len(s.shape)
s = s.mean(dim=axis, keepdim=True)
sample_s.append(s.tolist())
sample_s = np.array(sample_s)
return np.std(sample_s, axis=0)
class Interpreter(nn.Module):
""" Interpreter for interpreting one instance. The method is from
paper `Towards a Deep and Unified Understanding of Deep Neural
Models in NLP <http://proceedings.mlr.press/v97/guan19a/guan19a.pdf>`_
It will minimize the loss in Eqn.(7):
$L(sigma) = (||Phi(embed + epsilon) - Phi(embed)||_2^2)
// (regularization^2) - rate * log(sigma)$
In our implementation, we use reparameterization trick to represent
epsilon ~ N(0, sigma^2 I), i.e. epsilon = scale * ratio * noise.
Where noise ~ N(0, 1), scale is a hyper-parameter that controls the
maximum value of sigma^2, and ratio in (0, 1) is the learnable parameter.
"""
def __init__(self, x, Phi, scale=0.5, rate=0.1, regularization=None, words=None):
""" Initialize an interpreter class.
Args:
x (torch.FloatTensor): Of shape ``[length, dimension]``.
The $x$ we studied. i.e. The input word embeddings.
Phi (function):
The $Phi$ we studied. A function whose input is x (the first
parameter) and returns a hidden state (of type
``torch.FloatTensor``, of any shape)
            scale (float):
                The maximum size of sigma. A hyper-parameter in the
                reparameterization trick. The recommended value is
                10 * Std[word_embedding_weight], where word_embedding_weight
                is the word embedding weight in the model being interpreted.
                A larger scale will give a more salient result. Default: 0.5.
            rate (float):
                A hyper-parameter that balances the MLE Loss and Maximum
                Entropy Loss. A larger rate will result in larger information
                loss. Default: 0.1.
regularization (Torch.FloatTensor or np.ndarray):
The regularization term, should be of the same shape as
(or broadcastable to) the output of Phi. If None is given,
method will use the output to regularize itself.
Default: None.
words (List[Str]):
The input sentence, used for visualizing. If None is given,
method will not show the words.
"""
super(Interpreter, self).__init__()
self.s = x.size(0)
self.d = x.size(1)
self.ratio = nn.Parameter(torch.randn(self.s, 1), requires_grad=True)
self.scale = scale
self.rate = rate
self.x = x
self.Phi = Phi
self.regular = regularization
if self.regular is not None:
self.regular = nn.Parameter(torch.tensor(self.regular).to(x), requires_grad=False)
self.words = words
if self.words is not None:
assert self.s == len(
words
), "the length of x should be of the same with the lengh of words"
def forward(self):
""" Calculate loss:
            $L(sigma) = ||Phi(embed + epsilon) - Phi(embed)||_2^2
                / regularization^2 - rate * log(sigma)$
Returns:
torch.FloatTensor: a scalar, the target loss.
"""
ratios = torch.sigmoid(self.ratio) # S * 1
x = self.x + 0.0 # S * D
x_tilde = x + ratios * torch.randn(self.s, self.d).to(x.device) * self.scale # S * D
s = self.Phi(x) # D or S * D
s_tilde = self.Phi(x_tilde)
loss = (s_tilde - s) ** 2
if self.regular is not None:
loss = torch.mean(loss / self.regular ** 2)
else:
loss = torch.mean(loss) / torch.mean(s ** 2)
return loss - torch.mean(torch.log(ratios)) * self.rate
def optimize(self, iteration=5000, lr=0.01, show_progress=False):
""" Optimize the loss function
Args:
iteration (int): Total optimizing iteration
lr (float): Learning rate
            show_progress (bool): Whether to show the learning progress
"""
minLoss = None
state_dict = None
optimizer = optim.Adam(self.parameters(), lr=lr)
self.train()
func = (lambda x: x) if not show_progress else tqdm
for _ in func(range(iteration)):
optimizer.zero_grad()
loss = self()
loss.backward()
optimizer.step()
if minLoss is None or minLoss > loss:
state_dict = {k: self.state_dict()[k] + 0.0 for k in self.state_dict().keys()}
minLoss = loss
self.eval()
self.load_state_dict(state_dict)
def get_sigma(self):
""" Calculate and return the sigma
Returns:
np.ndarray: of shape ``[seqLen]``, the ``sigma``.
"""
ratios = torch.sigmoid(self.ratio) # S * 1
return ratios.detach().cpu().numpy()[:, 0] * self.scale
def visualize(self):
""" Visualize the information loss of every word.
"""
sigma_ = self.get_sigma()
_, ax = plt.subplots()
im = ax.imshow([sigma_], cmap="GnBu_r")
ax.set_xticks(range(self.s))
ax.set_xticklabels(self.words)
ax.set_yticks([0])
ax.set_yticklabels([""])
plt.colorbar(im, orientation="horizontal")
plt.tight_layout()
plt.show()
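# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of interpreting one sentence; `model`,
# `embed`, `words` and `sampled_embeddings` are placeholders for a real
# model's word embeddings and a sample of embeddings from a corpus.
def _example_usage(model, embed, words, sampled_embeddings):
    # Phi maps word embeddings to the hidden state we want to explain.
    Phi = lambda x: model(x.unsqueeze(0)).squeeze(0)
    regular = calculate_regularization(sampled_embeddings, Phi, reduced_axes=[0])
    interpreter = Interpreter(embed, Phi, regularization=regular, words=words)
    interpreter.optimize(iteration=1000, lr=0.01, show_progress=True)
    sigma = interpreter.get_sigma()   # per-word information loss
    interpreter.visualize()
    return sigma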
|
rest-service/manager_rest/update_rest_db_config.py
|
ilan-WS/cloudify-manager
| 124 |
107496
|
#!/opt/manager/env/bin/python
import argparse
import grp
import json
import logging
import os
import pwd
import re
import shutil
import sys
import yaml
def _copy(src, dest, username, groupname):
"""Copy src to dest and chown to uid:gid"""
shutil.copyfile(src, dest)
uid = pwd.getpwnam(username).pw_uid
gid = grp.getgrnam(groupname).gr_gid
os.chown(dest, uid, gid)
return dest
def _find_db_servers(haproxy_cfg):
"""Parse the haproxy config
:param haproxy_cfg: file object containing the haproxy config
:return: list of database server addresses
"""
config_content = haproxy_cfg.read()
logging.debug('Loaded haproxy config: %d bytes', len(config_content))
server_lines = re.findall(
r'server postgresql_.*$', config_content, re.MULTILINE)
server_addrs = [line.split()[2] for line in server_lines]
logging.info('Found %d servers in the haproxy config', len(server_lines))
logging.debug('DB servers: %s', server_addrs)
return [addr.partition(':')[0] for addr in server_addrs]
def _format_db_urls(rest_config, params, db):
params = '&'.join('{0}={1}'.format(k, v) for k, v in params.items() if v)
for host in rest_config['postgresql_host']:
yield (
'postgres://{username}:{password}@{host}:{port}/{db}?{params}'
.format(
username=rest_config['postgresql_username'],
password=rest_config['postgresql_password'],
host=host,
port=5432,
db=db,
params=params
)
)
def _update_stage_conf(rest_config, commit):
logging.debug('Loading stage config...')
try:
with open('/opt/cloudify-stage/conf/app.json') as f:
stage_conf = json.load(f)
except IOError as e:
raise RuntimeError('Cannot open Stage config: {0}'.format(e))
postgres_ca = '/opt/cloudify-stage/conf/postgres_ca.crt'
if commit:
_copy(
rest_config['postgresql_ca_cert_path'],
postgres_ca,
'stage_user',
'cfyuser'
)
stage_conf['db']['options']['dialectOptions'].update({
'ssl': {
'rejectUnauthorized': True,
'ca': postgres_ca
}
})
if rest_config.get('postgresql_ssl_key_path'):
postgres_cert = '/opt/cloudify-stage/conf/postgres.crt'
postgres_key = '/opt/cloudify-stage/conf/postgres.key'
if commit:
_copy(
rest_config['postgresql_ssl_cert_path'],
postgres_cert,
'stage_user',
'cfyuser'
)
_copy(
rest_config['postgresql_ssl_key_path'],
postgres_key,
'stage_user',
'cfyuser'
)
stage_conf['db']['options']['dialectOptions']['ssl'].update({
'cert': postgres_cert,
'key': postgres_key,
})
else:
postgres_cert = None
postgres_key = None
url_params = {
'sslcert': postgres_cert,
'sslkey': postgres_key,
'sslmode': 'verify-full',
'sslrootcert': postgres_ca
}
stage_conf['db']['url'] = list(_format_db_urls(
rest_config, url_params, db='stage'))
serialized = json.dumps(stage_conf, indent=4, sort_keys=True)
logging.info('Stage config:')
print(serialized)
if commit:
with open('/opt/cloudify-stage/conf/app.json', 'w') as f:
f.write(serialized)
def _update_composer_conf(rest_config, commit):
logging.debug('Loading composer config...')
try:
with open('/opt/cloudify-composer/backend/conf/prod.json') as f:
composer_conf = json.load(f)
except IOError as e:
raise RuntimeError('Cannot open Composer config: {0}'.format(e))
postgres_ca = '/opt/cloudify-composer/backend/conf/postgres_ca.crt'
if commit:
_copy(
rest_config['postgresql_ca_cert_path'],
postgres_ca,
'composer_user',
'cfyuser'
)
composer_conf['db']['options']['dialectOptions'].update({
'ssl': {
'rejectUnauthorized': True,
'ca': postgres_ca
}
})
if rest_config.get('postgresql_ssl_key_path'):
postgres_cert = '/opt/cloudify-composer/backend/conf/postgres.crt'
postgres_key = '/opt/cloudify-composer/backend/conf/postgres.key'
if commit:
_copy(
rest_config['postgresql_ssl_cert_path'],
postgres_cert,
'composer_user',
'cfyuser'
)
_copy(
rest_config['postgresql_ssl_key_path'],
postgres_key,
'composer_user',
'cfyuser'
)
composer_conf['db']['options']['dialectOptions']['ssl'].update({
'cert': postgres_cert,
'key': postgres_key,
})
else:
postgres_cert = None
postgres_key = None
url_params = {
'sslcert': postgres_cert,
'sslkey': postgres_key,
'sslmode': 'verify-full',
'sslrootcert': postgres_ca
}
composer_conf['db']['url'] = list(_format_db_urls(
rest_config, url_params, db='composer'))
serialized = json.dumps(composer_conf, indent=4, sort_keys=True)
logging.info('Composer config:')
print(serialized)
if commit:
with open('/opt/cloudify-composer/backend/conf/prod.json', 'w') as f:
f.write(serialized)
def update_db_address(restservice_config_path, commit):
logging.debug('Loading haproxy config...')
try:
with open('/etc/haproxy/haproxy.cfg') as f:
dbs = _find_db_servers(f)
except IOError:
logging.info('Cannot open HAProxy config: nothing to do')
return
if not dbs:
logging.info("No DB addresses configured, nothing to do")
return
logging.debug('Loading restservice config')
with open(restservice_config_path) as f:
rest_config = yaml.safe_load(f)
logging.debug('Loaded restservice config')
db_ca = '/etc/cloudify/ssl/db_ca.crt'
rest_config['postgresql_ca_cert_path'] = db_ca
if commit:
_copy(
'/etc/haproxy/ca.crt',
db_ca,
'cfyuser',
'cfyuser'
)
rest_config['postgresql_host'] = dbs
_update_stage_conf(rest_config, commit)
_update_composer_conf(rest_config, commit)
serialized = json.dumps(rest_config, indent=4, sort_keys=True)
print(serialized)
if commit:
with open(restservice_config_path, 'w') as f:
f.write(serialized)
logging.info('Stored the new config in %s', restservice_config_path)
else:
logging.info('Dry-run: did not store the new config')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Rewrite RESTservice db config to not use HAProxy")
parser.add_argument(
'--restservice-config-path',
default='/opt/manager/cloudify-rest.conf')
parser.add_argument('--commit', action='store_true',
help='Commit changes, otherwise dryrun')
parser.add_argument('--verbose', '-v', action='count', default=0)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO,
format='%(levelname)s %(asctime)s %(message)s',
stream=sys.stderr)
if os.geteuid() != 0:
raise RuntimeError('This script must be run as root!')
update_db_address(args.restservice_config_path, args.commit)
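# --- Usage sketch (not part of the original script) ---
# Hypothetical invocation: review the rewritten config first (dry-run), then
# apply it with --commit once the output looks right.
#   sudo /opt/manager/env/bin/python update_rest_db_config.py -v
#   sudo /opt/manager/env/bin/python update_rest_db_config.py -v --commit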
|
iwant/core/messagebaker.py
|
nirvik/iWant
| 323 |
107514
|
import json
from functools import wraps
import time_uuid
from constants import INDEXED, LEADER_NOT_READY,\
ERROR_LIST_ALL_FILES, LEADER,\
HASH_DUMP, FILE_SYS_EVENT, SEARCH_REQ, SEARCH_RES, \
LOOKUP, IWANT_PEER_FILE, PEER_LOOKUP_RESPONSE,\
SEND_PEER_DETAILS, FILE_DETAILS_RESP, INIT_FILE_REQ, \
FILE_TO_BE_DOWNLOADED, DEAD,\
NEW_PEER, BCAST_LEDGER, NEW_LEADER, REMOVE_LEADER, \
SECRET_VAL, HANDLE_PONG, FACE_OFF,\
RE_ELECTION, ALIVE, HANDLE_ALIVE,\
HANDLE_PING, REQ_CHUNK, END_GAME, FILE_CONFIRMATION_MESSAGE,\
INTERESTED, UNCHOKE, PEER_DEAD, CHANGE, SHARE, NEW_DOWNLOAD_FOLDER_RES,\
NEW_SHARED_FOLDER_RES, HASH_IDENTITY_RESPONSE, GET_HASH_IDENTITY, HASH_NOT_PRESENT
def finishing(func):
@wraps(func)
def jsonify(key, **kwargs):
_EOL = '\r'
return json.dumps(func(key, **kwargs)) + _EOL
return jsonify
@finishing
def bake(key, **kwargs):
'''
This utility is for crafting `action messages` to be sent across the network.
The `action_msg` contains `type` and `payload`
'''
action_msg = {}
action_msg['type'] = key
action_msg['payload'] = None
payload = {}
def _craft_new_peer_msg():
# print NEW_PEER, kwargs
try:
payload['identity'] = kwargs['identity'].hex
except:
payload['identity'] = None
try:
payload['leader_id'] = kwargs['leader_id'].hex
except:
payload['leader_id'] = None
action_msg['payload'] = payload
return action_msg
def _craft_remove_leader_msg():
# print REMOVE_LEADER
payload['leader_id'] = kwargs['leader_id'].hex
action_msg['payload'] = payload
return action_msg
def _craft_re_election_msg():
# print RE_ELECTION
payload['election_id'] = kwargs['election_id']
action_msg['payload'] = payload
return action_msg
def _craft_handle_pong_msg():
# print HANDLE_PONG
payload['secret_value'] = kwargs['secret_value']
action_msg['payload'] = payload
return action_msg
def _craft_new_leader_msg():
# print NEW_LEADER
payload['leader_id'] = kwargs['leader_id'].hex
payload['election_id'] = kwargs['election_id']
payload['secret_value'] = kwargs['secret_value']
action_msg['payload'] = payload
return action_msg
def _craft_alive_msg():
# print ALIVE
payload['election_id'] = kwargs['election_id']
action_msg['payload'] = payload
return action_msg
def _craft_handle_alive_msg():
# print HANDLE_ALIVE
payload['election_id'] = kwargs['election_id']
action_msg['payload'] = payload
return action_msg
def _craft_handle_ping_msg():
payload['ping'] = kwargs['ping']
action_msg['payload'] = payload
return action_msg
def _craft_bcast_ledger_msg():
try:
payload['leader_id'] = kwargs['leader_id'].hex
except AttributeError:
payload['leader_id'] = None
ledger = {}
for uuid, value in kwargs['ledger'].iteritems():
ledger[uuid.hex] = value
payload['ledger'] = ledger
payload['secret_value'] = kwargs['secret_value']
action_msg['payload'] = payload
return action_msg
def _craft_secret_val_msg():
# print SECRET_VAL
payload['secret_value'] = kwargs['secret_value']
action_msg['payload'] = payload
return action_msg
def _craft_face_off_msg():
# print FACE_OFF
payload['with_leader'] = kwargs['with_leader']
action_msg['payload'] = payload
return action_msg
def _craft_dead_msg():
try:
payload['dead_uuid'] = kwargs['dead_uuid'].hex
except AttributeError:
payload['dead_uuid'] = None
payload['secret_value'] = kwargs['secret_value']
action_msg['payload'] = payload
return action_msg
# SERVER MESSAGES
def _craft_unchoke_msg():
payload['unchoke'] = kwargs['unchoke']
action_msg['payload'] = payload
return action_msg
def _craft_error_list_all_files_msg():
payload['reason'] = kwargs['reason']
action_msg['payload'] = payload
return action_msg
def _craft_leader_not_ready_msg():
payload['reason'] = kwargs['reason']
action_msg['payload'] = payload
return action_msg
def _craft_search_response_msg():
payload['search_query_response'] = kwargs['search_query_response']
action_msg['payload'] = payload
return action_msg
def _craft_peer_lookup_response_msg():
payload['peer_lookup_response'] = kwargs['peer_lookup_response']
action_msg['payload'] = payload
return action_msg
def _craft_hash_dump_msg():
payload['identity'] = kwargs['identity'].hex
payload['operation'] = kwargs['operation']
action_msg['payload'] = payload
return action_msg
def _craft_init_file_req_msg():
payload['filehash'] = kwargs['filehash']
action_msg['payload'] = payload
return action_msg
def _craft_leader_msg():
payload['leader'] = kwargs['leader']
action_msg['payload'] = payload
return action_msg
def _craft_peer_dead_msg():
try:
payload['dead_uuid'] = kwargs['dead_uuid'].hex
except AttributeError:
payload['dead_uuid'] = None
action_msg['payload'] = payload
return action_msg
def _craft_file_sys_event_msg():
payload['ADD'] = kwargs['ADD']
payload['DEL'] = kwargs['DEL']
payload['shared_folder'] = kwargs['shared_folder']
action_msg['payload'] = payload
return action_msg
def _craft_search_req_msg():
payload['search_query'] = kwargs['search_query']
action_msg['payload'] = payload
return action_msg
def _craft_lookup_msg():
payload['search_query'] = kwargs['search_query']
action_msg['payload'] = payload
return action_msg
def _craft_iwant_peer_file_msg():
payload['filehash'] = kwargs['filehash']
action_msg['payload'] = payload
return action_msg
def _craft_send_peer_details_msg():
payload['filehash'] = kwargs['filehash']
action_msg['payload'] = payload
return action_msg
def _craft_indexed_msg():
payload['ADD'] = kwargs['ADD']
payload['DEL'] = kwargs['DEL']
payload['shared_folder'] = kwargs['shared_folder']
action_msg['payload'] = payload
return action_msg
def _craft_req_chunk_msg():
payload['piece_data'] = kwargs['piece_data']
action_msg['payload'] = payload
# print action_msg
return action_msg
def _craft_end_game_msg():
payload['end_game'] = kwargs['end_game']
action_msg['payload'] = payload
return action_msg
def _craft_file_details_resp():
pass
def _craft_file_to_be_downloaded_msg():
# payload['filesize'] = kwargs['filesize']
# payload['filename'] = kwargs['filename']
payload['message'] = kwargs['message']
action_msg['payload'] = payload
return action_msg
def _craft_interested_msg():
payload['filehash'] = kwargs['filehash']
action_msg['payload'] = payload
return action_msg
def _craft_file_confirmation_message():
payload['piecehashes'] = kwargs['piecehashes']
action_msg['payload'] = payload
return action_msg
def _craft_change_download_path_msg():
payload['download_folder'] = kwargs['download_folder']
action_msg['payload'] = payload
return action_msg
def _craft_share_new_folder_msg():
payload['shared_folder'] = kwargs['shared_folder']
action_msg['payload'] = payload
return action_msg
def _craft_new_download_folder_response_msg():
payload['download_folder_response'] = kwargs[
'download_folder_response']
action_msg['payload'] = payload
return action_msg
def _craft_new_shared_folder_response_msg():
payload['shared_folder_response'] = kwargs['shared_folder_response']
action_msg['payload'] = payload
return action_msg
def _craft_get_hash_identity_msg():
payload['checksum'] = kwargs['checksum']
action_msg['payload'] = payload
return action_msg
def _craft_hash_identity_response_msg():
payload['file_structure_response'] = kwargs['file_structure_response']
action_msg['payload'] = payload
return action_msg
def _craft_hash_not_present_msg():
payload['reason'] = kwargs['reason']
action_msg['payload'] = payload
return action_msg
dispatcher = {
NEW_PEER: _craft_new_peer_msg,
REMOVE_LEADER: _craft_remove_leader_msg,
RE_ELECTION: _craft_re_election_msg,
HANDLE_PONG: _craft_handle_pong_msg,
NEW_LEADER: _craft_new_leader_msg,
ALIVE: _craft_alive_msg,
HANDLE_PING: _craft_handle_ping_msg,
HANDLE_ALIVE: _craft_handle_alive_msg,
BCAST_LEDGER: _craft_bcast_ledger_msg,
SECRET_VAL: _craft_secret_val_msg,
FACE_OFF: _craft_face_off_msg,
DEAD: _craft_dead_msg,
UNCHOKE: _craft_unchoke_msg,
ERROR_LIST_ALL_FILES: _craft_error_list_all_files_msg,
LEADER_NOT_READY: _craft_leader_not_ready_msg,
SEARCH_RES: _craft_search_response_msg,
HASH_DUMP: _craft_hash_dump_msg,
INIT_FILE_REQ: _craft_init_file_req_msg,
LEADER: _craft_leader_msg,
PEER_DEAD: _craft_peer_dead_msg,
FILE_SYS_EVENT: _craft_file_sys_event_msg,
SEARCH_REQ: _craft_search_req_msg,
LOOKUP: _craft_lookup_msg,
IWANT_PEER_FILE: _craft_iwant_peer_file_msg,
SEND_PEER_DETAILS: _craft_send_peer_details_msg,
PEER_LOOKUP_RESPONSE: _craft_peer_lookup_response_msg,
INDEXED: _craft_indexed_msg,
REQ_CHUNK: _craft_req_chunk_msg,
END_GAME: _craft_end_game_msg,
INTERESTED: _craft_interested_msg,
FILE_DETAILS_RESP: _craft_file_details_resp,
FILE_CONFIRMATION_MESSAGE: _craft_file_confirmation_message,
FILE_TO_BE_DOWNLOADED: _craft_file_to_be_downloaded_msg,
CHANGE: _craft_change_download_path_msg,
SHARE: _craft_share_new_folder_msg,
NEW_DOWNLOAD_FOLDER_RES: _craft_new_download_folder_response_msg,
NEW_SHARED_FOLDER_RES: _craft_new_shared_folder_response_msg,
GET_HASH_IDENTITY: _craft_get_hash_identity_msg,
HASH_IDENTITY_RESPONSE: _craft_hash_identity_response_msg,
HASH_NOT_PRESENT: _craft_hash_not_present_msg
}
return dispatcher[key]()
def unbake(message=None):
json_msg = json.loads(message)
if 'leader_id' in json_msg['payload']:
leader_uuid = json_msg['payload']['leader_id']
if leader_uuid is not None:
json_msg['payload']['leader_id'] = time_uuid.TimeUUID(leader_uuid)
if 'identity' in json_msg['payload']:
identity_uuid = json_msg['payload']['identity']
if identity_uuid is not None:
json_msg['payload']['identity'] = time_uuid.TimeUUID(identity_uuid)
if 'dead_uuid' in json_msg['payload']:
dead_uuid = json_msg['payload']['dead_uuid']
if dead_uuid is not None:
json_msg['payload']['dead_uuid'] = time_uuid.TimeUUID(dead_uuid)
if 'ledger' in json_msg['payload']:
ledger = {}
ledger_response = json_msg['payload']['ledger']
if ledger_response:
for uuid, values in ledger_response.iteritems():
ledger[time_uuid.TimeUUID(uuid)] = values
json_msg['payload']['ledger'] = ledger
action_dispatcher, action_payload = json_msg['type'], json_msg['payload']
return action_dispatcher, action_payload
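# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical round trip through bake/unbake for a ping message.
if __name__ == '__main__':
    wire_msg = bake(HANDLE_PING, ping=True)   # JSON string terminated by '\r'
    msg_type, payload = unbake(message=wire_msg.rstrip('\r'))
    assert msg_type == HANDLE_PING and payload['ping'] is True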
|
tensorflow/contrib/slim/python/slim/data/dataset.py
|
connectthefuture/tensorflow
| 101 |
107524
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of a Dataset.
A Dataset is a collection of several components: (1) a list of data sources,
(2) a Reader class that can read those sources and return possibly encoded
samples of data, (3) a decoder that decodes each sample of data provided by the
reader, (4) the total number of samples, and (5) an optional dictionary mapping
the list of items returned to a description of those items.
Data can be loaded from a dataset specification using a dataset_data_provider:
dataset = CreateMyDataset(...)
provider = dataset_data_provider.DatasetDataProvider(
dataset, shuffle=False)
image, label = provider.get(['image', 'label'])
See slim.data.dataset_data_provider for additional examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Dataset(object):
"""Represents a Dataset specification."""
def __init__(self, data_sources, reader, decoder, num_samples,
items_to_descriptions, **kwargs):
"""Initializes the dataset.
Args:
data_sources: A list of files that make up the dataset.
reader: The reader class, a subclass of BaseReader such as TextLineReader
or TFRecordReader.
decoder: An instance of a data_decoder.
num_samples: The number of samples in the dataset.
items_to_descriptions: A map from the items that the dataset provides to
the descriptions of those items.
**kwargs: Any remaining dataset-specific fields.
"""
kwargs['data_sources'] = data_sources
kwargs['reader'] = reader
kwargs['decoder'] = decoder
kwargs['num_samples'] = num_samples
kwargs['items_to_descriptions'] = items_to_descriptions
self.__dict__.update(kwargs)
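# --- Usage sketch (not part of the original module) ---
# A hypothetical Dataset specification for a TFRecord-based dataset (assumes
# TF 1.x); the file pattern, decoder and sample count are placeholders.
def _example_dataset(file_pattern, decoder, num_samples):
  import tensorflow as tf
  return Dataset(
      data_sources=[file_pattern],
      reader=tf.TFRecordReader,
      decoder=decoder,
      num_samples=num_samples,
      items_to_descriptions={'image': 'A color image.',
                             'label': 'An integer class label.'})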
|
users/signals.py
|
ThusharaX/mumbleapi
| 187 |
107538
|
from django.db.models.signals import post_save, pre_save, post_delete
from django.contrib.auth.models import User
from .models import UserProfile
def create_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(
user=instance,
name=instance.username,
username=instance.username,
#email=instance.email,
)
print('Profile Created!')
def update_profile(sender, instance, created, **kwargs):
user_profile, _ = UserProfile.objects.get_or_create(user=instance)
    if not created:
user_profile.username = instance.username
#instance.userprofile.email = instance.email
user_profile.save()
print('Profile updated!')
post_save.connect(create_profile, sender=User)
post_save.connect(update_profile, sender=User)
|
tests/agents/turtleagent/finish.py
|
LaudateCorpus1/holodeck
| 518 |
107624
|
"""
Script that will navigate the "maze" in testworld
"""
def navigate(env, callback):
for _ in range(100):
callback(env.step([0, 0]))
for _ in range(11):
callback(env.step([0, -30]))
for _ in range(10):
callback(env.step([0, 0]))
for _ in range(123):
callback(env.step([80, 0]))
for _ in range(26):
callback(env.step([0, 0]))
for _ in range(10):
callback(env.step([0, 30]))
for _ in range(100):
callback(env.step([150, 0]))
for _ in range(100):
callback(env.step([0, 0]))
for _ in range(10):
callback(env.step([0, 30]))
for _ in range(7):
callback(env.step([0, 0]))
for _ in range(100):
callback(env.step([100, 0]))
for _ in range(30):
callback(env.step([0, 0]))
|
pytest-webdriver/tests/unit/test_webdriver.py
|
RaiVaibhav/pytest-plugins
| 167 |
107627
|
from mock import Mock, sentinel, patch
import pytest
import selenium
import pytest_webdriver
def test_browser_to_use():
caps = Mock(CHROME=sentinel.chrome, UNKNOWN=None)
wd = Mock(DesiredCapabilities = Mock(return_value = caps))
assert pytest_webdriver.browser_to_use(wd, 'chrome') == sentinel.chrome
with pytest.raises(ValueError):
pytest_webdriver.browser_to_use(wd, 'unknown')
|
code/general.py
|
hanseungwook/Stylized-ImageNet
| 443 |
107645
|
#!/usr/bin/env python
"""
General definitions and paths
"""
import argparse
import os
from os.path import join as pjoin
###########################################################
# SETTINGS THAT NEED TO BE CHANGED BY USER
###########################################################
# TODO Dear user, please change these paths:
IMAGENET_PATH = "/gpfs01/bethge/data/imagenet-raw/raw-data/" # TODO specify ImageNet path
STYLIZED_IMAGENET_PATH = "/gpfs01/bethge/data/imagenet-styletransfer-v2/" # TODO specify target path: where should Stylized-ImageNet be stored?
###########################################################
# SETTINGS THAT USUALLY DON'T NEED TO BE CHANGED
###########################################################
IMG_SIZE = 224
ADAIN_RAW_PAINTINGS_DIR = "./paintings_raw/"
ADAIN_EXCLUDED_PAINTINGS_DIR = "./paintings_excluded/"
ADAIN_PREPROCESSED_PAINTINGS_DIR = "./paintings_preprocessed/"
ADAIN_MODEL_DIR = "./models/"
ADAIN_VGG_PATH = pjoin(ADAIN_MODEL_DIR, "vgg_normalised.pth")
ADAIN_DECODER_PATH = pjoin(ADAIN_MODEL_DIR, "decoder.pth")
assert os.path.exists(ADAIN_VGG_PATH)
assert os.path.exists(ADAIN_DECODER_PATH)
assert os.path.exists(IMAGENET_PATH)
assert os.path.exists(pjoin(IMAGENET_PATH, "train/"))
assert os.path.exists(pjoin(IMAGENET_PATH, "val/"))
def get_default_adain_args():
# parse arguments
parser = argparse.ArgumentParser()
# Basic options
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--vgg', type=str, default=ADAIN_VGG_PATH)
parser.add_argument('--decoder', type=str, default=ADAIN_DECODER_PATH)
# Additional options
parser.add_argument('--content_size', type=int, default=IMG_SIZE,
help='New (minimum) size for the content image, \
keeping the original size if set to 0')
parser.add_argument('--style_size', type=int, default=IMG_SIZE,
help='New (minimum) size for the style image, \
keeping the original size if set to 0')
parser.add_argument('--crop', action='store_true',
help='do center crop to create squared image')
parser.add_argument('--save_ext', default='.jpg',
help='The extension name of the output image')
# Advanced options
parser.add_argument('--preserve_color', action='store_true',
help='If specified, preserve color of the content image')
parser.add_argument('--alpha', type=float, default=1.0,
help='The weight that controls the degree of \
stylization. Should be between 0 and 1')
parser.add_argument(
'--style_interpolation_weights', type=str, default='',
help='The weight for blending the style of multiple style images')
args = parser.parse_args(args=[])
return args
|
scripts/dump_checkpoint_vars.py
|
ewpatton/fast-style-transfer-deeplearnjs
| 1,419 |
107649
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A script to dump tensorflow checkpoint variables to deeplearnjs.
This script takes a checkpoint file and writes all of the variables in the
checkpoint to a directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import re
import string
import tensorflow as tf
FLAGS = None
FILENAME_CHARS = string.ascii_letters + string.digits + '_'
def _var_name_to_filename(var_name):
chars = []
for c in var_name:
if c in FILENAME_CHARS:
chars.append(c)
elif c == '/':
chars.append('_')
return ''.join(chars)
def main():
chk_fpath = os.path.expanduser(FLAGS.checkpoint_file)
reader = tf.train.NewCheckpointReader(chk_fpath)
var_to_shape_map = reader.get_variable_to_shape_map()
output_dir = os.path.expanduser(FLAGS.output_dir)
tf.gfile.MakeDirs(output_dir)
manifest = {}
remove_vars_compiled_re = re.compile(FLAGS.remove_variables_regex)
var_filenames_strs = []
for name in var_to_shape_map:
if (FLAGS.remove_variables_regex and
re.match(remove_vars_compiled_re, name)) or name == 'global_step':
print('Ignoring ' + name)
continue
var_filename = _var_name_to_filename(name)
manifest[name] = {'filename': var_filename, 'shape': var_to_shape_map[name]}
print('Writing variable ' + name + '...')
tensor = reader.get_tensor(name)
with open(os.path.join(output_dir, var_filename), 'wb') as f:
f.write(tensor.tobytes())
var_filenames_strs.append("\"" + var_filename + "\"")
manifest_fpath = os.path.join(output_dir, 'manifest.json')
print('Writing manifest to ' + manifest_fpath)
with open(manifest_fpath, 'w') as f:
f.write(json.dumps(manifest, indent=2, sort_keys=True))
print('Done!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_file',
type=str,
required=True,
help='Path to the model checkpoint')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='The output directory where to store the converted weights')
parser.add_argument(
'--remove_variables_regex',
type=str,
default='',
help='A regular expression to match against variable names that should '
'not be included')
FLAGS, unparsed = parser.parse_known_args()
if unparsed:
print('Error, unrecognized flags:', unparsed)
exit(-1)
main()
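# --- Usage sketch (not part of the original script) ---
# Hypothetical invocation; the checkpoint and output paths are placeholders.
#   python dump_checkpoint_vars.py \
#       --checkpoint_file=~/checkpoints/style_model.ckpt \
#       --output_dir=./weights_manifest \
#       --remove_variables_regex=".*Adam.*"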
|
openaerostruct/common/atmos_comp.py
|
lamkina/OpenAeroStruct
| 114 |
107651
|
from collections import namedtuple
import numpy as np
from scipy.interpolate import Akima1DInterpolator as Akima
import openmdao.api as om
"""United States standard atmosphere 1976 tables, data obtained from http://www.digitaldutch.com/atmoscalc/index.htm"""
USatm1976Data = namedtuple("USatm1976Data", ["alt", "T", "P", "rho", "speed_of_sound", "viscosity"])
USatm1976Data.alt = np.array(
[
-1000,
0,
1000,
2000,
3000,
4000,
5000,
6000,
7000,
8000,
9000,
10000,
11000,
12000,
13000,
14000,
15000,
16000,
17000,
18000,
19000,
20000,
21000,
22000,
23000,
24000,
25000,
26000,
27000,
28000,
29000,
30000,
31000,
32000,
33000,
34000,
35000,
36000,
37000,
38000,
39000,
40000,
41000,
42000,
43000,
44000,
45000,
46000,
47000,
48000,
49000,
50000,
51000,
52000,
53000,
54000,
55000,
56000,
57000,
58000,
59000,
60000,
61000,
62000,
63000,
64000,
65000,
66000,
67000,
68000,
69000,
70000,
71000,
72000,
73000,
74000,
75000,
76000,
77000,
78000,
79000,
80000,
81000,
82000,
83000,
84000,
85000,
86000,
87000,
88000,
89000,
90000,
91000,
92000,
93000,
94000,
95000,
96000,
97000,
98000,
99000,
100000,
105000,
110000,
115000,
120000,
125000,
130000,
135000,
140000,
145000,
150000,
]
) # units='ft'
USatm1976Data.T = np.array(
[
522.236,
518.67,
515.104,
511.538,
507.972,
504.405,
500.839,
497.273,
493.707,
490.141,
486.575,
483.008,
479.442,
475.876,
472.31,
468.744,
465.178,
461.611,
458.045,
454.479,
450.913,
447.347,
443.781,
440.214,
436.648,
433.082,
429.516,
425.95,
422.384,
418.818,
415.251,
411.685,
408.119,
404.553,
400.987,
397.421,
393.854,
390.288,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
390.18,
390.729,
391.278,
391.826,
392.375,
392.923,
393.472,
394.021,
394.569,
395.118,
395.667,
396.215,
396.764,
397.313,
397.861,
398.41,
398.958,
399.507,
400.056,
400.604,
401.153,
401.702,
402.25,
402.799,
403.348,
403.896,
404.445,
404.994,
405.542,
406.091,
406.639,
407.188,
407.737,
408.285,
408.834,
411.59,
419.271,
426.952,
434.633,
442.314,
449.995,
457.676,
465.357,
473.038,
480.719,
]
) # units='degR'
USatm1976Data.P = np.array(
[
15.2348,
14.6959,
14.1726,
13.6644,
13.1711,
12.6923,
12.2277,
11.777,
11.3398,
10.9159,
10.5049,
10.1065,
9.7204,
9.34636,
8.98405,
8.63321,
8.29354,
7.96478,
7.64665,
7.33889,
7.4123,
6.75343,
6.47523,
6.20638,
5.94664,
5.69578,
5.45355,
5.21974,
4.9941,
4.77644,
4.56651,
4.36413,
4.16906,
3.98112,
3.8001,
3.6258,
3.45803,
3.29661,
3.14191,
2.99447,
2.85395,
2.72003,
2.59239,
2.47073,
2.35479,
2.24429,
2.13897,
2.0386,
1.94293,
1.85176,
1.76486,
1.68204,
1.60311,
1.52788,
1.45618,
1.38785,
1.32272,
1.26065,
1.20149,
1.14511,
1.09137,
1.04016,
0.991347,
0.944827,
0.900489,
0.858232,
0.817958,
0.779578,
0.743039,
0.708261,
0.675156,
0.643641,
0.613638,
0.585073,
0.557875,
0.531976,
0.507313,
0.483825,
0.461455,
0.440148,
0.419853,
0.400519,
0.382101,
0.364553,
0.347833,
0.331902,
0.31672,
0.302253,
0.288464,
0.275323,
0.262796,
0.250856,
0.239473,
0.228621,
0.218275,
0.20841,
0.199003,
0.190032,
0.181478,
0.173319,
0.165537,
0.158114,
0.12582,
0.10041,
0.08046,
0.064729,
0.0522725,
0.0423688,
0.0344637,
0.0281301,
0.0230369,
0.0189267,
]
) # units='psi'
USatm1976Data.rho = np.array(
[
0.00244752,
0.00237717,
0.00230839,
0.00224114,
0.00217539,
0.00211114,
0.00204834,
0.00198698,
0.00192704,
0.0018685,
0.00181132,
0.00175549,
0.00170099,
0.00164779,
0.00159588,
0.00154522,
0.00149581,
0.00144761,
0.00140061,
0.00135479,
0.00131012,
0.00126659,
0.00122417,
0.00118285,
0.0011426,
0.00110341,
0.00106526,
0.00102812,
0.000991984,
0.000956827,
0.000922631,
0.000889378,
0.00085705,
0.000825628,
0.000795096,
0.000765434,
0.000736627,
0.000708657,
0.000675954,
0.000644234,
0.000614002,
0.000585189,
0.000557728,
0.000531556,
0.000506612,
0.000482838,
0.00046018,
0.000438586,
0.000418004,
0.000398389,
0.000379694,
0.000361876,
0.000344894,
0.000328709,
0.000313284,
0.000298583,
0.000284571,
0.000271217,
0.00025849,
0.00024636,
0.000234799,
0.000223781,
0.000213279,
0.000203271,
0.000193732,
0.000184641,
0.000175976,
0.000167629,
0.000159548,
0.000151867,
0.000144566,
0.000137625,
0.000131026,
0.000124753,
0.000118788,
0.000113116,
0.000107722,
0.000102592,
9.77131e-05,
9.30725e-05,
8.86582e-05,
0.000084459,
8.04641e-05,
7.66632e-05,
7.30467e-05,
6.96054e-05,
6.63307e-05,
6.32142e-05,
6.02481e-05,
5.74249e-05,
5.47376e-05,
5.21794e-05,
4.97441e-05,
4.74254e-05,
4.52178e-05,
4.31158e-05,
0.000041114,
3.92078e-05,
3.73923e-05,
3.56632e-05,
3.40162e-05,
3.24473e-05,
2.56472e-05,
2.00926e-05,
1.58108e-05,
1.24948e-05,
9.9151e-06,
7.89937e-06,
6.3177e-06,
5.07154e-06,
4.08586e-06,
3.30323e-06,
]
) # units='slug/ft**3'
USatm1976Data.a = np.array(
[
1120.28,
1116.45,
1112.61,
1108.75,
1104.88,
1100.99,
1097.09,
1093.18,
1089.25,
1085.31,
1081.36,
1077.39,
1073.4,
1069.4,
1065.39,
1061.36,
1057.31,
1053.25,
1049.18,
1045.08,
1040.97,
1036.85,
1032.71,
1028.55,
1024.38,
1020.19,
1015.98,
1011.75,
1007.51,
1003.24,
998.963,
994.664,
990.347,
986.01,
981.655,
977.28,
972.885,
968.471,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.337,
969.017,
969.698,
970.377,
971.056,
971.735,
972.413,
973.091,
973.768,
974.445,
975.121,
975.797,
976.472,
977.147,
977.822,
978.496,
979.169,
979.842,
980.515,
981.187,
981.858,
982.53,
983.2,
983.871,
984.541,
985.21,
985.879,
986.547,
987.215,
987.883,
988.55,
989.217,
989.883,
990.549,
991.214,
994.549,
1003.79,
1012.94,
1022.01,
1031,
1039.91,
1048.75,
1057.52,
1066.21,
1074.83,
]
) # units='ft/s'
USatm1976Data.viscosity = np.array(
[
3.81e-07,
3.78e-07,
3.76e-07,
3.74e-07,
3.72e-07,
3.70e-07,
3.68e-07,
3.66e-07,
3.64e-07,
3.62e-07,
3.60e-07,
3.57e-07,
3.55e-07,
3.53e-07,
3.51e-07,
3.49e-07,
3.47e-07,
3.45e-07,
3.42e-07,
3.40e-07,
3.38e-07,
3.36e-07,
3.34e-07,
3.31e-07,
3.29e-07,
3.27e-07,
3.25e-07,
3.22e-07,
3.20e-07,
3.18e-07,
3.16e-07,
3.13e-07,
3.11e-07,
3.09e-07,
3.06e-07,
3.04e-07,
3.02e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
3.00e-07,
3.00e-07,
3.00e-07,
3.01e-07,
3.01e-07,
3.01e-07,
3.02e-07,
3.02e-07,
3.03e-07,
3.03e-07,
3.03e-07,
3.04e-07,
3.04e-07,
3.04e-07,
3.05e-07,
3.05e-07,
3.05e-07,
3.06e-07,
3.06e-07,
3.06e-07,
3.07e-07,
3.07e-07,
3.08e-07,
3.08e-07,
3.08e-07,
3.09e-07,
3.09e-07,
3.09e-07,
3.10e-07,
3.10e-07,
3.10e-07,
3.11e-07,
3.11e-07,
3.11e-07,
3.13e-07,
3.18e-07,
3.23e-07,
3.28e-07,
3.33e-07,
3.37e-07,
3.42e-07,
3.47e-07,
3.51e-07,
3.56e-07,
]
) # units='lbf*s/ft**2'
T_interp = Akima(USatm1976Data.alt, USatm1976Data.T)
P_interp = Akima(USatm1976Data.alt, USatm1976Data.P)
rho_interp = Akima(USatm1976Data.alt, USatm1976Data.rho)
a_interp = Akima(USatm1976Data.alt, USatm1976Data.a)
viscosity_interp = Akima(USatm1976Data.alt, USatm1976Data.viscosity)
T_interp_deriv = T_interp.derivative(1)
P_interp_deriv = P_interp.derivative(1)
rho_interp_deriv = rho_interp.derivative(1)
a_interp_deriv = a_interp.derivative(1)
viscosity_interp_deriv = viscosity_interp.derivative(1)
class AtmosComp(om.ExplicitComponent):
def setup(self):
self.add_input("altitude", val=1.0, units="ft")
self.add_input("Mach_number", val=1.0)
self.add_output("T", val=1.0, units="degR")
self.add_output("P", val=1.0, units="psi")
self.add_output("rho", val=1.0, units="slug/ft**3")
self.add_output("speed_of_sound", val=1.0, units="ft/s")
self.add_output("mu", val=1.0, units="lbf*s/ft**2")
self.add_output("v", val=1.0, units="ft/s")
self.declare_partials(["T", "P", "rho", "speed_of_sound", "mu", "v"], "altitude")
self.declare_partials("v", "Mach_number")
def compute(self, inputs, outputs):
outputs["T"] = T_interp(inputs["altitude"])
outputs["P"] = P_interp(inputs["altitude"])
outputs["rho"] = rho_interp(inputs["altitude"])
outputs["speed_of_sound"] = a_interp(inputs["altitude"])
outputs["mu"] = viscosity_interp(inputs["altitude"])
outputs["v"] = outputs["speed_of_sound"] * inputs["Mach_number"]
def compute_partials(self, inputs, partials):
partials["T", "altitude"] = T_interp_deriv(inputs["altitude"])
partials["P", "altitude"] = P_interp_deriv(inputs["altitude"])
partials["rho", "altitude"] = rho_interp_deriv(inputs["altitude"])
partials["speed_of_sound", "altitude"] = a_interp_deriv(inputs["altitude"])
partials["mu", "altitude"] = viscosity_interp_deriv(inputs["altitude"])
partials["v", "altitude"] = a_interp_deriv(inputs["altitude"]) * inputs["Mach_number"]
partials["v", "Mach_number"] = a_interp(inputs["altitude"])
|
test/lang/c/test_synthesis.py
|
rakati/ppci-mirror
| 161 |
107652
|
import unittest
import io
from ppci import ir
from ppci.irutils import verify_module
from ppci.lang.c import CBuilder
from ppci.lang.c.options import COptions
from ppci.arch.example import ExampleArch
from ppci.lang.c import CSynthesizer
class CSynthesizerTestCase(unittest.TestCase):
def test_hello(self):
""" Convert C to Ir, and then this IR to C """
src = r"""
void printf(char*);
void main(int b) {
printf("Hello" "world\n");
}
"""
arch = ExampleArch()
builder = CBuilder(arch.info, COptions())
f = io.StringIO(src)
ir_module = builder.build(f, None)
assert isinstance(ir_module, ir.Module)
verify_module(ir_module)
synthesizer = CSynthesizer()
synthesizer.syn_module(ir_module)
if __name__ == "__main__":
unittest.main()
|
deepvariant/realigner/python/ssw_misc_test.py
|
serge2016/deepvariant
| 2,553 |
107661
|
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from absl.testing import absltest
import six
from deepvariant.realigner.python import ssw
def p(obj):
for x in dir(obj):
if not x.startswith('_'):
print(x + ':' + repr(getattr(obj, x, '')))
class SswGccTest(absltest.TestCase):
"""Tests for the wrapped SSW aligner in a way that fails with gcc5.4."""
def test_short(self):
"""Test very short strings."""
ref = 'tttt'
query = 'ttAtt'
match = 4
mismatch = 2
gap_extend_penalty = 2
gap_open_penalty = 4
aligner = ssw.Aligner.construct(
match_score=match,
mismatch_penalty=mismatch,
gap_opening_penalty=gap_open_penalty,
gap_extending_penalty=gap_extend_penalty)
filter_ = ssw.Filter()
length = aligner.set_reference_sequence(ref)
self.assertLen(ref, length)
alignment = aligner.align(query, filter_)
p(alignment)
self.assertEqual(six.b('2=1I2='), alignment.cigar_string)
def test_longer(self):
"""Test longer strings, so the second-best alignment is considered."""
ref = 'TTTTGGGGGGGGGGGGG'
query = 'TTATTGGGGGGGGGGGGG'
match = 4
mismatch = 2
gap_extend_penalty = 2
gap_open_penalty = 4
aligner = ssw.Aligner.construct(
match_score=match,
mismatch_penalty=mismatch,
gap_opening_penalty=gap_open_penalty,
gap_extending_penalty=gap_extend_penalty)
filter_ = ssw.Filter()
length = aligner.set_reference_sequence(ref)
self.assertLen(ref, length)
alignment = aligner.align(query, filter_)
p(alignment)
self.assertEqual(six.b('2=1I15='), alignment.cigar_string)
if __name__ == '__main__':
absltest.main()
|
Radar/radar_angle_radius_axis.py
|
pyecharts/pyecharts_gallery
| 759 |
107663
|
from pyecharts import options as opts
from pyecharts.charts import Radar
data = [{"value": [4, -4, 2, 3, 0, 1], "name": "预算分配"}]
c_schema = [
{"name": "销售", "max": 4, "min": -4},
{"name": "管理", "max": 4, "min": -4},
{"name": "技术", "max": 4, "min": -4},
{"name": "客服", "max": 4, "min": -4},
{"name": "研发", "max": 4, "min": -4},
{"name": "市场", "max": 4, "min": -4},
]
c = (
Radar()
.set_colors(["#4587E7"])
.add_schema(
schema=c_schema,
shape="circle",
center=["50%", "50%"],
radius="80%",
angleaxis_opts=opts.AngleAxisOpts(
min_=0,
max_=360,
is_clockwise=False,
interval=5,
axistick_opts=opts.AxisTickOpts(is_show=False),
axislabel_opts=opts.LabelOpts(is_show=False),
axisline_opts=opts.AxisLineOpts(is_show=False),
splitline_opts=opts.SplitLineOpts(is_show=False),
),
radiusaxis_opts=opts.RadiusAxisOpts(
min_=-4,
max_=4,
interval=2,
splitarea_opts=opts.SplitAreaOpts(
is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)
),
),
polar_opts=opts.PolarOpts(),
splitarea_opt=opts.SplitAreaOpts(is_show=False),
splitline_opt=opts.SplitLineOpts(is_show=False),
)
.add(
series_name="预算",
data=data,
areastyle_opts=opts.AreaStyleOpts(opacity=0.1),
linestyle_opts=opts.LineStyleOpts(width=1),
)
.render("radar_angle_radius_axis.html")
)
|
tasks/cluster_agent_cloudfoundry.py
|
Jeremyyang920/datadog-agent
| 1,611 |
107671
|
"""
Cluster Agent for Cloud Foundry tasks
"""
import os
from invoke import task
from .build_tags import get_default_build_tags
from .cluster_agent_helpers import build_common, clean_common, refresh_assets_common, version_common
# constants
BIN_PATH = os.path.join(".", "bin", "datadog-cluster-agent-cloudfoundry")
@task
def build(ctx, rebuild=False, build_include=None, build_exclude=None, race=False, development=True, skip_assets=False):
"""
Build Cluster Agent for Cloud Foundry
    Example invocation:
inv cluster-agent-cloudfoundry.build
"""
build_common(
ctx,
BIN_PATH,
get_default_build_tags(build="cluster-agent-cloudfoundry"),
"-cloudfoundry",
rebuild,
build_include,
build_exclude,
race,
development,
skip_assets,
)
@task
def refresh_assets(ctx, development=True):
"""
Clean up and refresh cluster agent's assets and config files
"""
refresh_assets_common(ctx, BIN_PATH, [], development)
@task
def integration_tests(ctx, install_deps=False, race=False, remote_docker=False): # noqa: U100
"""
Run integration tests for cluster-agent-cloudfoundry
"""
pass # TODO
@task
def clean(ctx):
"""
Remove temporary objects and binary artifacts
"""
clean_common(ctx, "datadog-cluster-agent")
@task
def version(ctx, url_safe=False, git_sha_length=7):
"""
Get the agent version.
url_safe: get the version that is able to be addressed as a url
git_sha_length: different versions of git have a different short sha length,
use this to explicitly set the version
(the windows builder and the default ubuntu version have such an incompatibility)
"""
version_common(ctx, url_safe, git_sha_length)
|
examples/bamboo/bamboo_plan_directory_info.py
|
Kudesnick/atlassian-python-api
| 779 |
107673
|
# coding=utf-8
import os
from atlassian import Bamboo
BAMBOO_URL = os.environ.get("BAMBOO_URL", "http://localhost:8085")
ATLASSIAN_USER = os.environ.get("ATLASSIAN_USER", "admin")
ATLASSIAN_PASSWORD = os.environ.get("ATLASSIAN_PASSWORD", "<PASSWORD>")
bamboo = Bamboo(url=BAMBOO_URL, username=ATLASSIAN_USER, password=ATLASSIAN_PASSWORD)
plan_directories_roots = bamboo.plan_directory_info("PROJ-PLAN")
print(plan_directories_roots)
|
sdk/netapp/azure-mgmt-netapp/tests/test_vault.py
|
rsdoherty/azure-sdk-for-python
| 2,728 |
107684
|
from devtools_testutils import AzureMgmtTestCase
from test_volume import create_volume, delete_volume, delete_pool, delete_account
from setup import *
import azure.mgmt.netapp.models
class NetAppAccountTestCase(AzureMgmtTestCase):
def setUp(self):
super(NetAppAccountTestCase, self).setUp()
self.client = self.create_mgmt_client(azure.mgmt.netapp.NetAppManagementClient)
# Before tests are run live a resource group needs to be created along with vnet and subnet
# Note that when tests are run in live mode it is best to run one test at a time.
def test_get_vault(self):
create_volume(self.client)
vaults = self.client.vaults.list(TEST_RG, TEST_ACC_1)
self.assertEqual(len(list(vaults)), 1)
# clean up
delete_volume(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, live=self.is_live)
delete_pool(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, live=self.is_live)
delete_account(self.client, TEST_RG, TEST_ACC_1, live=self.is_live)
|
koku/api/migrations/0039_create_hive_db.py
|
rubik-ai/koku
| 157 |
107696
|
# Generated by Django 3.1.7 on 2021-03-03 19:26
import logging
import os
from django.conf import settings
from django.db import migrations
from django.db.utils import ProgrammingError
from psycopg2.errors import DuplicateDatabase
from psycopg2.errors import DuplicateObject
from psycopg2.errors import InsufficientPrivilege
LOG = logging.getLogger(__name__)
def create_hive_db(apps, schema_editor):
rolname = settings.HIVE_DATABASE_USER
datname = settings.HIVE_DATABASE_NAME
kokudb = settings.DATABASES.get("default").get("NAME")
kokudbuser = settings.DATABASES.get("default").get("USER")
db_password = settings.DATABASES.get("default").get("PASSWORD")
hive_db_password = settings.HIVE_DATABASE_PASSWORD
role_create_sql = f"""
create role "{rolname}" with login encrypted password '{{hivepw}}';
"""
role_public_revoke_sql = """
revoke connect on database "{}" from "public";
"""
role_revoke_sql = f"""
revoke connect on database "{kokudb}" from "{rolname}";
"""
role_grant_sql = f"""
grant connect on database "{datname}" to "{kokudbuser}";
"""
db_create_sql = f"""
create database "{datname}" owner "{rolname}";
"""
db_access_check_sql = """
select has_database_privilege(%s, %s, 'connect');
"""
try:
conn = schema_editor.connection.connection.__class__(
"postgresql://{user}:{password}@{host}:{port}/{dbname}".format(
password=db_password, **schema_editor.connection.connection.get_dsn_parameters()
)
)
conn.autocommit = True
with conn.cursor() as cur:
LOG.info(f"Creating role {rolname}.")
try:
cur.execute(role_create_sql.format(hivepw=hive_db_password))
except (ProgrammingError, InsufficientPrivilege, DuplicateObject) as e:
LOG.info(e)
LOG.info(f"Role {rolname} exists.")
try:
LOG.info(f"""Granting role "{rolname}" membership to "{kokudbuser}".""")
cur.execute(f"""grant "{rolname}" to "{kokudbuser}"; """)
except (ProgrammingError, InsufficientPrivilege) as e:
LOG.info(e)
try:
LOG.info(f"Creating database {rolname}.")
cur.execute(db_create_sql)
except (ProgrammingError, InsufficientPrivilege, DuplicateDatabase) as e:
LOG.info(e)
LOG.info(f"Database {rolname} exists.")
# Revoke access to koku db from public
try:
cur.execute(db_access_check_sql, ("public", kokudb))
if cur.fetchone()[0]:
LOG.info(f"Revoking public access to {kokudb}.")
cur.execute(role_public_revoke_sql.format(kokudb))
except (ProgrammingError, InsufficientPrivilege) as e:
LOG.info(e)
# Revoke access to hive db from public
try:
cur.execute(db_access_check_sql, ("public", datname))
if cur.fetchone()[0]:
LOG.info(f"Revoking public access to {datname}.")
cur.execute(role_public_revoke_sql.format(datname))
except (ProgrammingError, InsufficientPrivilege) as e:
LOG.info(e)
# Revoke access to koku db from hive user
try:
cur.execute(db_access_check_sql, (rolname, kokudb))
if cur.fetchone()[0]:
LOG.info(f"Revoking {rolname} access to {kokudb}.")
cur.execute(role_revoke_sql)
except (ProgrammingError, InsufficientPrivilege) as e:
LOG.info(e)
# Grant access to hive db from koku user
try:
cur.execute(db_access_check_sql, (kokudbuser, datname))
if not cur.fetchone()[0]:
LOG.info(f"Granting {kokudbuser} access to {datname}.")
cur.execute(role_grant_sql)
except (ProgrammingError, InsufficientPrivilege) as e:
LOG.info(e)
try:
LOG.info(f"""Revoking role "{rolname}" membership from "{kokudbuser}".""")
cur.execute(f"""revoke "{rolname}" from "{kokudbuser}"; """)
except (ProgrammingError, InsufficientPrivilege) as e:
LOG.info(e)
finally:
if conn:
conn.close()
class Migration(migrations.Migration):
dependencies = [("api", "0038_drop_app_needs_migrations_func")]
operations = [migrations.RunPython(create_hive_db)]
|
maskrcnn_benchmark/modeling/roi_heads/boundary_head/roi_boundary_predictors.py
|
sergiev/ContourNet
| 211 |
107708
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.layers import Conv2d
from maskrcnn_benchmark.layers import ConvTranspose2d
from maskrcnn_benchmark import layers
class BOUNDARYRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(BOUNDARYRCNNC4Predictor, self).__init__()
dim_reduced = cfg.MODEL.ROI_BOUNDARY_HEAD.CONV_LAYERS[-1]
self.resol = cfg.MODEL.ROI_BOUNDARY_HEAD.RESOLUTION # 56
if cfg.MODEL.ROI_HEADS.USE_FPN:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS #256
num_inputs = res2_out_channels * stage2_relative_factor
self.bo_input_xy = Conv2d(num_inputs, num_inputs, 1, 1, 0)
nn.init.kaiming_normal_(self.bo_input_xy.weight,
mode='fan_out', nonlinearity='relu')
nn.init.constant_(self.bo_input_xy.bias, 0)
self.conv5_bo_xy = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
nn.init.kaiming_normal_(self.conv5_bo_xy.weight,
mode='fan_out', nonlinearity='relu')
nn.init.constant_(self.conv5_bo_xy.bias, 0)
self.bo_input_1_1 = Conv2d(dim_reduced, dim_reduced, 1, 1, 0)
nn.init.kaiming_normal_(self.bo_input_1_1.weight,
mode='fan_out', nonlinearity='relu')
nn.init.constant_(self.bo_input_1_1.bias, 0)
self.bo_input_2_1 = Conv2d(dim_reduced, dim_reduced, 1, 1, 0)
nn.init.kaiming_normal_(self.bo_input_2_1.weight,
mode='fan_out', nonlinearity='relu')
nn.init.constant_(self.bo_input_2_1.bias, 0)
self.conv5_bo_x = Conv2d(dim_reduced, 1, (3, 1), 1, (1,0)) # H W
nn.init.kaiming_normal_(self.conv5_bo_x.weight,
mode='fan_out', nonlinearity='relu') # 'relu'
nn.init.constant_(self.conv5_bo_x.bias, 0)
self.conv5_bo_y = Conv2d(dim_reduced, 1, (1, 3), 1, (0,1)) # H W
nn.init.kaiming_normal_(self.conv5_bo_y.weight,
mode='fan_out', nonlinearity='relu')
nn.init.constant_(self.conv5_bo_y.bias, 0)
self.up_scale=2
def forward(self, ft):
ft = self.bo_input_xy(ft)
ft_2x = self.conv5_bo_xy(ft)
ft_2x = layers.interpolate(ft_2x, size = (48,48), mode='bilinear', align_corners=True)
x = self.bo_input_1_1(ft_2x)
y = self.bo_input_2_1(ft_2x)
x = self.conv5_bo_x(x)
y = self.conv5_bo_y(y)
return x, y
_ROI_KE_PREDICTOR = {"BoundaryRCNNC4Predictor": BOUNDARYRCNNC4Predictor}
def make_roi_boundary_predictor(cfg):
func = _ROI_KE_PREDICTOR[cfg.MODEL.ROI_BOUNDARY_HEAD.PREDICTOR]
return func(cfg)
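# --- Usage sketch (not part of the original module) ---
# Hypothetical forward pass through the boundary predictor; `cfg` is assumed
# to be a maskrcnn-benchmark config with USE_FPN enabled and PREDICTOR set to
# "BoundaryRCNNC4Predictor", and the feature tensor shape is illustrative.
def _example_forward(cfg):
    predictor = make_roi_boundary_predictor(cfg)
    features = torch.randn(8, cfg.MODEL.ROI_BOUNDARY_HEAD.CONV_LAYERS[-1], 24, 24)
    x_boundary, y_boundary = predictor(features)  # two 1-channel 48x48 maps
    return x_boundary, y_boundary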
|
examples/gymfc_nf/controllers/pid.py
|
xabierolaz/gymfc
| 270 |
107717
|
import numpy as np
import logging
class PID(object):
def __init__(self, kp, ki, kd):
self.kp = kp
self.ki = ki
self.kd = kd
self.reset()
def update(self, t, e):
# TODO add anti-windup logic
# Most environments have a short execution time
# the controller doesn't have much time to wind up
dt = t - self.last_t
self.last_t = t
p_term = self.kp * e
self.accum += e * dt
i_term = self.ki * self.accum
de = e - self.last_e
self.last_e = e
d_term = self.kd * de / dt if dt > 0 else 0
return p_term + i_term + d_term
def reset(self):
self.last_t = 0
self.last_e = 0
self.accum = 0
class PidController(object):
""" This is a loose port from Betaflight """
FD_ROLL = 0
FD_PITCH = 1
FD_YAW = 2
PTERM_SCALE = 0.032029
ITERM_SCALE = 0.244381
DTERM_SCALE = 0.000529
minthrottle = 1000
maxthrottle = 2000
def __init__(self,
pid_roll = [40, 40, 30],
pid_pitch = [58, 50, 35],
pid_yaw = [80, 45, 20],
mixer = [],
itermLimit = 150):
# init gains and scale
self.Kp = [pid_roll[0], pid_pitch[0], pid_yaw[0]]
self.Kp = [self.PTERM_SCALE * p for p in self.Kp]
self.Ki = [pid_roll[1], pid_pitch[1], pid_yaw[1]]
self.Ki = [self.ITERM_SCALE * i for i in self.Ki]
self.Kd = [pid_roll[2], pid_pitch[2], pid_yaw[2]]
self.Kd = [self.DTERM_SCALE * d for d in self.Kd]
self.itermLimit = itermLimit
self.previousRateError = [0]*3
self.previousTime = 0
self.previous_motor_values = [self.minthrottle]*4
self.pid_rpy = [PID(*pid_roll), PID(*pid_pitch), PID(*pid_yaw)]
self.mixer = mixer
def calculate_motor_values(self, current_time, sp_rates, gyro_rates):
rpy_sums = []
for i in range(3):
u = self.pid_rpy[i].update(current_time, sp_rates[i] - gyro_rates[i])
rpy_sums.append(u)
return self.mix(*rpy_sums)
def constrainf(self, amt, low, high):
# From BF src/main/common/maths.h
if amt < low:
return low
elif amt > high:
return high
else:
return amt
def mix(self, r, p, y):
PID_MIXER_SCALING = 1000.0
        pidSumLimit = 10000.  # 500
        pidSumLimitYaw = 100000.  # 1000.0  # 400
motorOutputMixSign = 1
motorOutputRange = self.maxthrottle - self.minthrottle# throttle max - throttle min
motorOutputMin = self.minthrottle
mixer_index_throttle = 0
mixer_index_roll = 1
mixer_index_pitch = 2
mixer_index_yaw = 3
scaledAxisPidRoll = self.constrainf(r, -pidSumLimit, pidSumLimit) / PID_MIXER_SCALING
scaledAxisPidPitch = self.constrainf(p, -pidSumLimit, pidSumLimit) / PID_MIXER_SCALING
scaledAxisPidYaw = self.constrainf(y, -pidSumLimitYaw, pidSumLimitYaw) / PID_MIXER_SCALING
scaledAxisPidYaw = -scaledAxisPidYaw
# Find roll/pitch/yaw desired output
motor_count = 4
motorMix = [0]*motor_count
motorMixMax = 0
motorMixMin = 0
# No additional throttle, in air mode
throttle = 0
motorRangeMin = 1000
motorRangeMax = 2000
for i in range(motor_count):
mix = (scaledAxisPidRoll * self.mixer[i][1] +
scaledAxisPidPitch * self.mixer[i][2] +
scaledAxisPidYaw * self.mixer[i][3])
if mix > motorMixMax:
motorMixMax = mix
elif mix < motorMixMin:
motorMixMin = mix
motorMix[i] = mix
motorMixRange = motorMixMax - motorMixMin
if motorMixRange > 1.0:
for i in range(motor_count):
motorMix[i] /= motorMixRange
# Get the maximum correction by setting offset to center when airmode enabled
throttle = 0.5
else:
# Only automatically adjust throttle when airmode enabled. Airmode logic is always active on high throttle
throttleLimitOffset = motorMixRange / 2.0
throttle = self.constrainf(throttle, 0.0 + throttleLimitOffset, 1.0 - throttleLimitOffset)
motor = []
for i in range(motor_count):
motorOutput = motorOutputMin + (motorOutputRange * (motorOutputMixSign * motorMix[i] + throttle * self.mixer[i][mixer_index_throttle]))
            motorOutput = self.constrainf(motorOutput, motorRangeMin, motorRangeMax)
motor.append(motorOutput)
motor = list(map(int, np.round(motor)))
return motor
def reset(self):
for pid in self.pid_rpy:
pid.reset()
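if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file). The mixer table below
    # is a typical quad-X layout in [throttle, roll, pitch, yaw] order per motor;
    # verify it against your airframe before relying on it. The rates are
    # made-up numbers for illustration only.
    quad_x_mixer = [
        [1.0, -1.0,  1.0, -1.0],  # rear right
        [1.0, -1.0, -1.0,  1.0],  # front right
        [1.0,  1.0,  1.0,  1.0],  # rear left
        [1.0,  1.0, -1.0, -1.0],  # front left
    ]
    controller = PidController(mixer=quad_x_mixer)
    setpoint_rates = [50.0, 0.0, 0.0]   # desired roll/pitch/yaw rates
    gyro_rates = [10.0, 0.0, 0.0]       # measured rates
    print(controller.calculate_motor_values(0.001, setpoint_rates, gyro_rates))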
|
modules/viz/misc/python/test/test_viz_simple.py
|
ptelang/opencv_contrib
| 7,158 |
107731
|
import os
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
def generate_test_trajectory():
result = []
angle_i = np.arange(0, 271, 3)
angle_j = np.arange(0, 1200, 10)
for i, j in zip(angle_i, angle_j):
x = 2 * np.cos(i * 3 * np.pi/180.0) * (1.0 + 0.5 * np.cos(1.2 + i * 1.2 * np.pi/180.0))
y = 0.25 + i/270.0 + np.sin(j * np.pi/180.0) * 0.2 * np.sin(0.6 + j * 1.5 * np.pi/180.0)
z = 2 * np.sin(i * 3 * np.pi/180.0) * (1.0 + 0.5 * np.cos(1.2 + i * np.pi/180.0))
result.append(cv.viz.makeCameraPose((x, y, z), (0.0, 0, 0), (0.0, 1.0, 0.0)))
x = np.zeros(shape=(len(result), 1, 16 ), dtype= np.float64)
for idx, m in enumerate(result):
x[idx, 0, :] = m.mat().reshape(16)
return x, result
def tutorial3(camera_pov, filename):
myWindow = cv.viz_Viz3d("Coordinate Frame")
myWindow.showWidget("axe",cv.viz_WCoordinateSystem())
cam_origin = (3.0, 3.0, 3.0)
cam_focal_point = (3.0,3.0,2.0)
cam_y_dir = (-1.0,0.0,0.0)
camera_pose = cv.viz.makeCameraPose(cam_origin, cam_focal_point, cam_y_dir)
transform = cv.viz.makeTransformToGlobal((0.0,-1.0,0.0), (-1.0,0.0,0.0), (0.0,0.0,-1.0), cam_origin)
dragon_cloud,_,_ = cv.viz.readCloud(filename)
cloud_widget = cv.viz_WCloud(dragon_cloud, cv.viz_Color().green())
cloud_pose = cv.viz_Affine3d()
cloud_pose = cv.viz_Affine3d().rotate((0, np.pi / 2, 0)).translate((0, 0, 3))
cloud_pose_global = transform.product(cloud_pose)
myWindow.showWidget("CPW_FRUSTUM", cv.viz_WCameraPosition((0.889484, 0.523599)), camera_pose)
if not camera_pov:
myWindow.showWidget("CPW", cv.viz_WCameraPosition(0.5), camera_pose)
myWindow.showWidget("dragon", cloud_widget, cloud_pose_global)
if camera_pov:
myWindow.setViewerPose(camera_pose)
class viz_test(NewOpenCVTests):
def setUp(self):
super(viz_test, self).setUp()
if not bool(os.environ.get('OPENCV_PYTEST_RUN_VIZ', False)):
self.skipTest("Use OPENCV_PYTEST_RUN_VIZ=1 to enable VIZ UI tests")
def test_viz_tutorial3_global_view(self):
tutorial3(False, self.find_file("viz/dragon.ply"))
def test_viz_tutorial3_camera_view(self):
tutorial3(True, self.find_file("viz/dragon.ply"))
def test_viz(self):
dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
myWindow = cv.viz_Viz3d("abc")
myWindow.showWidget("coo", cv.viz_WCoordinateSystem(1))
myWindow.showWidget("cloud", cv.viz_WPaintedCloud(dragon_cloud))
myWindow.spinOnce(500, True)
def test_viz_show_simple_widgets(self):
viz = cv.viz_Viz3d("show_simple_widgets")
viz.setBackgroundMeshLab()
viz.showWidget("coos", cv.viz_WCoordinateSystem())
viz.showWidget("cube", cv.viz_WCube())
viz.showWidget("cub0", cv.viz_WCube((-1.0, -1, -1), (-0.5, -0.5, -0.5), False, cv.viz_Color().indigo()))
viz.showWidget("arro", cv.viz_WArrow((-0.5, -0.5, -0.5), (0.5, 0.5, 0.5), 0.009, cv.viz_Color().raspberry()))
viz.showWidget("cir1", cv.viz_WCircle(0.5, 0.01, cv.viz_Color.bluberry()))
viz.showWidget("cir2", cv.viz_WCircle(0.5, (0.5, 0.0, 0.0), (1.0, 0.0, 0.0), 0.01, cv.viz_Color().apricot()))
viz.showWidget("cyl0", cv.viz_WCylinder((-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), 0.125, 30, cv.viz_Color().brown()))
viz.showWidget("con0", cv.viz_WCone(0.25, 0.125, 6, cv.viz_Color().azure()))
viz.showWidget("con1", cv.viz_WCone(0.125, (0.5, -0.5, 0.5), (0.5, -1.0, 0.5), 6, cv.viz_Color().turquoise()))
text2d = cv.viz_WText("Different simple widgets", (20, 20), 20, cv.viz_Color().green())
viz.showWidget("text2d", text2d)
text3d = cv.viz_WText3D("Simple 3D text", ( 0.5, 0.5, 0.5), 0.125, False, cv.viz_Color().green())
viz.showWidget("text3d", text3d)
viz.showWidget("plane1", cv.viz_WPlane((0.25, 0.75)))
viz.showWidget("plane2", cv.viz_WPlane((0.5, -0.5, -0.5), (0.0, 1.0, 1.0), (1.0, 1.0, 0.0), (1.0, 0.5), cv.viz_Color().gold()))
viz.showWidget("grid1", cv.viz_WGrid((7,7), (0.75,0.75), cv.viz_Color().gray()), cv.viz_Affine3d().translate((0.0, 0.0, -1.0)))
viz.spinOnce(500, True)
text2d.setText("Different simple widgets (updated)")
text3d.setText("Updated text 3D")
viz.spinOnce(500, True)
def test_viz_show_overlay_image(self):
lena = cv.imread(self.find_file("viz/lena.png"))
gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY)
rows = lena.shape[0]
cols = lena.shape[1]
half_lsize = (lena.shape[1] // 2, lena.shape[0] // 2)
viz = cv.viz_Viz3d("show_overlay_image")
viz.setBackgroundMeshLab();
vsz = viz.getWindowSize()
viz.showWidget("coos", cv.viz_WCoordinateSystem())
viz.showWidget("cube", cv.viz_WCube())
x = cv.viz_WImageOverlay(lena, (10, 10, half_lsize[1], half_lsize[0]))
viz.showWidget("img1", x)
viz.showWidget("img2", cv.viz_WImageOverlay(gray, (vsz[0] - 10 - cols // 2, 10, half_lsize[1], half_lsize[0])))
viz.showWidget("img3", cv.viz_WImageOverlay(gray, (10, vsz[1] - 10 - rows // 2, half_lsize[1], half_lsize[0])))
viz.showWidget("img5", cv.viz_WImageOverlay(lena, (vsz[0] - 10 - cols // 2, vsz[1] - 10 - rows // 2, half_lsize[1], half_lsize[0])))
viz.showWidget("text2d", cv.viz_WText("Overlay images", (20, 20), 20, cv.viz_Color().green()))
i = 0
for num in range(50):
i = i + 1
a = i % 360
pose = (3 * np.sin(a * np.pi/180), 2.1, 3 * np.cos(a * np.pi/180));
viz.setViewerPose(cv.viz.makeCameraPose(pose , (0.0, 0.5, 0.0), (0.0, 0.1, 0.0)))
img = lena * (np.sin(i * 10 * np.pi/180) * 0.5 + 0.5)
x.setImage(img.astype(np.uint8))
viz.spinOnce(100, True)
viz.showWidget("text2d", cv.viz_WText("Overlay images (stopped)", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_image_3d(self):
lena = cv.imread(self.find_file("viz/lena.png"))
lena_gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY)
viz = cv.viz_Viz3d("show_image_3d")
viz.setBackgroundMeshLab()
viz.showWidget("coos", cv.viz_WCoordinateSystem())
viz.showWidget("cube", cv.viz_WCube());
viz.showWidget("arr0", cv.viz_WArrow((0.5, 0.0, 0.0), (1.5, 0.0, 0.0), 0.009, cv.viz_Color().raspberry()))
x = cv.viz_WImage3D(lena, (1.0, 1.0))
viz.showWidget("img0", x, cv.viz_Affine3d((0.0, np.pi/2, 0.0), (.5, 0.0, 0.0)))
viz.showWidget("arr1", cv.viz_WArrow((-0.5, -0.5, 0.0), (0.2, 0.2, 0.0), 0.009, cv.viz_Color().raspberry()))
viz.showWidget("img1", cv.viz_WImage3D(lena_gray, (1.0, 1.0), (-0.5, -0.5, 0.0), (1.0, 1.0, 0.0), (0.0, 1.0, 0.0)))
viz.showWidget("arr3", cv.viz_WArrow((-0.5, -0.5, -0.5), (0.5, 0.5, 0.5), 0.009, cv.viz_Color().raspberry()))
viz.showWidget("text2d", cv.viz_WText("Images in 3D", (20, 20), 20, cv.viz_Color().green()))
i = 0
for num in range(50):
img = lena * (np.sin(i*7.5*np.pi/180) * 0.5 + 0.5)
x.setImage(img.astype(np.uint8))
i = i + 1
viz.spinOnce(100, True);
viz.showWidget("text2d", cv.viz_WText("Images in 3D (stopped)", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_cloud_bluberry(self):
dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
pose = cv.viz_Affine3d()
pose = pose.rotate((0, 0.8, 0));
viz = cv.viz_Viz3d("show_cloud_bluberry")
viz.setBackgroundColor(cv.viz_Color().black())
viz.showWidget("coosys", cv.viz_WCoordinateSystem())
viz.showWidget("dragon", cv.viz_WCloud(dragon_cloud, cv.viz_Color().bluberry()), pose)
viz.showWidget("text2d", cv.viz_WText("Bluberry cloud", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_cloud_random_color(self):
dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
colors = np.random.randint(0, 255, size=(dragon_cloud.shape[0],dragon_cloud.shape[1],3), dtype=np.uint8)
pose = cv.viz_Affine3d()
pose = pose.rotate((0, 0.8, 0));
viz = cv.viz_Viz3d("show_cloud_random_color")
viz.setBackgroundMeshLab()
viz.showWidget("coosys", cv.viz_WCoordinateSystem())
viz.showWidget("dragon", cv.viz_WCloud(dragon_cloud, colors), pose)
viz.showWidget("text2d", cv.viz_WText("Random color cloud", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_cloud_masked(self):
dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
qnan = np.NAN
for idx in range(dragon_cloud.shape[0]):
if idx % 15 != 0:
dragon_cloud[idx,:] = qnan
pose = cv.viz_Affine3d()
pose = pose.rotate((0, 0.8, 0))
viz = cv.viz_Viz3d("show_cloud_masked");
viz.showWidget("coosys", cv.viz_WCoordinateSystem())
viz.showWidget("dragon", cv.viz_WCloud(dragon_cloud), pose)
viz.showWidget("text2d", cv.viz_WText("Nan masked cloud", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_cloud_collection(self):
cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
ccol = cv.viz_WCloudCollection()
pose = cv.viz_Affine3d()
pose1 = cv.viz_Affine3d().translate((0, 0, 0)).rotate((np.pi/2, 0, 0))
ccol.addCloud(cloud, cv.viz_Color().white(), cv.viz_Affine3d().translate((0, 0, 0)).rotate((np.pi/2, 0, 0)))
ccol.addCloud(cloud, cv.viz_Color().blue(), cv.viz_Affine3d().translate((1, 0, 0)))
ccol.addCloud(cloud, cv.viz_Color().red(), cv.viz_Affine3d().translate((2, 0, 0)))
ccol.finalize();
viz = cv.viz_Viz3d("show_cloud_collection")
viz.setBackgroundColor(cv.viz_Color().mlab())
viz.showWidget("coosys", cv.viz_WCoordinateSystem());
viz.showWidget("ccol", ccol);
viz.showWidget("text2d", cv.viz_WText("Cloud collection", (20, 20), 20, cv.viz_Color(0, 255,0 )))
viz.spinOnce(500, True)
def test_viz_show_painted_clouds(self):
cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
viz = cv.viz_Viz3d("show_painted_clouds")
viz.setBackgroundMeshLab()
viz.showWidget("coosys", cv.viz_WCoordinateSystem())
pose1 = cv.viz_Affine3d((0.0, -np.pi/2, 0.0), (-1.5, 0.0, 0.0))
pose2 = cv.viz_Affine3d((0.0, np.pi/2, 0.0), (1.5, 0.0, 0.0))
viz.showWidget("cloud1", cv.viz_WPaintedCloud(cloud), pose1)
viz.showWidget("cloud2", cv.viz_WPaintedCloud(cloud, (0.0, -0.75, -1.0), (0.0, 0.75, 0.0)), pose2);
viz.showWidget("cloud3", cv.viz_WPaintedCloud(cloud, (0.0, 0.0, -1.0), (0.0, 0.0, 1.0), cv.viz_Color().blue(), cv.viz_Color().red()))
viz.showWidget("arrow", cv.viz_WArrow((0.0, 1.0, -1.0), (0.0, 1.0, 1.0), 0.009, cv.viz_Color()))
viz.showWidget("text2d", cv.viz_WText("Painted clouds", (20, 20), 20, cv.viz_Color(0, 255, 0)))
viz.spinOnce(500, True)
def test_viz_show_mesh(self):
mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply"))
viz = cv.viz_Viz3d("show_mesh")
viz.showWidget("coosys", cv.viz_WCoordinateSystem());
viz.showWidget("mesh", cv.viz_WMesh(mesh), cv.viz_Affine3d().rotate((0, 0.8, 0)));
viz.showWidget("text2d", cv.viz_WText("Just mesh", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_mesh_random_colors(self):
mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply"))
mesh.colors = np.random.randint(0, 255, size=mesh.colors.shape, dtype=np.uint8)
viz = cv.viz_Viz3d("show_mesh")
viz.showWidget("coosys", cv.viz_WCoordinateSystem());
viz.showWidget("mesh", cv.viz_WMesh(mesh), cv.viz_Affine3d().rotate((0, 0.8, 0)))
viz.setRenderingProperty("mesh", cv.viz.SHADING, cv.viz.SHADING_PHONG)
viz.showWidget("text2d", cv.viz_WText("Random color mesh", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_textured_mesh(self):
lena = cv.imread(self.find_file("viz/lena.png"))
angle = np.arange(0,64)
points0 = np.vstack((np.zeros(shape=angle.shape, dtype=np.float32), np.cos(angle * np.pi /128), np.sin(angle* np.pi /128)))
points1 = np.vstack((1.57 * np.ones(shape=angle.shape, dtype=np.float32),np.cos(angle* np.pi /128), np.sin(angle* np.pi /128)))
tcoords0 = np.vstack((np.zeros(shape=angle.shape, dtype=np.float32), angle / 64))
tcoords1 = np.vstack((np.ones(shape=angle.shape, dtype=np.float32), angle / 64))
points = np.zeros(shape=(points0.shape[0], points0.shape[1] * 2 ),dtype=np.float32)
tcoords = np.zeros(shape=(tcoords0.shape[0], tcoords0.shape[1] * 2),dtype=np.float32)
tcoords[:,0::2] = tcoords0
tcoords[:,1::2] = tcoords1
points[:,0::2] = points0 * 0.75
points[:,1::2] = points1 * 0.75
polygons = np.zeros(shape=(4 * (points.shape[1]-2)+1),dtype=np.int32)
for idx in range(points.shape[1] // 2 - 1):
polygons[8 * idx: 8 * (idx + 1)] = [3, 2*idx, 2*idx+1, 2*idx+2, 3, 2*idx+1, 2*idx+2, 2*idx+3]
mesh = cv.viz_Mesh()
mesh.cloud = points.transpose().reshape(1,points.shape[1],points.shape[0])
mesh.tcoords = tcoords.transpose().reshape(1,tcoords.shape[1],tcoords.shape[0])
mesh.polygons = polygons.reshape(1, 4 * (points.shape[1]-2)+1)
mesh.texture = lena
viz = cv.viz_Viz3d("show_textured_mesh")
viz.setBackgroundMeshLab();
viz.showWidget("coosys", cv.viz_WCoordinateSystem());
viz.showWidget("mesh", cv.viz_WMesh(mesh))
viz.setRenderingProperty("mesh", cv.viz.SHADING, cv.viz.SHADING_PHONG)
viz.showWidget("text2d", cv.viz_WText("Textured mesh", (20, 20), 20, cv.viz_Color().green()));
viz.spinOnce(500, True)
def test_viz_show_polyline(self):
palette = [ cv.viz_Color().red(),
cv.viz_Color().green(),
cv.viz_Color().blue(),
cv.viz_Color().gold(),
cv.viz_Color().raspberry(),
cv.viz_Color().bluberry(),
cv.viz_Color().lime()]
palette_size = len(palette)
polyline = np.zeros(shape=(1, 32, 3), dtype=np.float32)
colors = np.zeros(shape=(1, 32, 3), dtype=np.uint8)
for i in range(polyline.shape[1]):
polyline[0,i,0] = i / 16.0
polyline[0,i,1] = np.cos(i * np.pi/6)
polyline[0,i,2] = np.sin(i * np.pi/6)
colors[0,i,0] = palette[i % palette_size].get_blue()
colors[0,i,1] = palette[i % palette_size].get_green()
colors[0,i,2] = palette[i % palette_size].get_red()
viz = cv.viz_Viz3d("show_polyline")
viz.showWidget("polyline", cv.viz_WPolyLine(polyline, colors))
viz.showWidget("coosys", cv.viz_WCoordinateSystem())
viz.showWidget("text2d", cv.viz_WText("Polyline", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_sampled_normals(self):
mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply"))
mesh.normals = cv.viz.computeNormals(mesh)
pose = cv.viz_Affine3d().rotate((0, 0.8, 0))
viz = cv.viz_Viz3d("show_sampled_normals")
viz.showWidget("mesh", cv.viz_WMesh(mesh), pose)
viz.showWidget("normals", cv.viz_WCloudNormals(mesh.cloud, mesh.normals, 30, 0.1, cv.viz_Color().green()), pose)
viz.setRenderingProperty("normals", cv.viz.LINE_WIDTH, 2.0)
viz.showWidget("text2d", cv.viz_WText("Cloud or mesh normals", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True);
def test_viz_show_cloud_shaded_by_normals(self):
mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply"))
mesh.normals = cv.viz.computeNormals(mesh)
pose = cv.viz_Affine3d().rotate((0, 0.8, 0))
cloud = cv.viz_WCloud(mesh.cloud, cv.viz_Color().white(), mesh.normals)
cloud.setRenderingProperty(cv.viz.SHADING, cv.viz.SHADING_GOURAUD)
viz = cv.viz_Viz3d("show_cloud_shaded_by_normals")
viz.showWidget("cloud", cloud, pose)
viz.showWidget("text2d", cv.viz_WText("Cloud shaded by normals", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_image_method(self):
lena = cv.imread(self.find_file("viz/lena.png"))
lena_gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY)
viz = cv.viz_Viz3d("show_image_method")
viz.showImage(lena)
viz.spinOnce(1500, True)
viz.showImage(lena, (lena.shape[1], lena.shape[0]))
viz.spinOnce(1500, True)
#cv.viz.imshow("show_image_method", lena_gray).spinOnce(500, True) BUG
def test_viz_show_follower(self):
viz = cv.viz_Viz3d("show_follower")
viz.showWidget("coos", cv.viz_WCoordinateSystem())
viz.showWidget("cube", cv.viz_WCube())
text_3d = cv.viz_WText3D("Simple 3D follower", (-0.5, -0.5, 0.5), 0.125, True, cv.viz_Color().green())
viz.showWidget("t3d_2", text_3d)
viz.showWidget("text2d", cv.viz_WText("Follower: text always facing camera", (20, 20), 20, cv.viz_Color().green()))
viz.setBackgroundMeshLab()
viz.spinOnce(500, True)
text_3d.setText("Updated follower 3D")
viz.spinOnce(500, True)
def test_viz_show_trajectory_reposition(self):
mat, path = generate_test_trajectory()
viz = cv.viz_Viz3d("show_trajectory_reposition_to_origin")
viz.showWidget("coos", cv.viz_WCoordinateSystem())
viz.showWidget("sub3", cv.viz_WTrajectory(mat[0: len(path) // 3,:,:], cv.viz.PyWTrajectory_BOTH, 0.2, cv.viz_Color().brown()), path[0].inv())
viz.showWidget("text2d", cv.viz_WText("Trajectory resposition to origin", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_trajectories(self):
mat, path = generate_test_trajectory()
size =len(path)
sub0 = np.copy(mat[0: size//10+1,::])
sub1 = np.copy(mat[size//10: size//5+1,::])
sub2 = np.copy(mat[size//5: 11*size//12,::])
sub3 = np.copy(mat[11 * size // 12 : size,::])
sub4 = np.copy(mat[3 * size//4: 33*size//40,::])
sub5 = np.copy(mat[11*size//12: size,::])
K = np.array([[1024.0, 0.0, 320.0], [0.0, 1024.0, 240.0], [0.0, 0.0, 1.0]],dtype=np.float64)
viz = cv.viz_Viz3d("show_trajectories")
viz.showWidget("coos", cv.viz_WCoordinateSystem())
viz.showWidget("sub0", cv.viz_WTrajectorySpheres(sub0, 0.25, 0.07))
viz.showWidget("sub1", cv.viz_WTrajectory(sub1, cv.viz.PyWTrajectory_PATH, 0.2, cv.viz_Color().brown()))
viz.showWidget("sub2", cv.viz_WTrajectory(sub2, cv.viz.PyWTrajectory_FRAMES, 0.2))
viz.showWidget("sub3", cv.viz_WTrajectory(sub3, cv.viz.PyWTrajectory_BOTH, 0.2, cv.viz_Color().green()))
viz.showWidget("sub4", cv.viz_WTrajectoryFrustums(sub4, K, 0.3, cv.viz_Color().yellow()))
viz.showWidget("sub5", cv.viz_WTrajectoryFrustums(sub5, (0.78, 0.78), 0.15, cv.viz_Color().magenta())) #BUG
viz.showWidget("text2d", cv.viz_WText("Different kinds of supported trajectories", (20, 20), 20, cv.viz_Color().green()))
i = 0
for num in range(50):
i = i - 1
a = i % 360
pose = (np.sin(a * np.pi/180)* 7.5, 0.7, np.cos(a * np.pi/180)* 7.5)
viz.setViewerPose(cv.viz.makeCameraPose(pose , (0.0, 0.5, 0.0), (0.0, 0.1, 0.0)));
viz.spinOnce(100, True)
viz.resetCamera()
viz.spinOnce(500, True)
def test_viz_show_camera_positions(self):
K = np.array([[1024.0, 0.0, 320.0], [0.0, 1024.0, 240.0], [0.0, 0.0, 1.0]],dtype=np.float64)
lena = cv.imread(self.find_file("viz/lena.png"))
lena_gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY)
poses = []
for i in range(2):
pose = (5 * np.sin(3.14 + 2.7 + i*60 * np.pi/180), 2 - i*1.5, 5 * np.cos(3.14 + 2.7 + i*60 * np.pi/180))
poses.append(cv.viz.makeCameraPose(pose, (0.0, 0.0, 0.0), (0.0, -0.1, 0.0)))
viz = cv.viz_Viz3d("show_camera_positions")
viz.showWidget("sphe", cv.viz_WSphere((0,0,0), 1.0, 10, cv.viz_Color().orange_red()))
viz.showWidget("coos", cv.viz_WCoordinateSystem(1.5))
viz.showWidget("pos1", cv.viz_WCameraPosition(0.75), poses[0])
viz.showWidget("pos2", cv.viz_WCameraPosition((0.78, 0.78), lena, 2.2, cv.viz_Color().green()), poses[0])
viz.showWidget("pos3", cv.viz_WCameraPosition(0.75), poses[0])
viz.showWidget("pos4", cv.viz_WCameraPosition(K, lena_gray, 3, cv.viz_Color().indigo()), poses[1])
viz.showWidget("text2d", cv.viz_WText("Camera positions with images", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
"""
TEST(Viz, show_widget_merger)
{
WWidgetMerger merger;
merger.addWidget(WCube(Vec3d::all(0.0), Vec3d::all(1.0), true, Color::gold()));
RNG& rng = theRNG();
for(int i = 0; i < 77; ++i)
{
Vec3b c;
rng.fill(c, RNG::NORMAL, Scalar::all(128), Scalar::all(48), true);
merger.addWidget(WSphere(Vec3d(c)*(1.0/255.0), 7.0/255.0, 10, Color(c[2], c[1], c[0])));
}
merger.finalize();
Viz3d viz("show_mesh_random_color");
viz.showWidget("coo", WCoordinateSystem());
viz.showWidget("merger", merger);
viz.showWidget("text2d", WText("Widget merger", Point(20, 20), 20, Color::green()));
viz.spinOnce(500, true);
}
"""
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
|
tests/test_product.py
|
ExtraE113/dj-stripe
| 937 |
107769
|
"""
dj-stripe Product model tests
"""
from copy import deepcopy
import pytest
import stripe
from djstripe.models import Product
from djstripe.models.core import Price
from . import (
FAKE_FILEUPLOAD_ICON,
FAKE_PLATFORM_ACCOUNT,
FAKE_PRICE,
FAKE_PRICE_METERED,
FAKE_PRICE_ONETIME,
FAKE_PRICE_TIER,
FAKE_PRODUCT,
)
pytestmark = pytest.mark.django_db
class TestProduct:
#
# Helper Methods for monkeypatching
#
def mock_file_retrieve(*args, **kwargs):
return deepcopy(FAKE_FILEUPLOAD_ICON)
def mock_account_retrieve(*args, **kwargs):
return deepcopy(FAKE_PLATFORM_ACCOUNT)
def mock_product_get(self, *args, **kwargs):
return deepcopy(FAKE_PRODUCT)
@pytest.mark.parametrize("count", [1, 2, 3])
def test___str__(self, count, monkeypatch):
def mock_price_get(*args, **kwargs):
return random_price_data
# monkeypatch stripe.Product.retrieve and stripe.Price.retrieve calls to return
# the desired json response.
monkeypatch.setattr(stripe.Product, "retrieve", self.mock_product_get)
monkeypatch.setattr(stripe.Price, "retrieve", mock_price_get)
product = Product.sync_from_stripe_data(deepcopy(FAKE_PRODUCT))
PRICE_DATA_OPTIONS = [
deepcopy(FAKE_PRICE),
deepcopy(FAKE_PRICE_TIER),
deepcopy(FAKE_PRICE_METERED),
deepcopy(FAKE_PRICE_ONETIME),
]
for _ in range(count):
random_price_data = PRICE_DATA_OPTIONS.pop()
price = Price.sync_from_stripe_data(random_price_data)
if count > 1:
assert f"{FAKE_PRODUCT['name']} ({count} prices)" == str(product)
else:
assert f"{FAKE_PRODUCT['name']} ({price.human_readable_price})" == str(
product
)
def test_sync_from_stripe_data(self, monkeypatch):
# monkeypatch stripe.Product.retrieve call to return
# the desired json response.
monkeypatch.setattr(stripe.Product, "retrieve", self.mock_product_get)
product = Product.sync_from_stripe_data(deepcopy(FAKE_PRODUCT))
assert product.id == FAKE_PRODUCT["id"]
assert product.name == FAKE_PRODUCT["name"]
assert product.type == FAKE_PRODUCT["type"]
|
recipes/Python/114579_remotely_exit_XMLRPC_Server/recipe-114579.py
|
tdiprima/code
| 2,023 |
107847
|
from SimpleXMLRPCServer import *
class MyServer(SimpleXMLRPCServer):
def serve_forever(self):
self.quit = 0
while not self.quit:
self.handle_request()
def kill():
server.quit = 1
return 1
server = MyServer(('127.0.0.1', 8000))
server.register_function(kill)
server.serve_forever()
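# Hedged companion note (not part of the original recipe): the registered
# `kill` function lets a client shut the server down remotely. With the
# Python 2 standard library that matches SimpleXMLRPCServer above:
#
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy("http://127.0.0.1:8000")
#   proxy.kill()   # serve_forever() exits after this request is handled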
|
py/moma/utils/ik_solver.py
|
wx-b/dm_robotics
| 128 |
107881
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IK solver for initialization of robot arms."""
import copy
from typing import List, NamedTuple, Optional, Sequence, Union
from absl import logging
from dm_control import mjcf
from dm_control.mujoco.wrapper import mjbindings
from dm_control.mujoco.wrapper.mjbindings.enums import mjtObj
from dm_robotics.controllers import cartesian_6d_to_joint_velocity_mapper
from dm_robotics.geometry import geometry
from dm_robotics.geometry import mujoco_physics
from dm_robotics.transformations import transformations as tr
import numpy as np
# Default value for the nullspace gain parameter.
_NULLSPACE_GAIN = 0.4
# Gain for the linear and angular twist computation, these values should always
# be between 0 and 1. 0 corresponds to not move and 1 corresponds to move to the
# target in a single integration timestep.
_LINEAR_VELOCITY_GAIN = 0.95
_ANGULAR_VELOCITY_GAIN = 0.95
# Integration timestep used when solving the IK.
_INTEGRATION_TIMESTEP_SEC = 1.0
# At each step of the solve, we measure how much the tracked element
# translated (linear progress) and rotated (angular progress). We compare this
# progress to the total linear and angular error and if not enough progress is
# made stop the solve before the maximum number of steps is reached.
_ERR_TO_PROGRESS_THRESHOLD = 20.0
### ---------------PARAMETERS USED FOR THE QP MAPPER: START----------------- ###
# Regularisation parameter used by the qp to compute joint velocities.
_REGULARIZATION_WEIGHT = 0.01
# Ensure that the joint limits are respected.
_ENABLE_JOINT_POSITION_LIMITS = True
# Gain that scales the joint velocities down when close to the joint limits.
_JOINT_POSITION_LIMIT_VELOCITY_SCALE = 1.0
# The minimal distance to joint limits the IK solution can have.
_MINIMUM_DISTANCE_FROM_JOINT_POSITION_LIMIT = 0.0
# Maximum number of iteration to find a joint velocity command that applies the
# desired twist to the element.
_MAX_CARTESIAN_VELOCITY_CONTROL_ITERATIONS = 300
# Number of iterations for the nullspace control problem.
_MAX_NULLSPACE_CONTROL_ITERATIONS = 300
# Maximum error allowed for the nullspace problem.
_NULLSPACE_PROJECTION_SLACK = 1e-5
# Maximum error allowed between the requested twist command and the actual one.
_SOLUTION_TOLERANCE = 1e-4
# Remove the logging when the nullspace cannot find a solution as this
# clutters the logging.
_LOG_NULLSPACE_FAILURE_WARNINGS = False
### -----------------PARAMETERS USED FOR THE QP MAPPER: END----------------- ###
_Binding = Union[mjcf.physics.Binding, mjcf.physics._EmptyBinding] # pylint: disable=protected-access
_MjcfElement = mjcf.element._ElementImpl # pylint: disable=protected-access
class _Solution(NamedTuple):
"""Return value of an ik solution.
Attributes:
qpos: The joint configuration.
linear_err: The linear error between the target pose and desired pose.
angular_err: The angular error between the target pose and desired pose.
"""
qpos: np.ndarray
linear_err: float
angular_err: float
class IkSolver():
"""Inverse kinematics solver.
This class computes a joint configuration that brings an element to a certain
pose.
"""
# The cartesian velocity controller used to solve the IK.
_qp_mapper: cartesian_6d_to_joint_velocity_mapper.Mapper
# Array of indices that sorts the joints in ascending order. The qp_mapper
# returns values in joint-ID ascending order which could be different than
# the order of the joints provided by the user.
_joints_argsort: List[int]
# The desired joint configuration that is set as the nullspace goal. This
# corresponds to the mid-range of each joint. The user can override this
# reference configuration in the `solve` method.
_nullspace_joint_position_reference: List[float]
def __init__(
self,
model: mjcf.RootElement,
controllable_joints: List[_MjcfElement],
element: _MjcfElement,
nullspace_gain: float = _NULLSPACE_GAIN,
):
"""Constructor.
Args:
model: The MJCF model root.
controllable_joints: The joints that can be controlled to achieve
the desired target pose. Only 1 DoF joints are supported.
element: The MJCF element that is being placed by the inverse kinematics
solver. Only body, geoms, and sites are supported
nullspace_gain: Scales the nullspace velocity bias. If the gain is set to
0, there will be no nullspace optimization during the solve process.
"""
self._physics = mjcf.Physics.from_mjcf_model(model)
self._geometry_physics = mujoco_physics.wrap(self._physics)
self._joints_binding = _binding(self._physics, controllable_joints)
self._num_joints = len(controllable_joints)
self._element = element
self._nullspace_gain = nullspace_gain
self._create_qp_mapper()
def solve(self,
ref_pose: geometry.Pose,
linear_tol: float = 1e-3,
angular_tol: float = 1e-3,
max_steps: int = 100,
early_stop: bool = False,
num_attempts: int = 30,
stop_on_first_successful_attempt: bool = False,
inital_joint_configuration: Optional[np.ndarray] = None,
nullspace_reference: Optional[np.ndarray] = None
) -> Optional[np.ndarray]:
"""Attempts to solve the inverse kinematics.
This method computes joint configuration that solves the inverse kinematics
problem. Returns None if no solution is found. If multiple solutions are
found, the solver will return the one where the joints are closer to the
    `nullspace_reference`. If no `nullspace_reference` is provided, the center
    of the joint ranges is used.
Args:
ref_pose: Target pose of the controlled element, it must be
in the world frame.
linear_tol: The linear tolerance, in meters, that determines if the
solution found is valid.
angular_tol: The angular tolerance, in radians, to determine if the
solution found is valid.
max_steps: Maximum number of integration steps that can be used. The
larger the number of steps the more likely it is a solution will be
found but a larger number of steps increases computation time.
early_stop: If true, stops the attempt as soon as the configuration is
within the linear and angular tolerances. If false, it will always run
`max_steps` iterations per attempt and return the last configuration.
num_attempts: The number of different attempts the solver should do.
For a given target pose, there exists an infinite number of possible
solutions, having more attempts allows to compare different joint
configurations. The solver will return the solution where the joints are
closer to the `nullspace_reference`. Note that not all attempts
are successful, and thus, having more attempts gives better chances of
finding a correct solution.
stop_on_first_successful_attempt: If true, the method will return the
first solution that meets the tolerance criteria. If false, returns the
solution where the joints are closer the center of their respective
range.
inital_joint_configuration: A joint configuration that will be used for
the first attempt. This can be useful in the case of a complex pose,
a user could provide the initial guess that is close to the desired
solution. If None, all the joints will be set to 0 for the first
attempt.
nullspace_reference: The desired joint configuration. When the controlled
element is in the desired pose, the solver will try and bring the joint
configuration closer to the nullspace reference without moving the
element. If no nullspace reference is provided, the center of the joint
ranges is used as reference.
Returns:
If a solution is found, returns the corresponding joint configuration.
If the inverse kinematics failed, returns None.
Raises:
ValueError: If the `nullspace_reference` does not have the correct length.
ValueError: If the `inital_joint_configuration` does not have the correct
length.
"""
    # Avoid `or` here: truth-testing a non-empty numpy array raises ValueError.
    if nullspace_reference is None:
      nullspace_reference = self._nullspace_joint_position_reference
if len(nullspace_reference) != self._num_joints:
raise ValueError(
'The provided nullspace reference does not have the right number of '
f'elements expected length of {self._num_joints}.'
f' Got {nullspace_reference}')
if inital_joint_configuration is not None:
if len(inital_joint_configuration) != self._num_joints:
raise ValueError(
'The provided inital joint configuration does not have the right '
f'number of elements expected length of {self._num_joints}.'
f' Got {inital_joint_configuration}')
    if inital_joint_configuration is None:
      inital_joint_configuration = np.zeros(self._num_joints)
nullspace_jnt_qpos_min_err = np.inf
sol_qpos = None
success = False
# Each iteration of this loop attempts to solve the inverse kinematics.
# If a solution is found, it is compared to previous solutions.
for attempt in range(num_attempts):
# Use the user provided joint configuration for the first attempt.
if attempt == 0:
self._joints_binding.qpos[:] = inital_joint_configuration
else:
# Randomize the initial joint configuration so that the IK can find
# different solutions.
qpos_new = np.random.uniform(
self._joints_binding.range[:, 0], self._joints_binding.range[:, 1])
self._joints_binding.qpos[:] = qpos_new
# Solve the IK.
joint_qpos, linear_err, angular_err = self._solve_ik(
ref_pose, linear_tol, angular_tol, max_steps,
early_stop, nullspace_reference)
# Check if the attempt was successful. The solution is saved if the joints
# are closer to the nullspace reference.
if (linear_err <= linear_tol and angular_err <= angular_tol):
success = True
nullspace_jnt_qpos_err = np.linalg.norm(
joint_qpos - nullspace_reference)
if nullspace_jnt_qpos_err < nullspace_jnt_qpos_min_err:
nullspace_jnt_qpos_min_err = nullspace_jnt_qpos_err
sol_qpos = joint_qpos
if success and stop_on_first_successful_attempt:
break
if not success:
logging.warning('Unable to solve the inverse kinematics for ref_pose: '
'%s', ref_pose)
return sol_qpos
def _create_qp_mapper(self):
"""Instantiates the cartesian velocity controller used by the ik solver."""
qp_params = cartesian_6d_to_joint_velocity_mapper.Parameters()
qp_params.model = self._physics.model
qp_params.joint_ids = self._joints_binding.jntid
qp_params.object_type = _get_element_type(self._element)
qp_params.object_name = self._element.full_identifier
qp_params.integration_timestep = _INTEGRATION_TIMESTEP_SEC
qp_params.enable_joint_position_limits = _ENABLE_JOINT_POSITION_LIMITS
qp_params.joint_position_limit_velocity_scale = (
_JOINT_POSITION_LIMIT_VELOCITY_SCALE)
qp_params.minimum_distance_from_joint_position_limit = (
_MINIMUM_DISTANCE_FROM_JOINT_POSITION_LIMIT)
qp_params.regularization_weight = _REGULARIZATION_WEIGHT
qp_params.max_cartesian_velocity_control_iterations = (
_MAX_CARTESIAN_VELOCITY_CONTROL_ITERATIONS)
if self._nullspace_gain > 0:
qp_params.enable_nullspace_control = True
else:
qp_params.enable_nullspace_control = False
qp_params.max_nullspace_control_iterations = (
_MAX_NULLSPACE_CONTROL_ITERATIONS)
qp_params.nullspace_projection_slack = _NULLSPACE_PROJECTION_SLACK
qp_params.solution_tolerance = _SOLUTION_TOLERANCE
qp_params.log_nullspace_failure_warnings = _LOG_NULLSPACE_FAILURE_WARNINGS
self._qp_mapper = cartesian_6d_to_joint_velocity_mapper.Mapper(qp_params)
self._joints_argsort = np.argsort(self._joints_binding.jntid)
self._nullspace_joint_position_reference = 0.5 * np.sum(
self._joints_binding.range, axis=1)
def _solve_ik(self,
ref_pose: geometry.Pose,
linear_tol: float,
angular_tol: float,
max_steps: int,
early_stop: bool,
nullspace_reference: np.ndarray
) -> _Solution:
"""Finds a joint configuration that brings element pose to target pose."""
cur_frame = geometry.PoseStamped(pose=None, frame=self._element)
linear_err = np.inf
angular_err = np.inf
cur_pose = cur_frame.get_world_pose(self._geometry_physics)
previous_pose = copy.copy(cur_pose)
# Each iteration of this loop attempts to reduce the error between the
# element's pose and the target pose.
for _ in range(max_steps):
# Find the twist that will bring the element's pose closer to the desired
# one.
twist = _compute_twist(
cur_pose, ref_pose, _LINEAR_VELOCITY_GAIN,
_ANGULAR_VELOCITY_GAIN, _INTEGRATION_TIMESTEP_SEC)
# Computes the joint velocities to achieve the desired twist.
# The joint velocity vector passed to mujoco's integration
# needs to have a value for all the joints in the model. The velocity
# for all the joints that are not controlled is set to 0.
qdot_sol = np.zeros(self._physics.model.nv)
joint_vel = self._compute_joint_velocities(
twist.full, nullspace_reference)
# If we are unable to compute joint velocities we stop the iteration
# as the solver is stuck and cannot make any more progress.
if joint_vel is not None:
qdot_sol[self._joints_binding.dofadr] = joint_vel
else:
break
# The velocity vector is passed to mujoco to be integrated.
mjbindings.mjlib.mj_integratePos(
self._physics.model.ptr, self._physics.data.qpos,
qdot_sol, _INTEGRATION_TIMESTEP_SEC)
self._update_physics_data()
# Get the distance and the angle between the current pose and the
# target pose.
cur_pose = cur_frame.get_world_pose(self._geometry_physics)
linear_err = np.linalg.norm(ref_pose.position - cur_pose.position)
angular_err = np.linalg.norm(_get_orientation_error(
ref_pose.quaternion, cur_pose.quaternion))
# Stop if the pose is close enough to the target pose.
if (early_stop and
linear_err <= linear_tol and angular_err <= angular_tol):
break
# We measure the progress made during this step. If the error is not
# reduced fast enough the solve is stopped to save computation time.
linear_change = np.linalg.norm(
cur_pose.position - previous_pose.position)
angular_change = np.linalg.norm(_get_orientation_error(
cur_pose.quaternion, previous_pose.quaternion))
if (linear_err / (linear_change + 1e-10) > _ERR_TO_PROGRESS_THRESHOLD and
angular_err / (angular_change + 1e-10) > _ERR_TO_PROGRESS_THRESHOLD):
break
previous_pose = copy.copy(cur_pose)
qpos = np.array(self._joints_binding.qpos)
return _Solution(qpos=qpos, linear_err=linear_err, angular_err=angular_err)
def _compute_joint_velocities(
self, cartesian_6d_target: np.ndarray, nullspace_reference: np.ndarray
) -> Optional[np.ndarray]:
"""Maps a Cartesian 6D target velocity to joint velocities.
Args:
cartesian_6d_target: array of size 6 describing the desired 6 DoF
Cartesian target [(lin_vel), (ang_vel)]. Must be expressed about the
element's origin in the world orientation.
nullspace_reference: The desired joint configuration used to compute
the nullspace bias.
Returns:
Computed joint velocities in the same order as the `joints` sequence
passed during construction. If a solution could not be found,
returns None.
"""
joint_velocities = np.empty(self._num_joints)
nullspace_bias = None
if self._nullspace_gain > 0:
nullspace_bias = self._nullspace_gain * (
nullspace_reference
- self._joints_binding.qpos) / _INTEGRATION_TIMESTEP_SEC
# Sort nullspace_bias by joint ID, ascending. The QP requires this.
nullspace_bias = nullspace_bias[self._joints_argsort]
# Compute joint velocities. The Python bindings throw an exception whenever
# the mapper fails to find a solution, in which case we return None.
# We need to catch a general exception because the StatusOr->Exception
# conversion can result in a wide variety of different exceptions.
try:
# Reorder the joint velocities to be in the same order as the joints
# sequence. The QP returns joints by ascending joint ID which could be
# different.
joint_velocities[self._joints_argsort] = np.array(
self._qp_mapper.compute_joint_velocities(
self._physics.data, cartesian_6d_target.tolist(), nullspace_bias))
except Exception: # pylint: disable=broad-except
joint_velocities = None
logging.warning('Failed to compute joint velocities, returning None.')
return joint_velocities
def _update_physics_data(self):
"""Updates the physics data following the integration of the velocities."""
# Clip joint positions; the integration done previously can make joints
# out of range.
qpos = self._joints_binding.qpos
min_range = self._joints_binding.range[:, 0]
max_range = self._joints_binding.range[:, 1]
qpos = np.clip(qpos, min_range, max_range)
self._joints_binding.qpos[:] = qpos
# Forward kinematics to update the pose of the tracked element.
mjbindings.mjlib.mj_normalizeQuat(
self._physics.model.ptr, self._physics.data.qpos)
mjbindings.mjlib.mj_kinematics(
self._physics.model.ptr, self._physics.data.ptr)
mjbindings.mjlib.mj_comPos(self._physics.model.ptr, self._physics.data.ptr)
def _get_element_type(element: _MjcfElement):
"""Returns the MuJoCo enum corresponding to the element type."""
if element.tag == 'body':
return mjtObj.mjOBJ_BODY
elif element.tag == 'geom':
return mjtObj.mjOBJ_GEOM
elif element.tag == 'site':
return mjtObj.mjOBJ_SITE
else:
raise ValueError('Element must be a MuJoCo body, geom, or site. Got '
f'[{element.tag}].')
def _binding(physics: mjcf.Physics,
elements: Union[Sequence[mjcf.Element], mjcf.Element]
) -> _Binding:
"""Binds the elements with physics and returns a non optional object."""
physics_elements = physics.bind(elements)
if physics_elements is None:
raise ValueError(f'Calling physics.bind with {elements} returns None.')
return physics_elements
def _get_orientation_error(
to_quat: np.ndarray, from_quat: np.ndarray) -> np.ndarray:
"""Returns error between the two quaternions."""
err_quat = tr.quat_diff_active(from_quat, to_quat)
return tr.quat_to_axisangle(err_quat)
def _compute_twist(init_pose: geometry.Pose,
ref_pose: geometry.Pose,
linear_velocity_gain: float,
angular_velocity_gain: float,
control_timestep_seconds: float,
) -> geometry.Twist:
"""Returns the twist to apply to the end effector to reach ref_pose.
This function returns the twist that moves init_pose closer to ref_pose.
Both poses need to be expressed in the same frame. The returned twist is
expressed in the frame located at the initial pose and with the same
orientation as the world frame.
Args:
    init_pose: The initial pose.
    ref_pose: The target pose that we want to reach.
    linear_velocity_gain: Scales the linear velocity. The value should be
      between 0 and 1. A value of 0 corresponds to not moving. A value of 1
      corresponds to moving from the initial pose to the target pose in a
      single timestep.
    angular_velocity_gain: Scales the angular velocity. The value should be
      between 0 and 1. A value of 0 corresponds to not rotating. A value of 1
      corresponds to rotating from the initial pose to the target pose in a
      single timestep.
    control_timestep_seconds: Duration of the control timestep. The output
      twist is intended to be used over that duration.
Returns:
The twist to be applied to `init_pose` to move in closer to `ref_pose`.
The twist is expressed in the frame located at `init_pose` and with the
same orientation as the world frame.
"""
position_error = ref_pose.position - init_pose.position
orientation_error = _get_orientation_error(
from_quat=init_pose.quaternion, to_quat=ref_pose.quaternion)
linear = linear_velocity_gain * position_error / control_timestep_seconds
angular = angular_velocity_gain * orientation_error / control_timestep_seconds
return geometry.Twist(np.concatenate((linear, angular)))
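# Hedged usage sketch (not part of the original module). The MJCF file, joint
# list, and site name below are hypothetical placeholders; only 1-DoF joints
# are supported by IkSolver.
#
#   arm = mjcf.from_path('arm.xml')
#   joints = arm.find_all('joint')
#   site = arm.find('site', 'gripper_site')
#   solver = IkSolver(arm, joints, site)
#   target = geometry.Pose(position=np.array([0.4, 0.0, 0.3]),
#                          quaternion=np.array([1.0, 0.0, 0.0, 0.0]))
#   qpos = solver.solve(target, linear_tol=1e-3, angular_tol=1e-3)
#   # qpos is None if no configuration within tolerance was found.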
|
tests/java/org/python/indexer/data/pkgload.py
|
jeff5/jython-whinchat
| 577 |
107888
|
# test loading a package by dirname
import pkg
|
plenum/test/node_request/message_request/test_node_requests_missing_preprepares_and_prepares.py
|
jandayanan/indy-plenum
| 148 |
107898
|
from plenum.server.consensus.message_request.message_req_service import MessageReqService
from plenum.server.consensus.ordering_service import OrderingService
from plenum.test.delayers import delay_3pc
from plenum.test.node_request.message_request.helper import \
check_pp_out_of_sync
from plenum.test.stasher import delay_rules_without_processing
from plenum.test.waits import expectedPoolGetReadyTimeout
from stp_core.common.log import getlogger
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.helper import sdk_send_random_requests, sdk_send_random_and_check
from stp_core.loop.eventually import eventually
logger = getlogger()
nodeCount = 4
def test_node_requests_missing_preprepares_and_prepares(
looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle,
tconf, tdir, allPluginsPath):
"""
2 of 4 nodes go down (simulate this by dropping requests), so pool can not process any more incoming requests.
A new request comes in. After a while those 2 nodes come back alive.
Another request comes in. Check that previously disconnected two nodes
request missing PREPREPARES and PREPARES and the pool successfully handles
both transactions after that.
"""
INIT_REQS_CNT = 5
MISSING_REQS_CNT = 4
REQS_AFTER_RECONNECT_CNT = 1
disconnected_nodes = txnPoolNodeSet[2:]
alive_nodes = txnPoolNodeSet[:2]
disconnected_nodes_stashers = [n.nodeIbStasher for n in disconnected_nodes]
sdk_send_random_and_check(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
INIT_REQS_CNT)
init_ledger_size = txnPoolNodeSet[0].domainLedger.size
with delay_rules_without_processing(disconnected_nodes_stashers, delay_3pc()):
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, MISSING_REQS_CNT)
last_ordered_key = txnPoolNodeSet[0].master_replica.last_ordered_3pc
looper.run(eventually(check_pp_out_of_sync,
alive_nodes,
disconnected_nodes,
last_ordered_key,
retryWait=1,
timeout=expectedPoolGetReadyTimeout(len(txnPoolNodeSet))))
for node in txnPoolNodeSet:
assert node.domainLedger.size == init_ledger_size
for node in disconnected_nodes:
assert node.master_replica._ordering_service.spylog.count(OrderingService._request_pre_prepare) == 0
assert node.master_replica._ordering_service.spylog.count(OrderingService._request_prepare) == 0
assert node.master_replica._message_req_service.spylog.count(MessageReqService.process_message_rep) == 0
sdk_send_random_and_check(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
REQS_AFTER_RECONNECT_CNT)
waitNodeDataEquality(looper, disconnected_nodes[0], *txnPoolNodeSet[:-1])
for node in disconnected_nodes:
assert node.master_replica._ordering_service.spylog.count(OrderingService._request_pre_prepare) > 0
assert node.master_replica._ordering_service.spylog.count(OrderingService._request_prepare) > 0
assert node.master_replica._message_req_service.spylog.count(MessageReqService.process_message_rep) > 0
def check_all_ordered():
for node in txnPoolNodeSet:
assert node.domainLedger.size == (init_ledger_size +
MISSING_REQS_CNT +
REQS_AFTER_RECONNECT_CNT)
looper.run(eventually(check_all_ordered, timeout=20))
|
src/EKF.py
|
noskill/JRMOT_ROS
| 112 |
107908
|
# vim: expandtab:ts=4:sw=4
import numpy as np
import scipy.linalg
import pdb
"""
Table for the 0.95 quantile of the chi-square distribution with N degrees of
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
function and used as Mahalanobis gating threshold.
"""
chi2inv95 = {
1: 3.8415,
2: 5.9915,
3: 7.8147,
4: 9.4877,
5: 11.070,
6: 12.592,
7: 14.067,
8: 15.507,
9: 16.919}
chi2inv90 = {
1: 2.706,
2: 4.605,
3: 6.251,
4: 7.779,
5: 9.236,
6: 10.645,
7: 12.017,
8: 13.363,
9: 14.684}
chi2inv975 = {
1: 5.025,
2: 7.378,
3: 9.348,
4: 11.143,
5: 12.833,
6: 14.449,
7: 16.013,
8: 17.535,
9: 19.023}
chi2inv10 = {
1: .016,
2: .221,
3: .584,
4: 1.064,
5: 1.610,
6: 2.204,
7: 2.833,
8: 3.490,
9: 4.168}
chi2inv995 = {
1: 0.0000393,
2: 0.0100,
3: .0717,
4: .207,
5: .412,
6: .676,
7: .989,
8: 1.344,
9: 1.735}
chi2inv75 = {
1: 1.323,
2: 2.773,
3: 4.108,
4: 5.385,
5: 6.626,
6: 7.841,
7: 9.037,
8: 10.22,
9: 11.39}
def squared_mahalanobis_distance(mean, covariance, measurements):
# cholesky factorization used to solve for
# z = d * inv(covariance)
# so z is also the solution to
# covariance * z = d
d = measurements - mean
# cholesky_factor = np.linalg.cholesky(covariance)
# z = scipy.linalg.solve_triangular(
# cholesky_factor, d.T, lower=True, check_finite=False,
# overwrite_b=True)
squared_maha = np.linalg.multi_dot([d, np.linalg.inv(covariance),
d.T]).diagonal()
return squared_maha
class EKF(object):
"""
Generic extended kalman filter class
"""
def __init__(self):
pass
def initiate(self, measurement):
"""Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the new track.
Unobserved velocities are initialized to 0 mean.
"""
pass
def predict_mean(self, mean):
# Updates predicted state from previous state (function g)
# Calculates motion update Jacobian (Gt)
# Returns (g(mean), Gt)
pass
def get_process_noise(self, mean, covariance):
# Returns Rt the motion noise covariance
pass
def predict_covariance(self, mean, covariance):
pass
def project_mean(self, mean):
# Measurement prediction from state (function h)
# Calculations sensor update Jacobian (Ht)
# Returns (h(mean), Ht)
pass
def project_cov(self, mean, covariance):
pass
def predict(self, mean, covariance):
"""Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The mean vector of the object state at the previous
time step.
covariance : ndarray
The covariance matrix of the object state at the
previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted
state. Unobserved velocities are initialized to 0 mean.
"""
# Perform prediction
covariance = self.predict_covariance(mean, covariance)
mean = self.predict_mean(mean)
return mean, covariance
def get_innovation_cov(self, covariance):
pass
def project(self, mean, covariance):
"""Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector
covariance : ndarray
The state's covariance matrix
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state
estimate.
"""
# Measurement uncertainty scaled by estimated height
return self.project_mean(mean), self.project_cov(mean, covariance)
def update(self, mean, covariance, measurement_t, marginalization=None, JPDA=False):
"""Run Kalman filter correction step.
Parameters
----------
        mean : ndarray
            The predicted state's mean vector.
        covariance : ndarray
            The state's covariance matrix.
        measurement_t : ndarray
            The measurement vector, or the sequence of candidate measurements
            associated with this track when `JPDA` is True.
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
predicted_measurement, innovation_cov = self.project(mean, covariance)
# cholesky factorization used to solve for kalman gain since
# K = covariance * update_mat.T * inv(innovation_cov)
# so K is also the solution to
# innovation_cov * K = covariance * update_mat.T
try:
chol_factor, lower = scipy.linalg.cho_factor(
innovation_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve(
(chol_factor, lower), np.dot(covariance, self._observation_mat.T).T,
check_finite=False).T
except:
# in case cholesky factorization fails, revert to standard solver
kalman_gain = np.linalg.solve(innovation_cov, np.dot(covariance, self._observation_mat.T).T).T
if JPDA:
# marginalization
innovation = np.zeros((self.ndim))
cov_soft = np.zeros((self.ndim, self.ndim))
for measurement_idx, measurement in enumerate(measurement_t):
p_ij = marginalization[measurement_idx + 1] # + 1 for dummy
y_ij = measurement - predicted_measurement
innovation += y_ij * p_ij
cov_soft += p_ij * np.outer(y_ij, y_ij)
cov_soft = cov_soft - np.outer(innovation, innovation)
P_star = covariance - np.linalg.multi_dot((
kalman_gain, innovation_cov, kalman_gain.T))
p_0 = marginalization[0]
P_0 = p_0 * covariance + (1 - p_0) * P_star
new_covariance = P_0 + np.linalg.multi_dot((kalman_gain, cov_soft, kalman_gain.T))
else:
innovation = measurement_t - predicted_measurement
new_covariance = covariance - np.linalg.multi_dot((
kalman_gain, innovation_cov, kalman_gain.T))
new_mean = mean + np.dot(innovation, kalman_gain.T)
return new_mean, new_covariance
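if __name__ == "__main__":
    # Hedged demo (not part of the original file): the chi-square tables above
    # are typically used to gate associations by their squared Mahalanobis
    # distance. The state and measurements below are made-up numbers.
    demo_mean = np.array([0.0, 0.0])
    demo_cov = np.eye(2)
    demo_measurements = np.array([[1.0, 1.0], [4.0, 0.0]])
    d2 = squared_mahalanobis_distance(demo_mean, demo_cov, demo_measurements)
    print(d2)                   # -> [ 2. 16.]
    print(d2 <= chi2inv95[2])   # gate at the 0.95 quantile -> [ True False]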
|
anymail/message.py
|
bhumikapahariapuresoftware/django-anymail
| 1,324 |
107916
|
from email.mime.image import MIMEImage
from email.utils import unquote
from pathlib import Path
from django.core.mail import EmailMessage, EmailMultiAlternatives, make_msgid
from .utils import UNSET
class AnymailMessageMixin(EmailMessage):
"""Mixin for EmailMessage that exposes Anymail features.
Use of this mixin is optional. You can always just set Anymail
attributes on any EmailMessage.
(The mixin can be helpful with type checkers and other development
tools that complain about accessing Anymail's added attributes
on a regular EmailMessage.)
"""
def __init__(self, *args, **kwargs):
self.esp_extra = kwargs.pop('esp_extra', UNSET)
self.envelope_sender = kwargs.pop('envelope_sender', UNSET)
self.metadata = kwargs.pop('metadata', UNSET)
self.send_at = kwargs.pop('send_at', UNSET)
self.tags = kwargs.pop('tags', UNSET)
self.track_clicks = kwargs.pop('track_clicks', UNSET)
self.track_opens = kwargs.pop('track_opens', UNSET)
self.template_id = kwargs.pop('template_id', UNSET)
self.merge_data = kwargs.pop('merge_data', UNSET)
self.merge_global_data = kwargs.pop('merge_global_data', UNSET)
self.merge_metadata = kwargs.pop('merge_metadata', UNSET)
self.anymail_status = AnymailStatus()
super().__init__(*args, **kwargs)
def attach_inline_image_file(self, path, subtype=None, idstring="img", domain=None):
"""Add inline image from file path to an EmailMessage, and return its content id"""
assert isinstance(self, EmailMessage)
return attach_inline_image_file(self, path, subtype, idstring, domain)
def attach_inline_image(self, content, filename=None, subtype=None, idstring="img", domain=None):
"""Add inline image and return its content id"""
assert isinstance(self, EmailMessage)
return attach_inline_image(self, content, filename, subtype, idstring, domain)
class AnymailMessage(AnymailMessageMixin, EmailMultiAlternatives):
pass
def attach_inline_image_file(message, path, subtype=None, idstring="img", domain=None):
"""Add inline image from file path to an EmailMessage, and return its content id"""
pathobj = Path(path)
filename = pathobj.name
content = pathobj.read_bytes()
return attach_inline_image(message, content, filename, subtype, idstring, domain)
def attach_inline_image(message, content, filename=None, subtype=None, idstring="img", domain=None):
"""Add inline image to an EmailMessage, and return its content id"""
if domain is None:
# Avoid defaulting to hostname that might end in '.com', because some ESPs
# use Content-ID as filename, and Gmail blocks filenames ending in '.com'.
domain = 'inline' # valid domain for a msgid; will never be a real TLD
content_id = make_msgid(idstring, domain) # Content ID per RFC 2045 section 7 (with <...>)
image = MIMEImage(content, subtype)
image.add_header('Content-Disposition', 'inline', filename=filename)
image.add_header('Content-ID', content_id)
message.attach(image)
return unquote(content_id) # Without <...>, for use as the <img> tag src
ANYMAIL_STATUSES = [
'sent', # the ESP has sent the message (though it may or may not get delivered)
'queued', # the ESP will try to send the message later
'invalid', # the recipient email was not valid
'rejected', # the recipient is blacklisted
'failed', # the attempt to send failed for some other reason
'unknown', # anything else
]
class AnymailRecipientStatus:
"""Information about an EmailMessage's send status for a single recipient"""
def __init__(self, message_id, status):
try:
# message_id must be something that can be put in a set
# (see AnymailStatus.set_recipient_status)
set([message_id])
except TypeError:
raise TypeError("Invalid message_id %r is not scalar type" % message_id)
if status is not None and status not in ANYMAIL_STATUSES:
raise ValueError("Invalid status %r" % status)
self.message_id = message_id # ESP message id
self.status = status # one of ANYMAIL_STATUSES, or None for not yet sent to ESP
def __repr__(self):
return "AnymailRecipientStatus({message_id!r}, {status!r})".format(
message_id=self.message_id, status=self.status)
class AnymailStatus:
"""Information about an EmailMessage's send status for all recipients"""
def __init__(self):
self.message_id = None # set of ESP message ids across all recipients, or bare id if only one, or None
self.status = None # set of ANYMAIL_STATUSES across all recipients, or None for not yet sent to ESP
self.recipients = {} # per-recipient: { email: AnymailRecipientStatus, ... }
self.esp_response = None
def __repr__(self):
def _repr(o):
if isinstance(o, set):
# force sorted order, for reproducible testing
item_reprs = [repr(item) for item in sorted(o)]
return "{%s}" % ", ".join(item_reprs)
else:
return repr(o)
details = ["status={status}".format(status=_repr(self.status))]
if self.message_id:
details.append("message_id={message_id}".format(message_id=_repr(self.message_id)))
if self.recipients:
details.append("{num_recipients} recipients".format(num_recipients=len(self.recipients)))
return "AnymailStatus<{details}>".format(details=", ".join(details))
def set_recipient_status(self, recipients):
self.recipients.update(recipients)
recipient_statuses = self.recipients.values()
self.message_id = set([recipient.message_id for recipient in recipient_statuses])
if len(self.message_id) == 1:
self.message_id = self.message_id.pop() # de-set-ify if single message_id
self.status = set([recipient.status for recipient in recipient_statuses])
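# Minimal behavioural sketch (added for illustration; not in the original
# source). The addresses and message ids below are invented.
if __name__ == "__main__":
    status = AnymailStatus()
    status.set_recipient_status({
        "to@example.com": AnymailRecipientStatus("id-1", "sent"),
        "cc@example.com": AnymailRecipientStatus("id-2", "queued"),
    })
    print(status)  # e.g. AnymailStatus<status={'queued', 'sent'}, message_id={'id-1', 'id-2'}, 2 recipients>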
|
backend/api/views/import_export.py
|
skaghzz/doccano
| 3,989 |
107918
|
<filename>backend/api/views/import_export.py
from django.conf import settings
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
class Features(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request, *args, **kwargs):
return Response({
'cloud_upload': bool(settings.CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER),
})
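# For context (added here; not in the original file): a successful GET to this
# view returns a small JSON document such as {"cloud_upload": false}; the exact
# URL depends on the project's routing configuration.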
# class CloudUploadAPI(APIView):
# permission_classes = TextUploadAPI.permission_classes
#
# def get(self, request, *args, **kwargs):
# try:
# project_id = request.query_params['project_id']
# file_format = request.query_params['upload_format']
# cloud_container = request.query_params['container']
# cloud_object = request.query_params['object']
# except KeyError as ex:
# raise ValidationError('query parameter {} is missing'.format(ex))
#
# try:
# cloud_file = self.get_cloud_object_as_io(cloud_container, cloud_object)
# except ContainerDoesNotExistError:
# raise ValidationError('cloud container {} does not exist'.format(cloud_container))
# except ObjectDoesNotExistError:
# raise ValidationError('cloud object {} does not exist'.format(cloud_object))
#
# TextUploadAPI.save_file(
# user=request.user,
# file=cloud_file,
# file_format=file_format,
# project_id=project_id,
# )
#
# next_url = request.query_params.get('next')
#
# if next_url == 'about:blank':
# return Response(data='', content_type='text/plain', status=status.HTTP_201_CREATED)
#
# if next_url:
# return redirect(next_url)
#
# return Response(status=status.HTTP_201_CREATED)
#
# @classmethod
# def get_cloud_object_as_io(cls, container_name, object_name):
# provider = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER.lower()
# account = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_ACCOUNT
# key = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_SECRET_KEY
#
# driver = get_driver(DriverType.STORAGE, provider)
# client = driver(account, key)
#
# cloud_container = client.get_container(container_name)
# cloud_object = cloud_container.get_object(object_name)
#
# return iterable_to_io(cloud_object.as_stream())
|
tests/unit/document/test_namedscore.py
|
fastflair/docarray
| 591 |
107935
|
<gh_stars>100-1000
import pytest
from docarray.score import NamedScore
@pytest.mark.parametrize(
'init_args', [None, dict(value=123, description='hello'), NamedScore()]
)
@pytest.mark.parametrize('copy', [True, False])
def test_construct_ns(init_args, copy):
NamedScore(init_args, copy)
|
release/stubs.min/Grasshopper/Kernel/Graphs.py
|
htlcnn/ironpython-stubs
| 182 |
107966
|
<gh_stars>100-1000
# encoding: utf-8
# module Grasshopper.Kernel.Graphs calls itself Graphs
# from Grasshopper,Version=1.0.0.20,Culture=neutral,PublicKeyToken=dda4f5ec2cd80803
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# functions
def GH_GraphProxyObject(n_owner): # real signature unknown; restored from __doc__
""" GH_GraphProxyObject(n_owner: IGH_Graph) """
pass
# classes
class GH_AbstractGraph(object,IGH_Graph,GH_ISerializable):
# no doc
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearCaches(self):
""" ClearCaches(self: GH_AbstractGraph) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_AbstractGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_AbstractGraph) """
pass
def CurveToPointFArray(self,*args):
""" CurveToPointFArray(Crv: Curve,dest: RectangleF) -> Array[PointF] """
pass
def Draw_PostRenderGraph(self,g,cnt):
""" Draw_PostRenderGraph(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) """
pass
def Draw_PostRenderGrid(self,g,cnt):
""" Draw_PostRenderGrid(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) """
pass
def Draw_PostRenderGrip(self,g,cnt,index):
""" Draw_PostRenderGrip(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer,index: int) """
pass
def Draw_PostRenderTags(self,g,cnt):
""" Draw_PostRenderTags(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) """
pass
def Draw_PreRenderGraph(self,g,cnt):
""" Draw_PreRenderGraph(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrid(self,g,cnt):
""" Draw_PreRenderGrid(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrip(self,g,cnt,index):
""" Draw_PreRenderGrip(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer,index: int) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderTags(self,g,cnt):
""" Draw_PreRenderTags(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Duplicate(self):
""" Duplicate(self: GH_AbstractGraph) -> IGH_Graph """
pass
def EmitProxyObject(self):
""" EmitProxyObject(self: GH_AbstractGraph) -> IGH_GraphProxyObject """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_AbstractGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def IntersectionEvaluate(self,*args):
""" IntersectionEvaluate(C: Curve,offset: float) -> float """
pass
def OnGraphChanged(self,bIntermediate):
""" OnGraphChanged(self: GH_AbstractGraph,bIntermediate: bool) """
pass
def PrepareForUse(self):
""" PrepareForUse(self: GH_AbstractGraph) """
pass
def Read(self,reader):
""" Read(self: GH_AbstractGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_AbstractGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_AbstractGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_AbstractGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
""" __new__(cls: type,nName: str,nDescription: str) """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Description(self: GH_AbstractGraph) -> str
"""
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_AbstractGraph) -> Guid
"""
Grips=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Grips(self: GH_AbstractGraph) -> List[GH_GraphGrip]
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_AbstractGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_AbstractGraph) -> bool
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Name(self: GH_AbstractGraph) -> str
"""
GH_Evaluator=None
GraphChanged=None
class GH_BezierGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_BezierGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearCaches(self):
""" ClearCaches(self: GH_BezierGraph) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_BezierGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_BezierGraph) """
pass
def Curve(self,*args):
""" Curve(self: GH_BezierGraph) -> Curve """
pass
def Draw_PreRenderGrip(self,g,cnt,index):
""" Draw_PreRenderGrip(self: GH_BezierGraph,g: Graphics,cnt: GH_GraphContainer,index: int) -> GH_GraphDrawInstruction """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_BezierGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_BezierGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_BezierGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_BezierGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_BezierGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_BezierGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_BezierGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_BezierGraph) -> bool
"""
class GH_ConicGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_ConicGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_ConicGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_ConicGraph) """
pass
def Curve(self,*args):
""" Curve(self: GH_ConicGraph) -> NurbsCurve """
pass
def DestroyCurve(self,*args):
""" DestroyCurve(self: GH_ConicGraph) """
pass
def FitConic(self,*args):
""" FitConic(self: GH_ConicGraph,S: Point3d) -> NurbsCurve """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_ConicGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def MakeConic(self,*args):
""" MakeConic(self: GH_ConicGraph,w: float) -> NurbsCurve """
pass
def Read(self,reader):
""" Read(self: GH_ConicGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_ConicGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_ConicGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_ConicGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_ConicGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_ConicGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_ConicGraph) -> bool
"""
class GH_DoubleSineGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_DoubleSineGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearCaches(self):
""" ClearCaches(self: GH_DoubleSineGraph) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_DoubleSineGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_DoubleSineGraph) """
pass
def Draw_PreRenderGraph(self,g,cnt):
""" Draw_PreRenderGraph(self: GH_DoubleSineGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrip(self,g,cnt,index):
""" Draw_PreRenderGrip(self: GH_DoubleSineGraph,g: Graphics,cnt: GH_GraphContainer,index: int) -> GH_GraphDrawInstruction """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_DoubleSineGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def GraphAccuracy(self,*args):
""" GraphAccuracy(self: GH_DoubleSineGraph,reg: RectangleF) -> float """
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_DoubleSineGraph,reader: GH_IReader) -> bool """
pass
def RecFromPoints(self,*args):
""" RecFromPoints(self: GH_DoubleSineGraph,a: PointF,b: PointF) -> Rectangle """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_DoubleSineGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_DoubleSineGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_DoubleSineGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_DoubleSineGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_DoubleSineGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_DoubleSineGraph) -> bool
"""
m_eq0=None
m_eq1=None
m_path0=None
m_path1=None
class GH_GaussianGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_GaussianGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_GaussianGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_GaussianGraph) """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_GaussianGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_GaussianGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_GaussianGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_GaussianGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_GaussianGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_GaussianGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_GaussianGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_GaussianGraph) -> bool
"""
class GH_GraphContainer(object,GH_ISerializable,IGH_ResponsiveObject):
"""
GH_GraphContainer(n_graph: IGH_Graph)
GH_GraphContainer(n_graph: IGH_Graph,n_x0: float,n_x1: float,n_y0: float,n_y1: float)
"""
def ClearCaches(self):
""" ClearCaches(self: GH_GraphContainer) """
pass
def Duplicate(self):
""" Duplicate(self: GH_GraphContainer) -> GH_GraphContainer """
pass
def FromX(self,t):
""" FromX(self: GH_GraphContainer,t: float) -> float """
pass
def FromY(self,t):
""" FromY(self: GH_GraphContainer,t: float) -> float """
pass
def Internal_Render_Graph(self,*args):
""" Internal_Render_Graph(self: GH_GraphContainer,G: Graphics) """
pass
def Internal_Render_Grip(self,*args):
""" Internal_Render_Grip(self: GH_GraphContainer,g: Graphics,x: int,y: int) """
pass
def Internal_Render_Grips(self,*args):
""" Internal_Render_Grips(self: GH_GraphContainer,G: Graphics) """
pass
def Internal_Render_HorizontalConstraint(self,*args):
""" Internal_Render_HorizontalConstraint(self: GH_GraphContainer,g: Graphics,y: int) """
pass
def Internal_Render_InvalidIcon(self,*args):
""" Internal_Render_InvalidIcon(self: GH_GraphContainer,g: Graphics) """
pass
def Internal_Render_LockedIcon(self,*args):
""" Internal_Render_LockedIcon(self: GH_GraphContainer,g: Graphics) """
pass
def Internal_Render_TagGDIObjects(self,*args):
""" Internal_Render_TagGDIObjects(self: GH_GraphContainer,zoom: Single,bg_brush: SolidBrush,fg_brush: SolidBrush,fg_pen: Pen) -> (SolidBrush,SolidBrush,Pen) """
pass
def Internal_Render_TagX(self,*args):
""" Internal_Render_TagX(self: GH_GraphContainer,g: Graphics,graphrec: RectangleF,r_a: float,r_b: float) """
pass
def Internal_Render_TagY(self,*args):
""" Internal_Render_TagY(self: GH_GraphContainer,g: Graphics,graphrec: RectangleF,r_a: float,r_b: float) """
pass
def Internal_Render_TextTag(self,*args):
""" Internal_Render_TextTag(self: GH_GraphContainer,g: Graphics,graphrec: RectangleF,lowerright: bool,tag: str) """
pass
def Internal_Render_VerticalConstraint(self,*args):
""" Internal_Render_VerticalConstraint(self: GH_GraphContainer,g: Graphics,x: int) """
pass
def NearestGrip(self,*args):
""" NearestGrip(self: GH_GraphContainer,pt: PointF,max_search: float) -> int """
pass
def OnGraphChanged(self,bIntermediate):
""" OnGraphChanged(self: GH_GraphContainer,bIntermediate: bool) """
pass
def PrepareForUse(self):
""" PrepareForUse(self: GH_GraphContainer) """
pass
def Read(self,reader):
""" Read(self: GH_GraphContainer,reader: GH_IReader) -> bool """
pass
def RemapPointsToGraphRegion(self,pts):
""" RemapPointsToGraphRegion(self: GH_GraphContainer,pts: Array[PointF]) """
pass
def Render(self,G,bIncludeDomainTags,samples):
""" Render(self: GH_GraphContainer,G: Graphics,bIncludeDomainTags: bool,samples: List[float]) """
pass
@staticmethod
def Render_GraphBackground(G,region,bActive):
""" Render_GraphBackground(G: Graphics,region: RectangleF,bActive: bool) """
pass
@staticmethod
def Render_GraphGrid(G,region):
""" Render_GraphGrid(G: Graphics,region: RectangleF) """
pass
@staticmethod
def Render_GraphPen():
""" Render_GraphPen() -> Pen """
pass
@staticmethod
def Render_GuidePen():
""" Render_GuidePen() -> Pen """
pass
@staticmethod
def Render_HorizontalConstraint(g,rec,t):
""" Render_HorizontalConstraint(g: Graphics,rec: RectangleF,t: float) """
pass
@staticmethod
def Render_VerticalConstraint(g,rec,t):
""" Render_VerticalConstraint(g: Graphics,rec: RectangleF,t: float) """
pass
def RespondToKeyDown(self,sender,e):
""" RespondToKeyDown(self: GH_GraphContainer,sender: GH_Canvas,e: KeyEventArgs) -> GH_ObjectResponse """
pass
def RespondToKeyUp(self,sender,e):
""" RespondToKeyUp(self: GH_GraphContainer,sender: GH_Canvas,e: KeyEventArgs) -> GH_ObjectResponse """
pass
def RespondToMouseDoubleClick(self,sender,e):
""" RespondToMouseDoubleClick(self: GH_GraphContainer,sender: GH_Canvas,e: GH_CanvasMouseEvent) -> GH_ObjectResponse """
pass
def RespondToMouseDown(self,sender,e):
""" RespondToMouseDown(self: GH_GraphContainer,sender: GH_Canvas,e: GH_CanvasMouseEvent) -> GH_ObjectResponse """
pass
def RespondToMouseMove(self,sender,e):
""" RespondToMouseMove(self: GH_GraphContainer,sender: GH_Canvas,e: GH_CanvasMouseEvent) -> GH_ObjectResponse """
pass
def RespondToMouseUp(self,sender,e):
""" RespondToMouseUp(self: GH_GraphContainer,sender: GH_Canvas,e: GH_CanvasMouseEvent) -> GH_ObjectResponse """
pass
def SolveGraphPath(self,*args):
""" SolveGraphPath(self: GH_GraphContainer) -> GraphicsPath """
pass
def ToRegionBox(self,pt):
""" ToRegionBox(self: GH_GraphContainer,pt: PointF) -> PointF """
pass
def ToRegionBox_x(self,x):
""" ToRegionBox_x(self: GH_GraphContainer,x: float) -> Single """
pass
def ToRegionBox_y(self,y):
""" ToRegionBox_y(self: GH_GraphContainer,y: float) -> Single """
pass
def ToUnitBox(self,pt):
""" ToUnitBox(self: GH_GraphContainer,pt: PointF) -> PointF """
pass
def ToX(self,t_unit):
""" ToX(self: GH_GraphContainer,t_unit: float) -> float """
pass
def ToY(self,t_unit):
""" ToY(self: GH_GraphContainer,t_unit: float) -> float """
pass
def TryValueAt(self,t):
""" TryValueAt(self: GH_GraphContainer,t: float) -> float """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_GraphContainer,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_GraphContainer,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,n_graph,n_x0=None,n_x1=None,n_y0=None,n_y1=None):
"""
__new__(cls: type,n_graph: IGH_Graph)
__new__(cls: type,n_graph: IGH_Graph,n_x0: float,n_x1: float,n_y0: float,n_y1: float)
"""
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Graph=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Graph(self: GH_GraphContainer) -> IGH_Graph
Set: Graph(self: GH_GraphContainer)=value
"""
LockGrips=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: LockGrips(self: GH_GraphContainer) -> bool
Set: LockGrips(self: GH_GraphContainer)=value
"""
Region=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Region(self: GH_GraphContainer) -> RectangleF
Set: Region(self: GH_GraphContainer)=value
"""
X0=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: X0(self: GH_GraphContainer) -> float
Set: X0(self: GH_GraphContainer)=value
"""
X1=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: X1(self: GH_GraphContainer) -> float
Set: X1(self: GH_GraphContainer)=value
"""
Y0=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Y0(self: GH_GraphContainer) -> float
Set: Y0(self: GH_GraphContainer)=value
"""
Y1=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Y1(self: GH_GraphContainer) -> float
Set: Y1(self: GH_GraphContainer)=value
"""
GraphChanged=None
GraphChangedEventHandler=None
m_graphpath=None
class GH_GraphDrawInstruction(Enum,IComparable,IFormattable,IConvertible):
""" enum GH_GraphDrawInstruction,values: none (0),skip (1) """
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
none=None
skip=None
value__=None
class GH_GraphGrip(object):
"""
GH_GraphGrip()
GH_GraphGrip(nX: float,nY: float)
GH_GraphGrip(nX: float,nY: float,nConstraint: GH_GripConstraint)
GH_GraphGrip(nOther: GH_GraphGrip)
"""
def LimitToUnitDomain(self,bLimitX,bLimitY):
""" LimitToUnitDomain(self: GH_GraphGrip,bLimitX: bool,bLimitY: bool) """
pass
def OnGripChanged(self,bIntermediate):
""" OnGripChanged(self: GH_GraphGrip,bIntermediate: bool) """
pass
def SetIndex(self,nIndex):
""" SetIndex(self: GH_GraphGrip,nIndex: int) """
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type)
__new__(cls: type,nX: float,nY: float)
__new__(cls: type,nX: float,nY: float,nConstraint: GH_GripConstraint)
__new__(cls: type,nOther: GH_GraphGrip)
"""
pass
def __ne__(self,*args):
pass
Constraint=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Constraint(self: GH_GraphGrip) -> GH_GripConstraint
Set: Constraint(self: GH_GraphGrip)=value
"""
Index=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Index(self: GH_GraphGrip) -> int
"""
Point=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Point(self: GH_GraphGrip) -> PointF
"""
X=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: X(self: GH_GraphGrip) -> float
Set: X(self: GH_GraphGrip)=value
"""
Y=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Y(self: GH_GraphGrip) -> float
Set: Y(self: GH_GraphGrip)=value
"""
GripChanged=None
GripChangedEventHandler=None
m_c=None
m_i=None
m_x=None
m_y=None
class GH_GripConstraint(Enum,IComparable,IFormattable,IConvertible):
""" enum GH_GripConstraint,values: horizontal (1),none (0),vertical (2) """
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
horizontal=None
none=None
value__=None
vertical=None
class GH_LinearGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_LinearGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_LinearGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_LinearGraph) """
pass
def Draw_PreRenderGraph(self,g,cnt):
""" Draw_PreRenderGraph(self: GH_LinearGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def EmitProxyObject(self):
""" EmitProxyObject(self: GH_LinearGraph) -> IGH_GraphProxyObject """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_LinearGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_LinearGraph,reader: GH_IReader) -> bool """
pass
def SetFromParameters(self,nA,nB):
""" SetFromParameters(self: GH_LinearGraph,nA: float,nB: float) """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_LinearGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_LinearGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_LinearGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_LinearGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_LinearGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_LinearGraph) -> bool
"""
GH_LinearGraphProxy=None
class GH_ParabolaGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_ParabolaGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_ParabolaGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_ParabolaGraph) """
pass
def Draw_PreRenderGraph(self,g,cnt):
""" Draw_PreRenderGraph(self: GH_ParabolaGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_ParabolaGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_ParabolaGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_ParabolaGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_ParabolaGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_ParabolaGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_ParabolaGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_ParabolaGraph) -> bool
"""
class GH_PerlinGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_PerlinGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_PerlinGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_PerlinGraph) """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_PerlinGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Interpolate(self,*args):
""" Interpolate(self: GH_PerlinGraph,v0: float,v1: float,v2: float,v3: float,a: float) -> float """
pass
def Noise(self,*args):
""" Noise(self: GH_PerlinGraph,i: int) -> float """
pass
def Read(self,reader):
""" Read(self: GH_PerlinGraph,reader: GH_IReader) -> bool """
pass
def Smooth(self,*args):
""" Smooth(self: GH_PerlinGraph,x: float) -> float """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_PerlinGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_PerlinGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_PerlinGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_PerlinGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_PerlinGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_PerlinGraph) -> bool
"""
amplitude=None
decay=None
frequency=None
x_offset=None
y_offset=None
class GH_PowerGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_PowerGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_PowerGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_PowerGraph) """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_PowerGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_PowerGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_PowerGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_PowerGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_PowerGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_PowerGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_PowerGraph) -> bool
"""
class GH_SincGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_SincGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_SincGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_SincGraph) """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_SincGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_SincGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_SincGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_SincGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_SincGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_SincGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_SincGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_SincGraph) -> bool
"""
amplitude=None
frequency=None
X0=None
X1=None
x_shift=None
Y0=None
Y1=None
y_shift=None
class GH_SineEquation(object,GH_ISerializable):
""" GH_SineEquation() """
def Read(self,reader):
""" Read(self: GH_SineEquation,reader: GH_IReader) -> bool """
pass
def SetEquationFromGrips(self):
""" SetEquationFromGrips(self: GH_SineEquation) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_SineEquation,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_SineEquation,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
amplitude=None
frequency=None
offset=None
shift=None
X0=None
X1=None
Y0=None
Y1=None
class GH_SineGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_SineGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_SineGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_SineGraph) """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_SineGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_SineGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_SineGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_SineGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_SineGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_SineGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_SineGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_SineGraph) -> bool
"""
m_eq=None
class GH_SquareRootGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_SquareRootGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_SquareRootGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_SquareRootGraph) """
pass
def Draw_PreRenderGraph(self,g,cnt):
""" Draw_PreRenderGraph(self: GH_SquareRootGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_SquareRootGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_SquareRootGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_SquareRootGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_SquareRootGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_SquareRootGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_SquareRootGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_SquareRootGraph) -> bool
"""
class IGH_Graph(GH_ISerializable):
# no doc
def ClearCaches(self):
""" ClearCaches(self: IGH_Graph) """
pass
def Draw_PostRenderGraph(self,g,cnt):
""" Draw_PostRenderGraph(self: IGH_Graph,g: Graphics,cnt: GH_GraphContainer) """
pass
def Draw_PostRenderGrid(self,g,cnt):
""" Draw_PostRenderGrid(self: IGH_Graph,g: Graphics,cnt: GH_GraphContainer) """
pass
def Draw_PostRenderGrip(self,g,cnt,index):
""" Draw_PostRenderGrip(self: IGH_Graph,g: Graphics,cnt: GH_GraphContainer,index: int) """
pass
def Draw_PostRenderTags(self,g,cnt):
""" Draw_PostRenderTags(self: IGH_Graph,g: Graphics,cnt: GH_GraphContainer) """
pass
def Draw_PreRenderGraph(self,g,cnt):
""" Draw_PreRenderGraph(self: IGH_Graph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrid(self,g,cnt):
""" Draw_PreRenderGrid(self: IGH_Graph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrip(self,g,cnt,index):
""" Draw_PreRenderGrip(self: IGH_Graph,g: Graphics,cnt: GH_GraphContainer,index: int) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderTags(self,g,cnt):
""" Draw_PreRenderTags(self: IGH_Graph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Duplicate(self):
""" Duplicate(self: IGH_Graph) -> IGH_Graph """
pass
def EmitProxyObject(self):
""" EmitProxyObject(self: IGH_Graph) -> IGH_GraphProxyObject """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: IGH_Graph,reg: RectangleF) -> Array[PointF] """
pass
def OnGraphChanged(self,bIntermediate):
""" OnGraphChanged(self: IGH_Graph,bIntermediate: bool) """
pass
def PrepareForUse(self):
""" PrepareForUse(self: IGH_Graph) """
pass
def ValueAt(self,t):
""" ValueAt(self: IGH_Graph,t: float) -> float """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Description(self: IGH_Graph) -> str
"""
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: IGH_Graph) -> Guid
"""
Grips=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Grips(self: IGH_Graph) -> List[GH_GraphGrip]
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: IGH_Graph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: IGH_Graph) -> bool
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Name(self: IGH_Graph) -> str
"""
GraphChanged=None
GraphChangedEventHandler=None
class IGH_GraphProxyObject:
# no doc
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
|
scripts/migrations/versions/45c7c3141a21_add_proto_type_settings_for_dial_vpn.py
|
lenz-li/FlexGW-1
| 212 |
108021
|
"""add proto type settings for dial vpn.
Revision ID: 45c7c3141a21
Revises: 313c830f061c
Create Date: 2014-10-10 10:22:23.395475
"""
# revision identifiers, used by Alembic.
revision = '45c7c3141a21'
down_revision = '313c830f061c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('dial_settings', sa.Column('proto', sa.String(length=80), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('dial_settings', 'proto')
### end Alembic commands ###
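# For reference (not part of the auto-generated migration): the revision is
# applied or reverted with the standard Alembic CLI, e.g.
#   alembic upgrade 45c7c3141a21
#   alembic downgrade 313c830f061c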
|
Lib/test/lazyimports/dict_update.py
|
mananpal1997/cinder
| 1,886 |
108053
|
<reponame>mananpal1997/cinder
from __future__ import lazy_imports
import warnings
vars = {}
vars.update(globals())
print(repr(vars['warnings']))
|
python/unpickle/app.py
|
nobgr/vulhub
| 9,681 |
108073
|
import pickle
import base64
from flask import Flask, request
app = Flask(__name__)
@app.route("/")
def index():
try:
user = base64.b64decode(request.cookies.get('user'))
user = pickle.loads(user)
username = user["username"]
except:
username = "Guest"
return "Hello %s" % username
if __name__ == "__main__":
app.run()
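# Client-side sketch (illustration only; host and port assume the default Flask
# dev server started above). It shows how the cookie this app expects is built;
# unpickling such attacker-controlled data is the vulnerability on display here.
#
#   import base64, pickle, requests
#   cookie = base64.b64encode(pickle.dumps({"username": "alice"})).decode()
#   requests.get("http://127.0.0.1:5000/", cookies={"user": cookie})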
|
tools/dependency_finder.py
|
datalayer-contrib/jupyterwidgets-tutorial
| 342 |
108080
|
<gh_stars>100-1000
import re
import itertools
from pathlib import Path
import nbformat
from stdlib_list import stdlib_list
VALID_PACKAGE_CHARS = '[a-zA-Z0-9_]'
# A raw f-string is needed so \s and \b reach the regex engine as regex escapes;
# in a plain string \b is a literal backspace and the "from ... import" branch
# could never match.
REG = re.compile(rf'^\s*import ({VALID_PACKAGE_CHARS}+)|^\s*from ({VALID_PACKAGE_CHARS}+)\b\s+import', re.ASCII)
def import_statements(code_source):
"""
Find and return all lines in the code cells of a notebook that contain
import statements.
Parameters
----------
code_source : an nbformat NotebookNode object or str
Notebook whose cells will be checked for import statements or the
contents of a Python file (as a string).
Returns
-------
list
List of strings, each an import statement from the notebook.
"""
if isinstance(code_source, str):
import_code = [code_source]
else:
import_cells = [c for c in code_source['cells'] if
c['cell_type'] == 'code' and 'import' in c['source']]
import_code = [c['source'] for c in import_cells]
imports = [line for code in import_code
for line in code.split('\n') if REG.match(line)]
return imports
def dependency_names_from_import_statements(imports, unique=True):
"""
Extract the package names from a list of import statements.
Parameters
----------
imports : list
List of import statements from which package names will be extracted.
unique : bool, optional
If ``True``, return list of unique package names, otherwise return
package names in same order as input.
"""
packages = []
for i in imports:
imp = REG.search(i)
for g in imp.groups():
if g is not None:
packages.append(g)
continue
if unique:
packages = list(set(packages))
return packages
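# Quick sketch (added for illustration; not in the original tool): the two
# helpers above can be chained on a plain source string.
#
#   >>> src = "import numpy as np\nfrom astropy import units\n"
#   >>> imports = import_statements(src)
#   >>> dependency_names_from_import_statements(imports, unique=False)
#   ['numpy', 'astropy']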
def bad_imports(imports, as_written=False):
"""
Try a bunch of imports and report whether each was successful.
Parameters
----------
imports : list
List of import statements (e.g. ``import numpy`` or
``from astropy import units``) to try. Leading whitespace in the
imports list is fine; it will be stripped before trying the import.
as_written : bool, optional
If ``True``, test the import statements exactly as they are passed in.
Otherwise, just test the package name (i.e. the top-level import).
Returns
-------
list
List of bool, ``True`` if the import fails, ``False`` if it succeeds.
"""
result = []
for imp in imports:
if as_written:
test = imp.strip()
else:
test = dependency_names_from_import_statements([imp])[0]
test = 'import ' + test
try:
exec(test)
except ModuleNotFoundError:
result.append(True)
else:
result.append(False)
return result
def identify_dependencies(directory, nb_version=4,
exclude_hidden=True, skip=None,
notebooks=True, python_files=True,
verbose=False):
"""
Find all notebooks in or below a directory, grab their import
statements, and translate that to a list of dependencies.
Parameters
----------
directory : str
Path to directory to be searched for notebook. All subdirectories of
this path will be searched.
nb_version : int, optional
Notebook version to assume when reading notebooks.
exclude_hidden : bool, optional
Exclude hidden directories or files (i.e. those whose name begins
with ``.``).
skip : list of str, optional
List of notebook or directory names to skip. If a directory name is
part of the list then all notebooks below that directory will be
skipped. The name must match exactly to cause a skip.
notebooks : bool, optional
If ``True``, check for imports in notebooks.
python_files : bool, optional
If ``True``, check for imports in python files (i.e. files that
end ``.py``).
verbose: bool, optional
If ``True``, print summary of progress while working.
"""
p = Path(directory)
notebook_paths = p.glob('**/*.ipynb') if notebooks else []
python_paths = p.glob('**/*.py') if python_files else []
dep_info = {
'path': [],
'imports': [],
'packages': [],
'missing': [],
}
for path in itertools.chain(notebook_paths, python_paths):
# Skip any directories or files that start with a dot...
hidden = [part.startswith('.') and part != '..' for part in path.parts]
skips = any(part in skip for part in path.parts) if skip else False
if any(hidden) or skips:
if verbose:
print(f'...Skipping {path}', path.parts[-1])
continue
if path.suffix == '.ipynb':
nbnode = nbformat.read(str(path), nb_version)
else:
with path.open() as f:
nbnode = f.read()
imports = import_statements(nbnode)
bads = bad_imports(imports)
any_bad = any(bads)
deps = dependency_names_from_import_statements(imports, unique=False)
# print(f'Checked file { path } found { "SOME" if any_bad else "no" } bad imports')
if any_bad:
bad_list = [p for p, b in zip(deps, bads) if b]
if verbose:
print(f' Missing packages: {bad_list}')
dep_info['path'].extend([str(path)] * len(imports))
dep_info['imports'].extend(imports)
dep_info['packages'].extend(deps)
dep_info['missing'].extend(bads)
return dep_info
def packages_to_install(dep_info, exclude=None):
"""
Produce a list of packages that need to be installed to use this set of
materials. Python standard library modules are excluded.
Parameters
----------
dep_info : dict
Dictionary of dependency information, generated by
`identify_dependencies`.
exclude : list, optional
List of packages to exclude from the results.
Returns
-------
list
List of packages needed for whatever set of files this was run on.
"""
if exclude is None:
exclude = []
packages = list(set(dep_info['packages']))
standard = stdlib_list("3.6")
packages = [p for p in packages if p not in standard and p not in exclude]
return packages
if __name__ == '__main__':
# Some day add options...
directory = '.'
dep_info = identify_dependencies(directory, skip=['setup.py'])
to_install = packages_to_install(dep_info)
print(' '.join(to_install))
|
stellargraph/utils/hyperbolic.py
|
DataLab-CQU/stellargraph
| 2,428 |
108090
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"poincare_ball_distance",
"poincare_ball_exp",
"poincare_ball_mobius_add",
]
import tensorflow as tf
import numpy as np
# helper functions to manage numerical issues, inspired by https://github.com/dalab/hyperbolic_nn
PROJECTION_EPS = 1e-5
TANH_LIMIT = 15.0
ATANH_LIMIT = np.nextafter(1, 0)  # largest double strictly below 1.0, keeps atanh finite
def _project(c, x):
"""
Ensure ``x`` lies on the Poincaré ball with curvature ``-c``, in the presence of small numerical
errors.
"""
max_norm = tf.math.rsqrt(c) * (1 - PROJECTION_EPS)
return tf.clip_by_norm(x, clip_norm=max_norm, axes=-1)
def _tanh(x):
return tf.tanh(tf.clip_by_value(x, -TANH_LIMIT, TANH_LIMIT))
def _atanh(x):
return tf.atanh(tf.clip_by_value(x, -ATANH_LIMIT, ATANH_LIMIT))
def poincare_ball_mobius_add(c, x, y):
r"""
Möbius addition of ``x`` and ``y``, on the Poincaré ball with curvature ``-c``: :math:`\mathbf{x} \oplus^c \mathbf{y}`.
See Section 2 of [1] for more details.
[1] <NAME>, <NAME>, and <NAME>, “Hyperbolic Neural Networks,” `arXiv:1805.09112 <http://arxiv.org/abs/1805.09112>`_, Jun. 2018.
Args:
c (tensorflow Tensor-like): the curvature of the hyperbolic space(s). Must be able to be
broadcast to ``x`` and ``y``.
x (tensorflow Tensor-like): a tensor containing vectors in hyperbolic space, where each
vector is an element of the last axis (for example, if ``x`` has shape ``(2, 3, 4)``, it
represents ``2 * 3 = 6`` hyperbolic vectors, each of length ``4``). Must be able to be
broadcast to ``y``.
y (tensorflow Tensor-like): a tensor containing vectors in hyperbolic space, where each
vector is an element of the last axis similar to ``x``. Must be able to be broadcast to
``x``.
Returns:
A TensorFlow Tensor containing the Möbius addition of each of the vectors (last axis) in
``x`` and ``y``, using the corresponding curvature from ``c``. This tensor has the same
shape as the Euclidean equivalent ``x + y``.
"""
x_norm2 = tf.reduce_sum(x * x, axis=-1, keepdims=True)
y_norm2 = tf.reduce_sum(y * y, axis=-1, keepdims=True)
x_dot_y = tf.reduce_sum(x * y, axis=-1, keepdims=True)
inner = 1 + 2 * c * x_dot_y
numer = (inner + c * y_norm2) * x + (1 - c * x_norm2) * y
denom = inner + c * c * x_norm2 * y_norm2
return _project(c, numer / denom)
def poincare_ball_exp(c, x, v):
r"""
The exponential map of ``v`` at ``x`` on the Poincaré ball with curvature ``-c``:
:math:`\exp_{\mathbf{x}}^c(\mathbf{v})`.
See Section 2 of [1] for more details.
[1] <NAME>, <NAME>, and <NAME>, “Hyperbolic Neural Networks,” `arXiv:1805.09112 <http://arxiv.org/abs/1805.09112>`_, Jun. 2018.
Args:
c (tensorflow Tensor-like): the curvature of the hyperbolic space(s). Must be able to be
broadcast to ``x`` and ``v``.
x (tensorflow Tensor-like, optional): a tensor containing vectors in hyperbolic space
representing the base points for the exponential map, where each vector is an element of
the last axis (for example, if ``x`` has shape ``(2, 3, 4)``, it represents ``2 * 3 =
6`` hyperbolic vectors, each of length ``4``). Must be able to be broadcast to ``v``. An
explicit ``x = None`` is equivalent to ``x`` being all zeros, but uses a more efficient
form of :math:`\exp_{\mathbf{0}}^c(\mathbf{v})`.
v (tensorflow Tensor-like): a tensor containing vectors in Euclidean space representing the
tangent vectors for the exponential map, where each vector is an element of the last
axis similar to ``x``. Must be able to be broadcast to ``x``.
"""
v_norm2 = tf.reduce_sum(v * v, axis=-1, keepdims=True)
c_v_norm = tf.sqrt(c * v_norm2)
if x is None:
coeff = _tanh(c_v_norm) / c_v_norm
return _project(c, coeff * v)
x_norm2 = tf.reduce_sum(x * x, axis=-1, keepdims=True)
inner = c_v_norm / (1 - c * x_norm2)
coeff = _tanh(inner) / c_v_norm
return poincare_ball_mobius_add(c, x, coeff * v)
def poincare_ball_distance(c, x, y):
r"""
Distance between ``x`` and ``y``, on the Poincaré ball with curvature ``-c``: :math:`d_c(\mathbf{x}, \mathbf{y})`.
See Section 2 of [1] for more details.
[1] <NAME>, <NAME>, and <NAME>, “Hyperbolic Neural Networks,” `arXiv:1805.09112 <http://arxiv.org/abs/1805.09112>`_, Jun. 2018.
Args:
c (tensorflow Tensor-like): the curvature of the hyperbolic space(s). Must be able to be
broadcast to ``x`` and ``y``.
x (tensorflow Tensor-like): a tensor containing vectors in hyperbolic space, where each
vector is an element of the last axis (for example, if ``x`` has shape ``(2, 3, 4)``, it
represents ``2 * 3 = 6`` hyperbolic vectors, each of length ``4``). Must be able to be
broadcast to ``y``.
y (tensorflow Tensor-like): a tensor containing vectors in hyperbolic space, where each
vector is an element of the last axis similar to ``x``. Must be able to be broadcast to
``x``.
Returns:
A TensorFlow Tensor containing the hyperbolic distance between each of the vectors (last
axis) in ``x`` and ``y``, using the corresponding curvature from ``c``. This tensor has the
same shape as the Euclidean equivalent ``tf.norm(x - y)``.
"""
sqrt_c = tf.sqrt(c)
return (2 / sqrt_c) * _atanh(
sqrt_c * tf.norm(poincare_ball_mobius_add(c, -x, y), axis=-1)
)
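# --- Usage sketch (editor addition, not part of the original module) ---
# A minimal, hedged example of how the helpers above compose, assuming TensorFlow 2.x
# eager execution; the tensor values are illustrative only.
def _example_poincare_ops():
    c = tf.constant(1.0)                    # curvature -c = -1 (unit Poincare ball)
    x = tf.constant([[0.1, 0.2, 0.0]])      # a point on the ball (norm < 1/sqrt(c))
    v = tf.constant([[0.05, -0.03, 0.02]])  # a tangent vector at x
    y = poincare_ball_exp(c, x, v)          # exponential map of v at x
    d = poincare_ball_distance(c, x, y)     # hyperbolic distance between x and y
    s = poincare_ball_mobius_add(c, x, y)   # Moebius addition x (+)_c y
    return y, d, s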
|
codes/utils/timer.py
|
CJWBW/HCFlow
| 123 |
108143
|
<filename>codes/utils/timer.py
import time
class ScopeTimer:
def __init__(self, name):
self.name = name
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
print("{} {:.3E}".format(self.name, self.interval))
class Timer:
def __init__(self):
self.times = []
def tick(self):
self.times.append(time.time())
def get_average_and_reset(self):
if len(self.times) < 2:
return -1
avg = (self.times[-1] - self.times[0]) / (len(self.times) - 1)
self.times = [self.times[-1]]
return avg
def get_last_iteration(self):
if len(self.times) < 2:
return 0
return self.times[-1] - self.times[-2]
class TickTock:
def __init__(self):
self.time_pairs = []
self.current_time = None
def tick(self):
self.current_time = time.time()
def tock(self):
assert self.current_time is not None, self.current_time
self.time_pairs.append([self.current_time, time.time()])
self.current_time = None
def get_average_and_reset(self):
if len(self.time_pairs) == 0:
return -1
deltas = [t2 - t1 for t1, t2 in self.time_pairs]
avg = sum(deltas) / len(deltas)
self.time_pairs = []
return avg
def get_last_iteration(self):
if len(self.time_pairs) == 0:
return -1
return self.time_pairs[-1][1] - self.time_pairs[-1][0]
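# --- Usage sketch (editor addition, not part of the original module) ---
# A minimal example of the three timing helpers above; the sleep durations are
# illustrative only.
if __name__ == "__main__":
    with ScopeTimer("sleep"):       # prints "sleep <elapsed>" when the block exits
        time.sleep(0.01)
    timer = Timer()
    for _ in range(3):              # tick once per "iteration"
        timer.tick()
        time.sleep(0.01)
    timer.tick()
    print("avg per iteration:", timer.get_average_and_reset())
    tt = TickTock()
    tt.tick()
    time.sleep(0.01)
    tt.tock()
    print("last tick/tock:", tt.get_last_iteration())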
|
python/kwiver/kwiver_tools.py
|
johnwparent/kwiver
| 176 |
108156
|
"""
Console scripts for the tools provided by KWIVER.
These scripts are used in the wheel: they set up the environment for the KWIVER
tools and launch them in a subprocess.
"""
import os
import subprocess
import kwiver
import sys
from pkg_resources import iter_entry_points
from typing import Dict, List
from kwiver.vital import vital_logging
from kwiver.vital.util.initial_plugin_path import get_initial_plugin_path
KWIVER_BIN_DIR = os.path.join(os.path.dirname(os.path.abspath(kwiver.__file__)), 'bin')
KWIVER_SUPPORTED_TOOLS = ['kwiver', 'plugin_explorer']
logger = vital_logging.getLogger(__name__)
def _setup_environment() -> Dict:
"""
Create a dictionary with environment variables for running kwiver tools.
The dictionary includes appending LD_LIBRARY_PATH, adding path to vital
logging factory to VITAL_LOGGER_FACTORY, and path to default plugins in
KWIVER_PLUGIN_PATH.
Returns:
Dictionary with environment variables used for running tools
"""
# Add additional ld libraries
ld_library_paths = []
for entry_point in iter_entry_points('kwiver.env.ld_library_path'):
ld_library_path = entry_point.load()()
if not os.path.exists(ld_library_path):
logger.warn(f"Invalid path {ld_library_path} specified in {entry_point.name}")
else:
ld_library_paths.append(ld_library_path)
ld_library_path_str = ":".join(ld_library_paths)
# Add logger factories
vital_logger_factory = None
for entry_point in iter_entry_points('kwiver.env.logger_factory', name='vital_log4cplus_logger_factory'):
logger_factory = entry_point.load()()
vital_logger_factory = logger_factory
# Check if LD_LIBRARY_PATH is set to something and append it to the current ld library path
if os.environ.get('LD_LIBRARY_PATH'):
        ld_library_path_str += ":" + os.environ.get('LD_LIBRARY_PATH')
tool_environment = {
"LD_LIBRARY_PATH": ld_library_path_str,
"VITAL_LOGGER_FACTORY": vital_logger_factory,
"KWIVER_PLUGIN_PATH": get_initial_plugin_path()
}
# Add the remaining environment variables without fiddling with what we have already set
for env_var_name, env_var_val in os.environ.items():
if env_var_name not in tool_environment.keys():
tool_environment[env_var_name] = env_var_val
return tool_environment
def _kwiver_tools(tool_name: str, args: List[str]) -> int:
"""
Configure logging, setup environment and run a subprocess with kwiver tool in it.
Args:
tool_name: Name of the tool that would be run as a subprocess
args: Command line argument provided by the user for the tool
Return:
Return code for the subprocess that runs the tool
"""
vital_logging._configure_logging()
assert tool_name in KWIVER_SUPPORTED_TOOLS, f"Unsupported tool {tool_name} specified"
tool_environment = _setup_environment()
tool_path = os.path.join(KWIVER_BIN_DIR, tool_name)
assert os.path.exists(tool_path), f"Tool {tool_name} not available in {tool_path}"
args.insert(0, tool_path)
subprocess_complete = subprocess.run(args, shell=False, check=False, env=tool_environment)
return subprocess_complete.returncode
def plugin_explorer() -> None:
"""
Console script function for plugin_explorer.
"""
cmd_args = ["--skip-relative"]
cmd_args.extend(sys.argv[1:])
raise SystemExit(_kwiver_tools("plugin_explorer", cmd_args))
def kwiver() -> None:
"""
Console script function for kwiver runner.
"""
raise SystemExit(_kwiver_tools("kwiver", sys.argv[1:]))
|
tests/unit/plugins/filter/test_users_groups.py
|
manala/ansible-roles
| 138 |
108157
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat import unittest
from ansible_collections.manala.roles.plugins.filter.users_groups import users_groups
from ansible.errors import AnsibleFilterError
class Test(unittest.TestCase):
def test_not_list(self):
with self.assertRaises(AnsibleFilterError) as error:
users_groups(NotImplemented)
self.assertEqual("Expected an iterable but was a <class 'NotImplementedType'>", str(error.exception))
def test_not_groups_list(self):
with self.assertRaises(AnsibleFilterError) as error:
users_groups([], NotImplemented)
self.assertEqual("Expected a groups iterable but was a <class 'NotImplementedType'>", str(error.exception))
def test_skipped(self):
self.assertListEqual([
{'user': 'foo'},
], users_groups([
{'user': 'foo'},
], [
{'skipped': True},
]))
def test(self):
self.assertListEqual([
{'user': 'foo', 'group': 'foo'},
{'user': 'bar', 'group': 'bar'},
], users_groups([
{'user': 'foo'},
{'user': 'bar', 'group': 'baz'},
], [
{'item': {'user': 'foo'}, 'stdout': 'foo'},
{'item': {'user': 'bar'}, 'stdout': 'bar'},
]))
|
tests/__init__.py
|
mochazi/objprint
| 191 |
108176
|
<gh_stars>100-1000
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/objprint/blob/master/NOTICE.txt
|
dashboard/dashboard/graph_csv_test.py
|
ravitejavalluri/catapult
| 2,151 |
108215
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import csv
import StringIO
import unittest
import webapp2
import webtest
from dashboard import graph_csv
from dashboard.common import datastore_hooks
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import graph_data
class GraphCsvTest(testing_common.TestCase):
def setUp(self):
super(GraphCsvTest, self).setUp()
app = webapp2.WSGIApplication([('/graph_csv', graph_csv.GraphCsvHandler)])
self.testapp = webtest.TestApp(app)
self.SetCurrentUser('<EMAIL>', is_admin=True)
def _AddMockData(self):
master = graph_data.Master(id='ChromiumPerf').put()
bots = []
for name in ['win7', 'mac']:
bot = graph_data.Bot(id=name, parent=master).put()
bots.append(bot)
graph_data.TestMetadata(id='ChromiumPerf/%s/dromaeo' % name).put()
dom_test = graph_data.TestMetadata(
id='ChromiumPerf/%s/dromaeo/dom' % name, has_rows=True).put()
test_container_key = utils.GetTestContainerKey(dom_test)
for i in range(15000, 16000, 5):
graph_data.Row(parent=test_container_key, id=i, value=float(i * 2.5),
error=(i + 5)).put()
def _AddMockInternalData(self):
master = graph_data.Master(id='ChromiumPerf').put()
bots = []
for name in ['win7', 'mac']:
bot = graph_data.Bot(id=name, parent=master, internal_only=True).put()
bots.append(bot)
graph_data.TestMetadata(
id='ChromiumPerf/%s/dromaeo' % name, internal_only=True).put()
dom_test = graph_data.TestMetadata(
id='ChromiumPerf/%s/dromaeo/dom' % name,
has_rows=True,
internal_only=True).put()
test_container_key = utils.GetTestContainerKey(dom_test)
for i in range(1, 50):
graph_data.Row(
parent=test_container_key, id=i, value=float(i * 2), error=(i + 10),
internal_only=True).put()
def _CheckGet(
self, result_query, expected_result, whitelisted_ip='', status=200):
"""Asserts that the given query has the given CSV result.
Args:
result_query: The path and query string to request.
expected_result: The expected table of values (list of lists).
whitelisted_ip: The IP address to set as request remote address.
"""
response_rows = []
response = self.testapp.get(
result_query,
extra_environ={'REMOTE_ADDR': whitelisted_ip},
status=status)
if status != 200:
return
for row in csv.reader(StringIO.StringIO(response.body)):
response_rows.append(row)
self.assertEqual(expected_result, response_rows)
def testGetCsv(self):
self._AddMockData()
response = self.testapp.get(
'/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom')
for index, row, in enumerate(csv.reader(StringIO.StringIO(response.body))):
# Skip the headers
if index > 0:
expected_rev = str(15000 + ((index - 1) * 5))
expected_value = str(int(expected_rev) * 2.5)
self.assertEqual([expected_rev, expected_value], row)
def testPost(self):
self._AddMockData()
response = self.testapp.post(
'/graph_csv?', {'test_path': 'ChromiumPerf/win7/dromaeo/dom'})
for index, row, in enumerate(csv.reader(StringIO.StringIO(response.body))):
# Skip the headers
if index > 0:
expected_rev = str(15000 + ((index - 1) * 5))
expected_value = str(int(expected_rev) * 2.5)
self.assertEqual([expected_rev, expected_value], row)
def testRevNumRows(self):
self._AddMockData()
query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
'rev=15270&num_points=5')
expected = [
['revision', 'value'],
['15250', '38125.0'],
['15255', '38137.5'],
['15260', '38150.0'],
['15265', '38162.5'],
['15270', '38175.0'],
]
self._CheckGet(query, expected)
def testAttrRows(self):
self._AddMockData()
query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
'rev=15270&num_points=5&attr=revision,error,value')
expected = [
['revision', 'error', 'value'],
['15250', '15255.0', '38125.0'],
['15255', '15260.0', '38137.5'],
['15260', '15265.0', '38150.0'],
['15265', '15270.0', '38162.5'],
['15270', '15275.0', '38175.0'],
]
self._CheckGet(query, expected)
query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
'rev=15270&num_points=5&attr=value')
expected = [
['value'],
['38125.0'],
['38137.5'],
['38150.0'],
['38162.5'],
['38175.0'],
]
self._CheckGet(query, expected)
query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
'num_points=5&attr=revision,random,value')
expected = [
['revision', 'random', 'value'],
['15975', '', '39937.5'],
['15980', '', '39950.0'],
['15985', '', '39962.5'],
['15990', '', '39975.0'],
['15995', '', '39987.5'],
]
self._CheckGet(query, expected)
def testGet_WithNonInternalUserAndWhitelistedIP(self):
self._AddMockInternalData()
self.UnsetCurrentUser()
datastore_hooks.InstallHooks()
testing_common.SetIpWhitelist(['192.168.3.11'])
query = '/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&num_points=3'
expected = [['revision', 'value']]
self._CheckGet(query, expected, status=500)
def testGet_WhitelistedIPOnly(self):
self.PatchDatastoreHooksRequest('192.168.3.11')
self._AddMockInternalData()
self.UnsetCurrentUser()
datastore_hooks.InstallHooks()
testing_common.SetIpWhitelist(['192.168.3.11'])
query = '/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&num_points=3'
expected = [
['revision', 'value'],
['47', '94.0'],
['48', '96.0'],
['49', '98.0']
]
self._CheckGet(query, expected, whitelisted_ip='192.168.3.11')
def testGet_NoTestPathGiven_GivesError(self):
testing_common.SetIpWhitelist(['192.168.3.11'])
self.testapp.get(
'/graph_csv',
extra_environ={'REMOTE_ADDR': '192.168.3.11'},
status=400)
if __name__ == '__main__':
unittest.main()
|
python/test.py
|
dugsong/libdnet
| 103 |
108216
|
<filename>python/test.py
#!/usr/bin/env python
import sys, unittest
from os import listdir
from interfacefinder import mac_addr,local_ip,loopback_intf
for dir in listdir('build'):
if dir.startswith('lib.'):
sys.path.insert(0, "build/" + dir)
import dnet
class AddrTestCase(unittest.TestCase):
def test_addr_cmp(self):
for atxt in ('1.2.3.0', '0:d:e:a:d:0', 'fc00:e968:6179::de52:7100:fa:ce:0'):
a = dnet.addr(atxt)
b = dnet.addr(atxt)
assert a == b
b = dnet.addr(atxt[:-1] + '1')
assert a < b
assert b > a
def test_addr_bcast(self):
d = { 32:'10.0.0.0', 31:'10.0.0.1', 30:'10.0.0.3', 29:'10.0.0.7',
28:'10.0.0.15', 27:'10.0.0.31', 26:'10.0.0.63', 25:'10.0.0.127',
24:'10.0.0.255', 23:'10.0.1.255', 22:'10.0.3.255',
21:'10.0.7.255', 20:'10.0.15.255', 19:'10.0.31.255' }
for bits in d:
a = dnet.addr('%s/%d' % (d[32], bits))
b = a.bcast()
self.assertTrue(b.__repr__() == d[bits],
'wrong bcast for /%d' % bits)
def test_addr_net(self):
d = { 32:'172.16.58.3', 31:'192.168.127.12', 30:'172.16.17.32',
29:'172.16.31.10', 28:'192.168.127.12', 27:'192.168.3.11',
26:'172.16.17.32', 25:'192.168.3.11', 24:'172.16.31.10',
23:'192.168.127.12', 22:'172.16.31.10', 21:'172.16.17.32',
20:'192.168.127.12', 19:'192.168.3.11' }
for bits in d:
a = dnet.addr('%s/%d' % (d[32], bits))
b = a.net()
self.assertTrue(b.__repr__() == d[bits],
'wrong net for /%d' % bits)
def test_addr_properties(self):
atxt = '1.2.3.4/24'
a = dnet.addr(atxt)
assert a.type == dnet.ADDR_TYPE_IP and a.bits == 24
assert a.ip == b'\x01\x02\x03\x04' and a.__repr__() == atxt
try: self.assertTrue(a.eth == 'xxx', 'invalid eth property')
except ValueError: pass
atxt = '00:0d:0e:0a:0d:00'
a = dnet.addr(atxt)
assert a == dnet.addr('0:d:E:a:D:0')
assert a.type == dnet.ADDR_TYPE_ETH and a.bits == 48
assert a.eth == b'\x00\x0d\x0e\x0a\x0d\x00' and a.__repr__() == atxt
try: self.assertTrue(a.ip6 == 'xxx', 'invalid ip6 property')
except ValueError: pass
atxt = 'fe80::dead:beef:feed:face/48'
a = dnet.addr(atxt)
assert a == dnet.addr('fe80:0:0::dead:beef:feed:face/48')
assert a.type == dnet.ADDR_TYPE_IP6 and a.bits == 48
assert a.ip6 == b'\xfe\x80\x00\x00\x00\x00\x00\x00\xde\xad\xbe\xef\xfe\xed\xfa\xce' and a.__repr__() == atxt
try: self.assertTrue(a.ip == 'xxx', 'invalid ip property')
except ValueError: pass
class ArpTestCase(unittest.TestCase):
def setUp(self):
self.arp = dnet.arp()
self.assertTrue(self.arp, "couldn't open ARP handle")
def tearDown(self):
del self.arp
def test_arp(self):
# XXX - site-specific values here!
pa = dnet.addr(local_ip)
ha = dnet.addr(mac_addr)
self.assertTrue(self.arp.add(pa, ha) == None, "couldn't add ARP entry")
self.assertTrue(self.arp.get(pa) == ha, "couldn't find ARP entry")
self.assertTrue(self.arp.delete(pa) == None, "couldn't delete ARP entry")
self.assertTrue(self.arp.get(pa) == None, "wrong ARP entry present")
self.assertTrue(self.arp.add(pa, ha) == None, "couldn't add ARP entry")
self.assertTrue(self.arp.get(pa) == ha, "couldn't find ARP entry")
def __arp_cb(self, pa, arg):
# XXX - do nothing
return arg
def test_arp_loop(self):
assert self.arp.loop(self.__arp_cb, 0) == 0
assert self.arp.loop(self.__arp_cb, 123) == 123
def test_arp_misc(self):
sha = b'\x00\x0d\x0e\x0a\x0d\x00'
spa = b'\x01\x02\x03\x04'
dha = b'\x00\x0b\x0e\x0e\x0f\x00'
dpa = b'\x05\x06\x07\x08'
msg = dnet.arp_pack_hdr_ethip(dnet.ARP_OP_REQUEST, sha, spa, dha, dpa)
assert msg == b'\x00\x01\x08\x00\x06\x04\x00\x01\x00\r\x0e\n\r\x00\x01\x02\x03\x04\x00\x0b\x0e\x0e\x0f\x00\x05\x06\x07\x08'
class EthTestCase(unittest.TestCase):
def setUp(self):
self.dev = dnet.intf().get_dst(dnet.addr('1.2.3.4'))['name']
self.eth = dnet.eth(self.dev)
self.assertTrue(self.eth, "couldn't open Ethernet handle")
def tearDown(self):
del self.eth
def test_eth_get(self):
mac = self.eth.get()
self.assertTrue(mac, "couldn't get Ethernet address for %s" % self.dev)
def test_eth_misc(self):
n = b'\x00\x0d\x0e\x0a\x0d\x00'
a = '00:0d:0e:0a:0d:00'
self.assertTrue(dnet.eth_ntoa(n) == a)
self.assertTrue(dnet.eth_aton(a) == n)
dst = b'\x00\x0d\x0e\x0a\x0d\x01'
self.assertTrue(dnet.eth_pack_hdr(n, dst, dnet.ETH_TYPE_IP) ==
b'\x00\r\x0e\n\r\x00\x00\r\x0e\n\r\x01\x08\x00')
class FwTestCase(unittest.TestCase):
def setUp(self):
self.dev = dnet.intf().get_dst(dnet.addr('1.2.3.4'))['name']
self.fw = dnet.fw()
self.assertTrue(self.fw, "couldn't open firewall handle")
def tearDown(self):
del self.fw
def test_fw(self):
src = dnet.addr('1.2.3.4')
dst = dnet.addr('5.6.7.8')
d = { 'device':self.dev,
'op':dnet.FW_OP_BLOCK,
'dir':dnet.FW_DIR_OUT,
'proto':dnet.IP_PROTO_UDP,
'src':src,
'dst':dst,
'dport':(660, 666)
}
self.assertTrue(self.fw.add(d) == None,
"couldn't add firewall rule: %s" % d)
self.assertTrue(self.fw.delete(d) == None,
"couldn't delete firewall rule: %s" % d)
def __fw_cb(self, rule, arg):
# XXX - do nothing
return arg
def test_fw_loop(self):
assert self.fw.loop(self.__fw_cb, 0) == 0
# XXX - no guarantee of existing fw rules.
#assert self.fw.loop(self.__fw_cb, 123) == 123
class IntfTestCase(unittest.TestCase):
def setUp(self):
self.intf = dnet.intf()
self.assertTrue(self.intf, "couldn't open interface handle")
def tearDown(self):
del self.intf
def test_intf_get(self):
lo0 = self.intf.get(loopback_intf)
self.assertTrue(lo0['name'] == loopback_intf, "couldn't get loopback config")
self.assertTrue(self.intf.get_src(dnet.addr('127.0.0.1')) == lo0,
"couldn't get_src 127.0.0.1")
gw = self.intf.get_dst(dnet.addr('1.2.3.4'))
self.assertTrue(gw, "couldn't get outgoing interface")
def test_intf_set(self):
lo0 = self.intf.get(loopback_intf)
old_mtu = lo0['mtu']
new_mtu = 1234
lo0['mtu'] = new_mtu
self.intf.set(lo0)
lo0 = self.intf.get(loopback_intf)
assert lo0['mtu'] == new_mtu
lo0['mtu'] = old_mtu
self.intf.set(lo0)
def __intf_cb(self, ifent, arg):
# XXX - do nothing
return arg
def test_intf_loop(self):
assert self.intf.loop(self.__intf_cb, 0) == 0
assert self.intf.loop(self.__intf_cb, 123) == 123
class IpTestCase(unittest.TestCase):
def setUp(self):
self.ip = dnet.ip()
self.assertTrue(self.ip, "couldn't open raw IP handle")
def tearDown(self):
del self.ip
def test_ip_misc(self):
n = b'\x01\x02\x03\x04'
a = '1.2.3.4'
self.assertTrue(dnet.ip_ntoa(n) == a)
self.assertTrue(dnet.ip_aton(a) == n)
dst = b'\x05\x06\x07\x08'
hdr = dnet.ip_pack_hdr(0, dnet.IP_HDR_LEN, 666, 0, 255,dnet.IP_PROTO_UDP, n, dst)
self.assertTrue(hdr == b'E\x00\x00\x14\x02\x9a\x00\x00\xff\x11\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08')
hdr = dnet.ip_checksum(hdr)
self.assertTrue(hdr == '\x00\x00\x14\x02\x9a\x00\x00\xff\x11\xa9\x01\x02\x03\x04\x05\x06\x07\x08')
class RandTestCase(unittest.TestCase):
def setUp(self):
self.rand = dnet.rand()
self.assertTrue(self.rand, "couldn't open random handle")
def tearDown(self):
del self.rand
class RouteTestCase(unittest.TestCase):
def setUp(self):
self.route = dnet.route()
self.assertTrue(self.route, "couldn't open route handle")
def tearDown(self):
del self.route
def test_route(self):
dst = dnet.addr('1.2.3.4/24')
gw = dnet.addr('1172.16.31.10')
self.route.add(dst, gw)
self.assertTrue(self.route.get(dst) == gw)
self.route.delete(dst)
def __route_cb(self, dst, arg):
# XXX - do nothing
return arg
def test_route_loop(self):
assert self.route.loop(self.__route_cb, 0) == 0
assert self.route.loop(self.__route_cb, 123) == 123
if __name__ == '__main__':
unittest.main()
|
backend/boards/admin.py
|
LucasSantosGuedes/App-Gestao
| 142 |
108222
|
from django.contrib import admin
from .models import Board, List, Item, Label, Comment, Attachment, Notification
admin.site.register(Board)
admin.site.register(List)
admin.site.register(Item)
admin.site.register(Label)
admin.site.register(Comment)
admin.site.register(Attachment)
admin.site.register(Notification)
|
playbooks/roles/libraries/library/docker_pull_image.py
|
lowang-bh/lain-1
| 524 |
108260
|
<gh_stars>100-1000
#!/usr/bin/python
from subprocess import call, check_call
def main():
module = AnsibleModule(
argument_spec=dict(
image=dict(required=True),
registry=dict(default=''),
),
)
image = module.params['image']
registry = module.params['registry']
retval = call(['docker', 'inspect', '-f', '{{.Id}}', image])
if retval == 0:
# image already exists
module.exit_json(changed=False)
if registry:
src_image = '%s/%s' % (registry, image)
check_call(['docker', 'pull', src_image])
check_call(['docker', 'tag', src_image, image])
else:
check_call(['docker', 'pull', image])
module.exit_json(changed=True)
from ansible.module_utils.basic import *
main()
|
learntopredict/carracing/nn.py
|
adafok/brain-tokyo-workshop
| 1,097 |
108269
|
# neural network functions and classes
import numpy as np
import random
import json
import cma
from es import SimpleGA, CMAES, PEPG, OpenES
from env import make_env
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(x, 0)
def passthru(x):
return x
# useful for discrete actions
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
# useful for discrete actions
def sample(p):
return np.argmax(np.random.multinomial(1, p))
"""
learning the model
"""
class RNNCell:
def __init__(self, input_size, weight, bias):
self.input_size=input_size
self.weight = weight
self.bias = bias
def __call__(self, x, h):
concat = np.concatenate((x, h), axis=1)
hidden = np.matmul(concat, self.weight)+self.bias
return np.tanh(hidden)
# LSTM in a few lines of numpy
class LSTMCell:
'''Numpy LSTM cell used for inference only.'''
def __init__(self, input_size, weight, bias, forget_bias=1.0):
self.input_size=input_size
self.W_full=weight # np.concatenate((Wxh, Whh), axis=0)
self.bias=bias
        self.forget_bias=forget_bias
def __call__(self, x, h, c):
concat = np.concatenate((x, h), axis=1)
hidden = np.matmul(concat, self.W_full)+self.bias
i, g, f, o = np.split(hidden, 4, axis=1)
i = sigmoid(i)
g = np.tanh(g)
f = sigmoid(f+self.forget_bias)
o = sigmoid(o)
new_c = np.multiply(c, f) + np.multiply(g, i)
new_h = np.multiply(np.tanh(new_c), o)
return new_h, new_c
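# --- Usage sketch (editor addition, not part of the original module) ---
# A minimal, hedged demonstration of the cell interfaces above with random weights;
# the sizes are illustrative only.
def _example_cells():
    input_size, hidden_size, batch = 3, 8, 1
    x = np.random.randn(batch, input_size)
    h = np.zeros((batch, hidden_size))
    c = np.zeros((batch, hidden_size))
    rnn = RNNCell(input_size,
                  np.random.randn(input_size + hidden_size, hidden_size) * 0.1,
                  np.zeros(hidden_size))
    h_rnn = rnn(x, h)                        # (batch, hidden_size)
    lstm = LSTMCell(input_size,
                    np.random.randn(input_size + hidden_size, 4 * hidden_size) * 0.1,
                    np.zeros(4 * hidden_size))
    h_new, c_new = lstm(x, h, c)             # each (batch, hidden_size)
    return h_rnn, h_new, c_new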
class RNNModel:
def __init__(self, game):
self.env_name = game.env_name
self.hidden_size = game.layers[0]
self.layer_1 = game.layers[1]
self.layer_2 = game.layers[2]
self.rnn_mode = True
self.input_size = game.input_size
self.output_size = game.output_size
self.render_mode = False
self.shapes = [ (self.input_size + self.hidden_size, 1*self.hidden_size), # RNN weights
(self.input_size + self.hidden_size, self.layer_1),# predict actions output
(self.layer_1, self.output_size)] # predict actions output
self.weight = []
self.bias = []
self.param_count = 0
idx = 0
for shape in self.shapes:
self.weight.append(np.zeros(shape=shape))
self.bias.append(np.zeros(shape=shape[1]))
self.param_count += (np.product(shape) + shape[1])
idx += 1
self.init_h = np.zeros((1, self.hidden_size))
self.h = self.init_h
self.param_count += 1*self.hidden_size
self.rnn = RNNCell(self.input_size, self.weight[0], self.bias[0])
def reset(self):
self.h = self.init_h
def make_env(self, seed=-1, render_mode=False):
self.render_mode = render_mode
self.env = make_env(self.env_name, seed=seed, render_mode=render_mode)
def get_action(self, real_obs):
obs = real_obs.reshape(1, 3)
# update rnn:
#update_obs = np.concatenate([obs, action], axis=1)
self.h = self.rnn(obs, self.h)
# get action
total_obs = np.concatenate([obs, self.h], axis=1)
# calculate action using 2 layer network from output
hidden = np.tanh(np.matmul(total_obs, self.weight[1]) + self.bias[1])
action = np.tanh(np.matmul(hidden, self.weight[2]) + self.bias[2])
return action[0]
def set_model_params(self, model_params):
pointer = 0
for i in range(len(self.shapes)):
w_shape = self.shapes[i]
b_shape = self.shapes[i][1]
s_w = np.product(w_shape)
s = s_w + b_shape
chunk = np.array(model_params[pointer:pointer+s])
self.weight[i] = chunk[:s_w].reshape(w_shape)
self.bias[i] = chunk[s_w:].reshape(b_shape)
pointer += s
# rnn states
s = self.hidden_size
self.init_h = model_params[pointer:pointer+s].reshape((1, self.hidden_size))
self.h = self.init_h
self.rnn = RNNCell(self.input_size, self.weight[0], self.bias[0])
def load_model(self, filename):
with open(filename) as f:
data = json.load(f)
print('loading file %s' % (filename))
self.data = data
model_params = np.array(data[0]) # assuming other stuff is in data
self.set_model_params(model_params)
def get_random_model_params(self, stdev=0.1):
return np.random.randn(self.param_count)*stdev
|
experiments/vera/combine_runs.py
|
Elfsong/pygaggle
| 166 |
108311
|
<reponame>Elfsong/pygaggle
"""Script to evaluate search engine on TREC-covid qrels."""
import argparse
import logging
import collections
from tqdm import tqdm
import numpy as np
def load_run(path, topk=1000):
"""Loads run into a dict of key: query_id, value: list of candidate doc
ids."""
# We want to preserve the order of runs so we can pair the run file with
# the TFRecord file.
print('Loading run...')
run = collections.OrderedDict()
with open(path) as f:
for line in tqdm(f):
query_id, _, doc_title, rank, score, _ = line.split()
if query_id not in run:
run[query_id] = {}
run[query_id][doc_title] = float(score)
return run
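# --- Example (editor addition, not part of the original script) ---
# A hedged illustration of the TREC run format load_run() expects: six
# whitespace-separated fields per line (query_id Q0 doc_id rank score run_tag).
# The file name and values below are made up.
def _example_run_file(path='example.run'):
    with open(path, 'w') as f:
        f.write('1 Q0 doc123 1 12.34 my_run\n')
        f.write('1 Q0 doc456 2 11.02 my_run\n')
    return load_run(path)  # {'1': {'doc123': 12.34, 'doc456': 11.02}}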
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Retrieve documents for TREC-COVID queries.')
    parser.add_argument('--run_a', required=True, default='',
                        help='TREC mono run file')
    parser.add_argument('--run_b', required=True, default='',
                        help='TREC mono run file')
    parser.add_argument('--output', required=True, default='',
                        help='output file')
parser.add_argument('--alpha', required=True, type=float)
args = parser.parse_args()
run_a = load_run(args.run_a)
run_b = load_run(args.run_b)
run = collections.OrderedDict()
for query_id in run_a:
run[query_id] = []
for doc_id in run_a[query_id]:
run[query_id].append((doc_id, args.alpha * run_a[query_id][doc_id] + (1.0 - args.alpha) * run_b[query_id][doc_id]))
with open(args.output, 'w') as fout:
for query_id, doc_ids_scores in run.items():
doc_ids_scores.sort(key=lambda x: x[1], reverse=True)
for rank, (doc_id, score) in enumerate(doc_ids_scores):
fout.write(f'{query_id} Q0 {doc_id} {rank + 1} {score} vera{args.alpha}\n')
print('Done!')
|
mt.py
|
rcmckee/BPT
| 123 |
108330
|
from torchtext import data
from torch.utils.data import DataLoader
from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset
from modules import make_translation_model
from optim import get_wrapper
from loss import LabelSmoothing
import numpy as np
import torch as th
import torch.optim as optim
import argparse
import yaml
import os
def run(proc_id, n_gpus, devices, config, checkpoint):
th.manual_seed(config['seed'])
np.random.seed(config['seed'])
th.cuda.manual_seed_all(config['seed'])
dev_id = devices[proc_id]
if n_gpus > 1:
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip='127.0.0.1', master_port='12345')
world_size = n_gpus
th.distributed.init_process_group(backend="nccl",
init_method=dist_init_method,
world_size=world_size,
rank=dev_id)
_dataset = config['dataset']
grad_accum = config['grad_accum']
if _dataset == 'iwslt':
TEXT = [data.Field(batch_first=True) for _ in range(2)]
dataset = get_mt_dataset('iwslt')
train, dev, test = dataset.splits(exts=('.tc.zh', '.tc.en'), fields=TEXT, root='./data')
train = DocumentMTDataset(train, context_length=config['context_len'], part=(proc_id, n_gpus))
dev = DocumentMTDataset(dev, context_length=config['context_len'])
test = DocumentMTDataset(test, context_length=config['context_len'])
vocab_zh, vocab_en = dataset.load_vocab(root='./data')
print('vocab size: ', len(vocab_zh), len(vocab_en))
vocab_sizes = [len(vocab_zh), len(vocab_en)]
TEXT[0].vocab = vocab_zh
TEXT[1].vocab = vocab_en
batcher = MTBatcher(TEXT, graph_type=config['graph_type'], **config.get('graph_attrs', {}))
train_loader = DataLoader(dataset=train,
batch_size=config['batch_size'] // n_gpus,
collate_fn=batcher,
shuffle=True,
num_workers=6)
dev_loader = DataLoader(dataset=dev,
batch_size=config['dev_batch_size'],
collate_fn=batcher,
shuffle=False)
test_loader = DataLoader(dataset=test,
batch_size=config['dev_batch_size'],
collate_fn=batcher,
shuffle=False)
elif _dataset == 'wmt':
TEXT = data.Field(batch_first=True)
dataset = get_mt_dataset('wmt14')
train, dev, test = dataset.splits(exts=['.en', '.de'], fields=[TEXT, TEXT], root='./data')
train = MTDataset(train, part=(proc_id, n_gpus))
dev = MTDataset(dev)
test = MTDataset(test)
vocab = dataset.load_vocab(root='./data')[0]
print('vocab size: ', len(vocab))
vocab_sizes = [len(vocab)]
TEXT.vocab = vocab
batcher = MTBatcher(TEXT, graph_type=config['graph_type'], **config.get('graph_attrs', {}))
train_loader = DataLoader(dataset=train,
batch_size=config['batch_size'] // n_gpus,
collate_fn=batcher,
shuffle=True,
num_workers=6)
dev_loader = DataLoader(dataset=dev,
batch_size=config['dev_batch_size'],
collate_fn=batcher,
shuffle=False)
test_loader = DataLoader(dataset=test,
batch_size=config['dev_batch_size'],
collate_fn=batcher,
shuffle=False)
elif _dataset == 'multi':
TEXT = [data.Field(batch_first=True) for _ in range(2)]
dataset = get_mt_dataset('multi30k')
train, dev, test = dataset.splits(exts=['.en.atok', '.de.atok'], fields=TEXT, root='./data')
train = MTDataset(train, part=(proc_id, n_gpus))
dev = MTDataset(dev)
test = MTDataset(test)
vocab_en, vocab_de = dataset.load_vocab(root='./data')
print('vocab size: ', len(vocab_en), len(vocab_de))
vocab_sizes = [len(vocab_en), len(vocab_de)]
TEXT[0].vocab = vocab_en
TEXT[1].vocab = vocab_de
batcher = MTBatcher(TEXT, graph_type=config['graph_type'], **config.get('graph_attrs', {}))
train_loader = DataLoader(dataset=train,
batch_size=config['batch_size'] // n_gpus,
collate_fn=batcher,
shuffle=True,
num_workers=6)
dev_loader = DataLoader(dataset=dev,
batch_size=config['dev_batch_size'],
collate_fn=batcher,
shuffle=False)
test_loader = DataLoader(dataset=test,
batch_size=config['dev_batch_size'],
collate_fn=batcher,
shuffle=False)
dim_model = config['dim_model']
dim_ff = config['dim_ff']
num_heads = config['num_heads']
n_layers = config['n_layers']
m_layers = config['m_layers']
dropouti = config['dropouti']
dropouth = config['dropouth']
dropouta = config['dropouta']
dropoutc = config['dropoutc']
rel_pos = config['rel_pos']
model = make_translation_model(vocab_sizes, dim_model, dim_ff, num_heads,
n_layers, m_layers,
dropouti=dropouti, dropouth=dropouth,
dropouta=dropouta, dropoutc=dropoutc,
rel_pos=rel_pos)
if checkpoint != -1:
with open('checkpoints/{}-{}.pkl'.format(checkpoint, config['save_name']), 'rb') as f:
state_dict = th.load(f, map_location=lambda storage, loc: storage)
model.load_state_dict(state_dict)
# tie weight
if config.get('share_weight', False):
model.embed[-1].lut.weight = model.generator.proj.weight
criterion = LabelSmoothing(vocab_sizes[-1], smoothing=0.1)
device = th.device(dev_id)
th.cuda.set_device(device)
model, criterion = model.to(device), criterion.to(device)
n_epochs = config['n_epochs']
optimizer = get_wrapper('noam')(
dim_model, config['factor'], config.get('warmup', 4000),
optim.Adam(model.parameters(), lr=config['lr'], betas=(0.9, 0.98), eps=1e-9,
weight_decay=config.get('weight_decay', 0)))
for _ in range(checkpoint + 1):
for _ in range(len(train_loader)):
optimizer.step()
log_interval = config['log_interval']
for epoch in range(checkpoint + 1, n_epochs):
if proc_id == 0:
print("epoch {}".format(epoch))
print("training...")
model.train()
tot = 0
hit = 0
loss_accum = 0
for i, batch in enumerate(train_loader):
batch.y = batch.y.to(device)
batch.g_enc.edata['etype'] = batch.g_enc.edata['etype'].to(device)
batch.g_enc.ndata['x'] = batch.g_enc.ndata['x'].to(device)
batch.g_enc.ndata['pos'] = batch.g_enc.ndata['pos'].to(device)
batch.g_dec.edata['etype'] = batch.g_dec.edata['etype'].to(device)
batch.g_dec.ndata['x'] = batch.g_dec.ndata['x'].to(device)
batch.g_dec.ndata['pos'] = batch.g_dec.ndata['pos'].to(device)
out = model(batch)
loss = criterion(out, batch.y) / len(batch.y)
loss_accum += loss.item() * len(batch.y)
tot += len(batch.y)
hit += (out.max(dim=-1)[1] == batch.y).sum().item()
if proc_id == 0:
if (i + 1) % log_interval == 0:
print('step {}, loss : {}, acc : {}'.format(i, loss_accum / tot, hit / tot))
tot = 0
hit = 0
loss_accum = 0
loss.backward()
if (i + 1) % grad_accum == 0:
for param in model.parameters():
if param.requires_grad and param.grad is not None:
if n_gpus > 1:
th.distributed.all_reduce(param.grad.data,
op=th.distributed.ReduceOp.SUM)
param.grad.data /= (n_gpus * grad_accum)
optimizer.step()
optimizer.zero_grad()
model.eval()
tot = 0
hit = 0
loss_accum = 0
for batch in dev_loader:
with th.no_grad():
batch.y = batch.y.to(device)
batch.g_enc.edata['etype'] = batch.g_enc.edata['etype'].to(device)
batch.g_enc.ndata['x'] = batch.g_enc.ndata['x'].to(device)
batch.g_enc.ndata['pos'] = batch.g_enc.ndata['pos'].to(device)
batch.g_dec.edata['etype'] = batch.g_dec.edata['etype'].to(device)
batch.g_dec.ndata['x'] = batch.g_dec.ndata['x'].to(device)
batch.g_dec.ndata['pos'] = batch.g_dec.ndata['pos'].to(device)
out = model(batch)
loss_accum += criterion(out, batch.y)
tot += len(batch.y)
hit += (out.max(dim=-1)[1] == batch.y).sum().item()
if n_gpus > 1:
th.distributed.barrier()
if proc_id == 0:
print('evaluate...')
print('loss : {}, acc : {}'.format(loss_accum / tot, hit / tot))
tot = 0
hit = 0
loss_accum = 0
for batch in test_loader:
with th.no_grad():
batch.y = batch.y.to(device)
batch.g_enc.edata['etype'] = batch.g_enc.edata['etype'].to(device)
batch.g_enc.ndata['x'] = batch.g_enc.ndata['x'].to(device)
batch.g_enc.ndata['pos'] = batch.g_enc.ndata['pos'].to(device)
batch.g_dec.edata['etype'] = batch.g_dec.edata['etype'].to(device)
batch.g_dec.ndata['x'] = batch.g_dec.ndata['x'].to(device)
batch.g_dec.ndata['pos'] = batch.g_dec.ndata['pos'].to(device)
out = model(batch)
loss_accum += criterion(out, batch.y)
tot += len(batch.y)
hit += (out.max(dim=-1)[1] == batch.y).sum().item()
if n_gpus > 1:
th.distributed.barrier()
if proc_id == 0:
print('testing...')
print('loss : {}, acc : {}'.format(loss_accum / tot, hit / tot))
if not os.path.exists('checkpoints'):
os.mkdir('checkpoints')
with open('checkpoints/{}-{}.pkl'.format(epoch, config['save_name']), 'wb') as f:
th.save(model.state_dict(), f)
if __name__ == '__main__':
argparser = argparse.ArgumentParser("machine translation")
argparser.add_argument('--config', type=str)
argparser.add_argument('--gpu', type=str, default='0')
argparser.add_argument('--checkpoint', type=int, default=-1)
args = argparser.parse_args()
with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
devices = list(map(int, args.gpu.split(',')))
n_gpus = len(devices)
if n_gpus == 1:
run(0, n_gpus, devices, config, args.checkpoint)
else:
mp = th.multiprocessing
mp.spawn(run, args=(n_gpus, devices, config, args.checkpoint), nprocs=n_gpus)
|
tests/test_base.py
|
iamthad/blendergltf
| 343 |
108346
|
def test_get_custom_properties(exporters, mocker):
blender_data = mocker.MagicMock()
vector = mocker.MagicMock()
vector.to_list.return_value = [0.0, 0.0, 1.0]
blender_data.items.return_value = [
['str', 'spam'],
['float', 1.0],
['int', 42],
['bool', False],
['vector', vector],
]
assert exporters.BaseExporter.get_custom_properties(blender_data) == {
'str': 'spam',
'float': 1.0,
'int': 42,
'bool': False,
'vector': [0.0, 0.0, 1.0]
}
def test_ignore_properties(exporters, mocker):
blender_data = mocker.MagicMock()
blender_data.items.return_value = [
['_RNA_UI', None],
['cycles', None],
['cycles_visibility', None],
['str', 'remains'],
]
assert exporters.BaseExporter.get_custom_properties(blender_data) == {
'str': 'remains',
}
def test_invalid_properties(exporters, mocker):
blender_data = mocker.MagicMock()
blender_data.items.return_value = [
['unserializable', set()],
['str', 'remains'],
]
assert exporters.BaseExporter.get_custom_properties(blender_data) == {
'str': 'remains',
}
def test_check(exporters):
assert exporters.BaseExporter.check(None, None)
def test_default(exporters, mocker):
blender_data = mocker.MagicMock()
blender_data.name = 'Name'
assert exporters.BaseExporter.default(None, blender_data) == {'name': 'Name'}
def test_export(exporters):
assert exporters.BaseExporter.export(None, None) == {}
|
nb_third_party/dns/rdtypes/ANY/NXT.py
|
djprmf/namebench
| 226 |
108455
|
<reponame>djprmf/namebench<gh_stars>100-1000
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.rdatatype
import dns.name
class NXT(dns.rdata.Rdata):
"""NXT record
@ivar next: the next name
@type next: dns.name.Name object
@ivar bitmap: the type bitmap
@type bitmap: string
@see: RFC 2535"""
__slots__ = ['next', 'bitmap']
def __init__(self, rdclass, rdtype, next, bitmap):
super(NXT, self).__init__(rdclass, rdtype)
self.next = next
self.bitmap = bitmap
def to_text(self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
bits = []
for i in xrange(0, len(self.bitmap)):
byte = ord(self.bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(i * 8 + j))
text = ' '.join(bits)
return '%s %s' % (next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
bitmap = ['\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00' ]
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
if token.value.isdigit():
nrdtype = int(token.value)
else:
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("NXT with bit 0")
if nrdtype > 127:
raise dns.exception.SyntaxError("NXT with bit > 127")
i = nrdtype // 8
bitmap[i] = chr(ord(bitmap[i]) | (0x80 >> (nrdtype % 8)))
bitmap = dns.rdata._truncate_bitmap(bitmap)
return cls(rdclass, rdtype, next, bitmap)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
self.next.to_wire(file, None, origin)
file.write(self.bitmap)
def to_digestable(self, origin = None):
return self.next.to_digestable(origin) + self.bitmap
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
bitmap = wire[current : current + rdlen]
if not origin is None:
next = next.relativize(origin)
return cls(rdclass, rdtype, next, bitmap)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.next = self.next.choose_relativity(origin, relativize)
def _cmp(self, other):
v = cmp(self.next, other.next)
if v == 0:
v = cmp(self.bitmap, other.bitmap)
return v
|
utils/generate_preprocess_macro.py
|
kuba-kazimierczak/kaguya
| 336 |
108474
|
import os
import sys
import re
import argparse
def gen_repeate_macro(out,format,count,start=1):
for num in range(start, count):
out.write(format.format(no=num,dec=num-1,inc=num+1))
out.write('\n')
def KAGUYA_PP_REPEAT(out,count):
out.write('#define KAGUYA_PP_REPEAT0(MACRO)\n')
gen_repeate_macro(out,'#define KAGUYA_PP_REPEAT{no}(MACRO) KAGUYA_PP_REPEAT{dec}(MACRO) MACRO({no})',count)
out.write('#define KAGUYA_PP_REPEAT(COUNT,MACRO) KAGUYA_PP_CAT(KAGUYA_PP_REPEAT,COUNT)(MACRO)\n')
def KAGUYA_PP_REPEAT_DEF(out,count):
out.write('#define KAGUYA_PP_REPEAT_DEF0(MACRO)\n')
gen_repeate_macro(out,'#define KAGUYA_PP_REPEAT_DEF{no}(MACRO) KAGUYA_PP_REPEAT_DEF{dec}(MACRO) MACRO({no})',count)
out.write('#define KAGUYA_PP_REPEAT_DEF(COUNT,MACRO) KAGUYA_PP_CAT(KAGUYA_PP_REPEAT_DEF,COUNT)(MACRO)\n')
def KAGUYA_PP_REVERSE_REPEAT(out,count):
out.write('#define KAGUYA_PP_REVERSE_REPEAT0(MACRO)\n')
gen_repeate_macro(out,'#define KAGUYA_PP_REVERSE_REPEAT{no}(MACRO) MACRO({no}) KAGUYA_PP_REVERSE_REPEAT{dec}(MACRO)',count)
out.write('#define KAGUYA_PP_REVERSE_REPEAT(COUNT,MACRO) KAGUYA_PP_CAT(KAGUYA_PP_REVERSE_REPEAT,COUNT)(MACRO)\n')
def KAGUYA_PP_REPEAT_ARG(out,count):
out.write('#define KAGUYA_PP_REPEAT_ARG0(MACRO)\n')
out.write('#define KAGUYA_PP_REPEAT_ARG1(MACRO) MACRO(1)\n')
gen_repeate_macro(out,'#define KAGUYA_PP_REPEAT_ARG{no}(MACRO) KAGUYA_PP_REPEAT_ARG{dec}(MACRO), MACRO({no})',count,2)
out.write('#define KAGUYA_PP_REPEAT_ARG(COUNT,MACRO) KAGUYA_PP_CAT(KAGUYA_PP_REPEAT_ARG,COUNT)(MACRO)\n')
def KAGUYA_PP_REPEAT_DEF_VA_ARG(out,count):
out.write('#define KAGUYA_PP_REPEAT_DEF_VA_ARG0(MACRO, ...)\n')
gen_repeate_macro(out,'#define KAGUYA_PP_REPEAT_DEF_VA_ARG{no}(MACRO, ...) KAGUYA_VA_ARG(KAGUYA_PP_REPEAT_DEF_VA_ARG{dec}(MACRO,__VA_ARGS__) MACRO({no},__VA_ARGS__))',count)
out.write('#define KAGUYA_PP_REPEAT_DEF_VA_ARG(COUNT,MACRO, ...) KAGUYA_VA_ARG(KAGUYA_PP_CAT(KAGUYA_PP_REPEAT_DEF_VA_ARG,COUNT)(MACRO,__VA_ARGS__))\n')
def KAGUYA_PP_WHILE(out,count):
out.write('#define KAGUYA_PP_WHILE0(MACRO,R) R\n')
gen_repeate_macro(out,'#define KAGUYA_PP_WHILE{no}(MACRO,R) MACRO(KAGUYA_PP_WHILE{dec}(MACRO,R))',count)
out.write('#define KAGUYA_PP_WHILE(COUNT,R,MACRO) KAGUYA_PP_CAT(KAGUYA_PP_WHILE,COUNT)(MACRO,R)\n')
def KAGUYA_PP_INC(out,count):
gen_repeate_macro(out,'#define KAGUYA_PP_INC{no} {inc}',count,0)
out.write('#define KAGUYA_PP_INC(N) KAGUYA_PP_CAT(KAGUYA_PP_INC,N)\n')
def KAGUYA_PP_DEC(out,count):
gen_repeate_macro(out,'#define KAGUYA_PP_DEC{no} {dec}',count)
out.write('#define KAGUYA_PP_DEC(N) KAGUYA_PP_CAT(KAGUYA_PP_DEC,N)\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("count", help="count of generate macro",type=int)
args = parser.parse_args()
out = sys.stdout
out.write('//generated by '+__file__+'\n')
out.write('#pragma once\n')
KAGUYA_PP_REPEAT(out,args.count)
out.write('\n\n')
KAGUYA_PP_REPEAT_DEF(out,args.count)
out.write('\n\n')
KAGUYA_PP_REVERSE_REPEAT(out,args.count)
out.write('\n\n')
KAGUYA_PP_REPEAT_ARG(out,args.count)
out.write('\n\n')
KAGUYA_PP_REPEAT_DEF_VA_ARG(out,args.count)
out.write('\n\n')
KAGUYA_PP_WHILE(out,args.count)
out.write('\n\n')
KAGUYA_PP_INC(out,args.count)
out.write('\n\n')
KAGUYA_PP_DEC(out,args.count)
|
ghostwriter/commandcenter/migrations/0007_auto_20210616_0340.py
|
bbhunter/Ghostwriter
| 601 |
108490
|
<reponame>bbhunter/Ghostwriter
# Generated by Django 3.0.10 on 2021-06-16 03:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('commandcenter', '0006_auto_20210614_2224'),
]
operations = [
migrations.AlterField(
model_name='namecheapconfiguration',
name='client_ip',
field=models.CharField(default='Whitelisted IP Address', help_text='Your external IP address registered with Namecheap', max_length=255, verbose_name='Whitelisted IP Address'),
),
migrations.AlterField(
model_name='namecheapconfiguration',
name='page_size',
field=models.IntegerField(default=100, help_text='Maximum number of domains to return (100 is the max allowed)', verbose_name='Page Size'),
),
]
|
RecoEcal/EgammaCoreTools/python/EcalSCDynamicDPhiParametersESProducer_cfi.py
|
ckamtsikis/cmssw
| 852 |
108512
|
import FWCore.ParameterSet.Config as cms
ecalSCDynamicDPhiParametersESProducer = cms.ESProducer("EcalSCDynamicDPhiParametersESProducer",
# Parameters from the analysis by <NAME> [https://indico.cern.ch/event/949294/contributions/3988389/attachments/2091573/3514649/2020_08_26_Clustering.pdf]
# dynamic dPhi parameters depending on cluster energy and seed crystal eta
dynamicDPhiParameterSets = cms.VPSet(
cms.PSet(
eMin = cms.double(0.),
etaMin = cms.double(2.),
yoffset = cms.double(0.0928887),
scale = cms.double(1.22321),
xoffset = cms.double(-0.260256),
width = cms.double(0.345852),
saturation = cms.double(0.12),
cutoff = cms.double(0.3)
),
cms.PSet(
eMin = cms.double(0.),
etaMin = cms.double(1.75),
yoffset = cms.double(0.05643),
scale = cms.double(1.60429),
xoffset = cms.double(-0.642352),
width = cms.double(0.458106),
saturation = cms.double(0.12),
cutoff = cms.double(0.45)
),
cms.PSet(
eMin = cms.double(0.),
etaMin = cms.double(1.479),
yoffset = cms.double(0.0497038),
scale = cms.double(0.975707),
xoffset = cms.double(-0.18149),
width = cms.double(0.431729),
saturation = cms.double(0.14),
cutoff = cms.double(0.55)
),
cms.PSet(
eMin = cms.double(0.),
etaMin = cms.double(0.),
yoffset = cms.double(0.0280506),
scale = cms.double(0.946048),
xoffset = cms.double(-0.101172),
width = cms.double(0.432767),
saturation = cms.double(0.14),
cutoff = cms.double(0.6)
)
)
)
|
tools/generate_inputs.py
|
juxiangyu/kaldi-onnx
| 224 |
108533
|
# Copyright 2019 Xiaomi, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
def generate_features(batch, chunk, dim):
data = np.random.rand(batch, chunk, dim)
print "genearted data shape:", data.shape
return data
def pad_context(input_data,
left_context,
right_context):
if left_context > 0:
first = np.expand_dims(input_data[:, 0, :], axis=1)
first = np.repeat(first, left_context, axis=1)
out_data = np.concatenate((first, input_data), axis=1)
else:
out_data = input_data
if right_context > 0:
last = np.expand_dims(input_data[:, -1, :], axis=1)
last = np.repeat(last, right_context, axis=1)
out_data = np.concatenate((out_data, last), axis=1)
print "genearted padded context data shape:", out_data.shape
return out_data
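# --- Usage sketch (editor addition, not part of the original script) ---
# A minimal, hedged illustration of the two helpers above; the sizes are
# illustrative only.
def _example_padding():
    feats = generate_features(batch=1, chunk=10, dim=40)          # (1, 10, 40)
    padded = pad_context(feats, left_context=3, right_context=3)  # (1, 16, 40)
    return padded.shape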
def save_mace_input(data, file_path):
# np.save(file_path, data)
data.astype(np.float32).tofile(file_path)
def save_kaldi_input(data, shape, out_path):
with open(out_path, 'w') as f:
        for b in range(shape[0]):
header = 'utterance-id' + str(b) + ' [\n'
f.write(header)
            for n in range(shape[1]):
d = data[b, n, :]
d_str = " ".join(str(x) for x in d)
if n < shape[1] - 1:
d_str = d_str + '\n'
else:
d_str = d_str + ' ]\n'
f.write(d_str)
# f.write('\n')
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument('--input-dim', type=int, dest='input_dim',
help='input dim', default=40)
parser.add_argument('--batch', type=int, dest='batch',
help='batch size', default=1)
parser.add_argument('--left-context', type=int, dest='left_context',
help='left context', required=True)
parser.add_argument('--right-context', type=int, dest='right_context',
help='right context size', required=True)
parser.add_argument('--chunk-size', type=int, dest='chunk_size',
help='chunk size', required=True)
parser.add_argument("--kaldi-file", required=True, type=str,
dest="kaldi_data_file",
help="kaldi data file path")
parser.add_argument("--mace-file", required=True, type=str,
dest="mace_data_file",
help="mace data file path")
args = parser.parse_args()
return args
def main():
args = get_args()
data = generate_features(args.batch, args.chunk_size, args.input_dim)
save_kaldi_input(data, [args.batch, args.chunk_size,
args.input_dim], args.kaldi_data_file)
mace_data = pad_context(data, args.left_context, args.right_context)
save_mace_input(mace_data, args.mace_data_file)
if __name__ == "__main__":
main()
|
venv/Lib/site-packages/nipype/interfaces/mne/__init__.py
|
richung99/digitizePlots
| 585 |
108553
|
# -*- coding: utf-8 -*-
"""MNE is a software for exploring, visualizing, and analyzing human neurophysiological data."""
from .base import WatershedBEM
|
examples/devel/cgo08.py
|
dualword/pymol-open-source
| 636 |
108574
|
from pymol.cgo import *
from pymol import cmd
from random import random, seed
from chempy import cpv
# CGO cones
# first draw some walls
obj = [
COLOR, 1.0, 1.0, 1.0,
BEGIN, TRIANGLE_STRIP,
NORMAL, 0.0, 0.0, 1.0,
VERTEX, 0.0, 0.0, 0.0,
VERTEX, 10.0, 0.0, 0.0,
VERTEX, 0.0, 10.0, 0.0,
VERTEX, 10.0, 10.0, 0.0,
END,
BEGIN, TRIANGLE_STRIP,
NORMAL, 1.0, 0.0, 0.0,
VERTEX, 0.0, 0.0, 0.0,
VERTEX, 0.0, 10.0, 0.0,
VERTEX, 0.0, 0.0, 10.0,
VERTEX, 0.0, 10.0, 10.0,
END,
BEGIN, TRIANGLE_STRIP,
NORMAL, 0.0, 1.0, 0.0,
VERTEX, 0.0, 0.0, 0.0,
VERTEX, 0.0, 0.0, 10.0,
VERTEX, 10.0, 0.0, 0.0,
VERTEX, 10.0, 0.0, 10.0,
END
]
seed(0x1)
def random_conic(box, size, min_axis):
# return a random ellipsoid record of the form:
# [ ELLIPSOID, x_pos, y_pos, z_pos, size, x0, y0, z0, x1, y1, z2, x2, y2, z2 ]
# where the xyz vectors are orthogonal and of length 1.0 or less.
box = box - size
tmp0 = [ size + random() * box, size + random() * box, size + random() * box ]
tmp1 = cpv.random_vector()
tmp2 = cpv.scale(tmp1,box/10)
tmp1 = cpv.add(tmp2,tmp0)
return [ CONE,
tmp0[0], tmp0[1], tmp0[2], # coordinates
tmp1[0], tmp1[1], tmp1[2],
(abs(random())*0.4+0.2) * size, # radii
(abs(random())*0.1+0.01) * size,
random(), random(), random(), # colors
random(), random(), random(),
1.0, 1.0 ]
for count in range(50):
obj.extend( random_conic(10.0, 1.5, 0.2) )
# then we load it into PyMOL
cmd.load_cgo(obj,'cgo08')
# rotate the view
cmd.turn('y',-45)
cmd.turn('x',30)
# zoom out a bit
cmd.zoom('all', 2)
# move the far clipping plane back a bit to brighten things up
cmd.clip('far',-5)
|
rq/cli/__init__.py
|
dralley/rq
| 4,261 |
108578
|
<gh_stars>1000+
# flake8: noqa
from .cli import main
# TODO: the following imports can be removed when we drop the `rqinfo` and
# `rqworkers` commands in favor of just shipping the `rq` command.
from .cli import info, worker
|
adi_analyze/utils/ColorUtils_test.py
|
Bertlk/ADI
| 226 |
108612
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/19 3:37 下午
# @Author : kewen
# @File : ColorUtils_test.py
import unittest
from utils.ColorUtils import toHex, get_n_rgb_colors
class Test(unittest.TestCase):
def test_toHex(self):
rgb = [244, 255, 196]
self.assertEqual(toHex(rgb), "#f4ffc4")
def test_get_n_rgb(self):
get_n_rgb_colors(3)
if __name__ == '__main__':
unittest.main()
|
loophole/polar/pb/sportprofile_mclaren_settings_pb2.py
|
oscarpicas/loophole
| 153 |
108636
|
<reponame>oscarpicas/loophole<gh_stars>100-1000
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sportprofile_mclaren_settings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='sportprofile_mclaren_settings.proto',
package='data',
serialized_pb=_b('\n#sportprofile_mclaren_settings.proto\x12\x04\x64\x61ta\"3\n\x1dPbMcLarenSportProfileSettings\x12\x12\n\nauto_start\x18\x04 \x02(\x08')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PBMCLARENSPORTPROFILESETTINGS = _descriptor.Descriptor(
name='PbMcLarenSportProfileSettings',
full_name='data.PbMcLarenSportProfileSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='auto_start', full_name='data.PbMcLarenSportProfileSettings.auto_start', index=0,
number=4, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=45,
serialized_end=96,
)
DESCRIPTOR.message_types_by_name['PbMcLarenSportProfileSettings'] = _PBMCLARENSPORTPROFILESETTINGS
PbMcLarenSportProfileSettings = _reflection.GeneratedProtocolMessageType('PbMcLarenSportProfileSettings', (_message.Message,), dict(
DESCRIPTOR = _PBMCLARENSPORTPROFILESETTINGS,
__module__ = 'sportprofile_mclaren_settings_pb2'
# @@protoc_insertion_point(class_scope:data.PbMcLarenSportProfileSettings)
))
_sym_db.RegisterMessage(PbMcLarenSportProfileSettings)
# @@protoc_insertion_point(module_scope)
|
lib/disco/schemes/scheme_http.py
|
jgrnt/disco
| 786 |
108666
|
<gh_stars>100-1000
from disco import comm
def open(url, task=None):
return comm.open_url(url)
def input_stream(fd, sze, url, params):
"""Opens the specified url using an http client."""
import disco.worker
file = open(url, task=disco.worker.active_task)
return file, len(file), file.url
|
src/web/monitorforms/linux-service-running/__init__.py
|
anderson-attilio/runbook
| 155 |
108667
|
<gh_stars>100-1000
from wtforms import TextField, TextAreaField, SelectField
from wtforms.validators import DataRequired, Optional
from ..datacenter import DatacenterCheckForm
class CheckForm(DatacenterCheckForm):
    '''Class that creates a form for the monitor Linux: Service is Running'''
title = "Linux: Service is Running"
description = """
<p>This monitor opens an SSH connection to the specified server and checks whether the specified service is running. This monitor will automatically determine whether to use the <code>systemctl</code> or the <code>service</code> commands.</p>
<p>The SSH connection is authenticated by an SSH key; it is recommended that you generate a unique SSH public/private key pair for this purpose. The <code>Gateway</code> field can be used to specify a bastion or "jump" host; this setting will cause the monitor to first SSH to the specified <code>Gateway</code> host and then SSH to the specified target host.</p>
    <p>Success and Failure are determined by the ability to connect to the remote host, and the results of the executed service status commands. If those commands require <code>sudo</code> to be used, simply specify True on the "use Sudo" field.</p>
"""
webhook_include = "monitors/webhooks/general.html"
placeholders = DatacenterCheckForm.placeholders
placeholders.update({'service_name' : 'nginx'})
service_name = TextField(
"Service",
description="""
Specify the service to check in the format you would use to issue either the systemctl or service command.
""",
validators=[DataRequired(message='Service is a required field')])
host_string = TextField(
"Target Host",
description=DatacenterCheckForm.descriptions['ssh']['host_string'],
validators=[DataRequired(message='Target Host is a required field')])
gateway = TextField(
"Gateway Host",
description=DatacenterCheckForm.descriptions['ssh']['gateway'],
validators=[Optional()])
username = TextField(
"SSH Username",
description=DatacenterCheckForm.descriptions['ssh']['username'],
validators=[DataRequired(message="Username is a required field")])
sshkey = TextAreaField(
"SSH Private Key",
description=DatacenterCheckForm.descriptions['ssh']['sshkey'],
validators=[DataRequired(message='SSH Key is a required field')])
use_sudo = SelectField(
"Use Sudo",
description=DatacenterCheckForm.descriptions['ssh']['use_sudo'],
choices=[('true', 'True'), ('false', 'False')],
validators=[DataRequired(message="Use Sudo is a required field")])
if __name__ == '__main__':
pass
|
ansible/roles/lib_ops_utils/library/yum_repo_exclude.py
|
fahlmant/openshift-tools
| 164 |
108668
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
See Ansible Module Documentation (Below)
'''
import re
import iniparse
DOCUMENTATION = '''
---
module: yum_repo_exclude
short_description: manage packages on a YUM repo's exclude line
description:
- Add package names or patterns to a YUM repository configuration's exclude line
options:
name:
description:
- Filename where the repository configuration exists
required: true
state:
description:
- One of 'present', 'absent'. If 'present', patterns are added (if necessary). If 'absent', patterns are removed (if necessary).
    required: false
default: present
repo:
description:
- The name of the repository
required: true
patterns:
description:
- A list of package names and/or package patterns
required: true
author:
- "<NAME> (<EMAIL>)"
'''
EXAMPLES = '''
tasks:
- name: Don't install foo from repo bar
yum_repo_exclude:
name: /etc/yum.repos.d/bar.repo
repo: bar
patterns: [ foo ]
- name: Stop excluding baz and qux-* from repo bar
yum_repo_exclude:
name: /etc/yum.repos.d/bar.repo
repo: bar
patterns: [ baz, qux-* ]
state: absent
'''
class YumRepoExcludeError(Exception):
'''All YumRepoExclude methods throw this exception when errors occur'''
def __init__(self, msg):
super(YumRepoExcludeError, self).__init__(msg)
self.msg = msg
class YumRepoExclude(object):
'''A YUM repo's exclude option'''
def __init__(self, filename, repo):
'''Create an exclude'''
self.filename = filename
self.repo = repo
def get(self):
'''Get the current exclude value'''
ini = None
with open(self.filename) as repofile:
ini = iniparse.INIConfig(repofile)
repoobj = ini[self.repo]
if not getattr(repoobj, "__getitem__", None):
raise YumRepoExcludeError("Repository {} not found in file {}".format(self.repo, self.filename))
current = repoobj["exclude"]
if getattr(current, "__getitem__", None):
return re.split(r'\s+', current)
return list()
def set(self, patterns):
'''Update the exclude value'''
with open(self.filename, 'r+') as repofile:
ini = iniparse.INIConfig(repofile)
repoobj = ini[self.repo]
if not getattr(repoobj, "__getitem__", None):
raise YumRepoExcludeError("Repository {} not found in file {}".format(self.repo, self.filename))
repoobj["exclude"] = " ".join(patterns)
repofile.seek(0)
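            # iniparse renders the option as "exclude = value"; normalize that back to
            # "exclude=value" while rewriting the whole file in place below.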
repofile.write(re.sub(r'^exclude += +', 'exclude=', str(ini), flags=re.M))
repofile.truncate()
def main():
'''Ansible module to add/remove packages or patterns from a YUM repo's exclude line'''
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
repo=dict(required=True),
patterns=dict(required=True, type='list'),
state=dict(required=False, default='present', choices=['present', 'absent']),
),
supports_check_mode=True
)
name = module.params['name']
repo = module.params['repo']
patterns = module.params['patterns']
state = module.params['state']
changed = False
yumrepo = YumRepoExclude(name, repo)
try:
current = set(yumrepo.get())
if state == 'absent':
expected = current - set(patterns)
elif state == 'present':
expected = current | set(patterns)
if current != expected:
yumrepo.set(expected)
current = set(yumrepo.get())
if current == expected:
changed = True
else:
module.fail_json(msg="Update to repo {} from {} failed. Expected {}, got {}".format(repo, name,
expected, current))
except YumRepoExcludeError as ex:
module.fail_json(msg=ex.msg)
return module.exit_json(changed=changed)
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
modelvshuman/datasets/noise_generalisation.py
|
TizianThieringer/model-vs-human
| 158 |
108714
|
<reponame>TizianThieringer/model-vs-human
from dataclasses import dataclass, field
from os.path import join as pjoin
from typing import List
from .registry import register_dataset
from .. import constants as c
from . import decision_mappings, info_mappings
from .dataloaders import PytorchLoader
from ..evaluation import metrics as m
from .base import Dataset
from .experiments import *
__all__ = ["colour", "contrast", "high_pass", "low_pass",
"phase_scrambling", "power_equalisation",
"false_colour", "rotation", "eidolonI",
"eidolonII", "eidolonIII", "uniform_noise"]
@dataclass
class NoiseGeneralisationParams:
path: str = ""
experiments: List = field(default_factory=list)
image_size: int = 224
metrics: list = field(default_factory=lambda: [m.Accuracy(topk=1)])
decision_mapping: object = decision_mappings.ImageNetProbabilitiesTo16ClassesMapping()
info_mapping: object = info_mappings.InfoMappingWithSessions()
contains_sessions: bool = True
def _get_dataset(name, params, *args, **kwargs):
assert params is not None, "Dataset params are missing"
params.path = pjoin(c.DATASET_DIR, name)
return Dataset(name=name,
params=params,
loader=PytorchLoader,
*args,
**kwargs)
@register_dataset(name="colour")
def colour(*args, **kwargs):
return _get_dataset(name="colour",
params=NoiseGeneralisationParams(experiments=[colour_experiment]),
*args, **kwargs)
@register_dataset(name="contrast")
def contrast(*args, **kwargs):
return _get_dataset(name="contrast",
params=NoiseGeneralisationParams(experiments=[contrast_experiment]),
*args, **kwargs)
@register_dataset(name="high-pass")
def high_pass(*args, **kwargs):
return _get_dataset(name="high-pass",
params=NoiseGeneralisationParams(experiments=[high_pass_experiment]),
*args, **kwargs)
@register_dataset(name="low-pass")
def low_pass(*args, **kwargs):
return _get_dataset(name="low-pass",
params=NoiseGeneralisationParams(experiments=[low_pass_experiment]),
*args, **kwargs)
@register_dataset(name="phase-scrambling")
def phase_scrambling(*args, **kwargs):
return _get_dataset(name="phase-scrambling",
params=NoiseGeneralisationParams(experiments=[phase_scrambling_experiment]),
*args, **kwargs)
@register_dataset(name="power-equalisation")
def power_equalisation(*args, **kwargs):
return _get_dataset(name="power-equalisation",
params=NoiseGeneralisationParams(experiments=[power_equalisation_experiment]),
*args, **kwargs)
@register_dataset(name="false-colour")
def false_colour(*args, **kwargs):
return _get_dataset(name="false-colour",
params=NoiseGeneralisationParams(experiments=[false_colour_experiment]),
*args, **kwargs)
@register_dataset(name="rotation")
def rotation(*args, **kwargs):
return _get_dataset(name="rotation",
params=NoiseGeneralisationParams(experiments=[rotation_experiment]),
*args, **kwargs)
@register_dataset(name="eidolonI")
def eidolonI(*args, **kwargs):
return _get_dataset(name="eidolonI",
params=NoiseGeneralisationParams(experiments=[eidolonI_experiment]),
*args, **kwargs)
@register_dataset(name="eidolonII")
def eidolonII(*args, **kwargs):
return _get_dataset(name="eidolonII",
params=NoiseGeneralisationParams(experiments=[eidolonII_experiment]),
*args, **kwargs)
@register_dataset(name="eidolonIII")
def eidolonIII(*args, **kwargs):
return _get_dataset(name="eidolonIII",
params=NoiseGeneralisationParams(experiments=[eidolonIII_experiment]),
*args, **kwargs)
@register_dataset(name="uniform-noise")
def uniform_noise(*args, **kwargs):
return _get_dataset(name="uniform-noise",
params=NoiseGeneralisationParams(experiments=[uniform_noise_experiment]),
*args, **kwargs)
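# A hypothetical sketch (not part of the original module) of how a further distortion
# would be exposed: define an experiment object in .experiments and register it with
# the same _get_dataset/NoiseGeneralisationParams pattern used above, e.g.
#
#   @register_dataset(name="my-distortion")
#   def my_distortion(*args, **kwargs):
#       return _get_dataset(name="my-distortion",
#                           params=NoiseGeneralisationParams(experiments=[my_distortion_experiment]),
#                           *args, **kwargs)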
|
tests/estimator/test_mixture_of_experts.py
|
LongmaoTeamTf/deep_recommenders
| 143 |
108721
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
sys.dont_write_bytecode = True
import numpy as np
import tensorflow as tf
if tf.__version__ >= "2.0.0":
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
from absl.testing import parameterized
from deep_recommenders.datasets import SyntheticForMultiTask
from deep_recommenders.estimator.models.multi_task_learning import MMoE
class TestMixtureOfExperts(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(32, 64, 128, 512)
def test_mmoe(self, batch_size):
def build_columns():
return [
tf.feature_column.numeric_column("C{}".format(i))
for i in range(100)
]
columns = build_columns()
model = MMoE(columns,
num_tasks=2,
num_experts=2,
task_hidden_units=[32, 10],
expert_hidden_units=[64, 32])
dataset = SyntheticForMultiTask(5000)
with self.session() as sess:
iterator = tf.data.make_one_shot_iterator(dataset.input_fn(batch_size=batch_size))
x, y = iterator.get_next()
y_pred = model(x)
sess.run(tf.global_variables_initializer())
a = sess.run(y_pred[0])
b = sess.run(y_pred[1])
self.assertAllEqual(len(y_pred), 2)
self.assertAllEqual(a.shape, (batch_size, 1))
self.assertAllEqual(b.shape, (batch_size, 1))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.test.main()
|
libsaas/services/basecamp/accesses.py
|
MidtownFellowship/libsaas
| 155 |
108741
|
<reponame>MidtownFellowship/libsaas
from libsaas import http, parsers
from libsaas.services import base
from .resource import BasecampResource
class AccessResource(BasecampResource):
path = 'accesses'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class Accesses(AccessResource):
@base.apimethod
def grant(self, obj):
"""
Create a new resource.
:var obj: a Python object representing the resource to be created,
usually in the same format as returned from `get`. Refer to the
upstream documentation for details.
"""
self.require_collection()
request = http.Request('POST', self.get_url(), self.wrap_object(obj))
return request, parsers.parse_empty
class Access(AccessResource):
def get(self, *args, **kwargs):
raise base.MethodNotSupported()
@base.apimethod
def revoke(self):
"""
Delete this resource.
"""
self.require_item()
request = http.Request('DELETE', self.get_url())
return request, parsers.parse_empty
|
build/mac/should_use_hermetic_xcode.py
|
google-ar/chromium
| 777 |
108799
|
<filename>build/mac/should_use_hermetic_xcode.py
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints "1" if Chrome targets should be built with hermetic xcode. Otherwise
prints "0".
Usage:
python should_use_hermetic_xcode.py <target_os>
"""
import os
import sys
def _IsCorpMachine():
return os.path.isdir('/Library/GoogleCorpSupport/')
def main():
allow_corp = sys.argv[1] == 'mac' and _IsCorpMachine()
if os.environ.get('FORCE_MAC_TOOLCHAIN') or allow_corp:
return "1"
else:
return "0"
if __name__ == '__main__':
print main()
sys.exit(0)
|
suzieq/sqobjects/routes.py
|
foobug/suzieq
| 487 |
108818
|
<filename>suzieq/sqobjects/routes.py
from suzieq.sqobjects.basicobj import SqObject
import pandas as pd
class RoutesObj(SqObject):
def __init__(self, **kwargs):
super().__init__(table='routes', **kwargs)
self._addnl_filter = 'metric != 4278198272'
self._valid_get_args = ['namespace', 'hostname', 'columns', 'prefix',
'vrf', 'protocol', 'prefixlen', 'ipvers',
'add_filter', 'address', 'query_str']
def lpm(self, **kwargs):
        '''Get the longest prefix match (LPM) route entry for the given address'''
        if not kwargs.get("address", None):
            raise AttributeError('IP address is a mandatory parameter')
if isinstance(kwargs['address'], list):
if len(kwargs['address']) > 1:
raise AttributeError('Just one address at a time')
kwargs['address'] = kwargs['address'][0]
return self.engine.lpm(**kwargs)
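    # Hypothetical usage sketch (constructor arguments omitted, values illustrative):
    #   RoutesObj(...).lpm(address='10.0.0.1', vrf=['default'])
    # Only a single address is accepted per call; the lookup itself is delegated to the engine.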
def summarize(self, namespace=[], vrf=[], hostname=[], query_str=''):
"""Summarize routing info for one or more namespaces"""
if self.columns != ["default"]:
self.summarize_df = pd.DataFrame(
{'error':
['ERROR: You cannot specify columns with summarize']})
return self.summarize_df
return self.engine.summarize(namespace=namespace, vrf=vrf,
query_str=query_str,
hostname=hostname)
|
pyrival/numerical/polynomial.py
|
MattJDavidson/aoc2021
| 748 |
108842
|
def poly(a, x):
val = 0
for ai in reversed(a):
val *= x
val += ai
return val
def diff(a):
return [a[i + 1] * (i + 1) for i in range(len(a) - 1)]
def divroot(a, x0):
b, a[-1] = a[-1], 0
for i in reversed(range(len(a) - 1)):
a[i], b = a[i + 1] * x0 + b, a[i]
a.pop()
return a
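# Hedged usage sketch (not part of the original module). Coefficients are stored
# lowest-degree first, so a = [a0, a1, ..., an] represents a0 + a1*x + ... + an*x**n:
# poly() evaluates via Horner's rule, diff() differentiates, and divroot() divides
# out a known root x0 in place (synthetic division).
if __name__ == "__main__":
    coeffs = [-6, 11, -6, 1]             # x**3 - 6x**2 + 11x - 6 = (x-1)(x-2)(x-3)
    assert poly(coeffs, 2) == 0          # 2 is a root
    assert diff(coeffs) == [11, -12, 3]  # derivative: 3x**2 - 12x + 11
    divroot(coeffs, 1)                   # remove the root x0 = 1, mutating coeffs
    assert coeffs == [6, -5, 1]          # quotient: x**2 - 5x + 6 = (x-2)(x-3)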
|
tflib/ops/ops.py
|
AlexBlack2202/EigenGAN-Tensorflow
| 302 |
108857
|
<gh_stars>100-1000
import tensorflow as tf
def tile_concat(a_list, b_list=None):
# tile all elements of `b_list` and then concat `a_list + b_list` along the channel axis
# `a` shape: (N, H, W, C_a)
# `b` shape: can be (N, 1, 1, C_b) or (N, C_b)
if b_list is None:
b_list = []
a_list = list(a_list) if isinstance(a_list, (list, tuple)) else [a_list]
b_list = list(b_list) if isinstance(b_list, (list, tuple)) else [b_list]
for i, b in enumerate(b_list):
b = tf.reshape(b, [-1, 1, 1, b.shape[-1]])
b = tf.tile(b, [1, a_list[0].shape[1], a_list[0].shape[2], 1])
b_list[i] = b
return tf.concat(a_list + b_list, axis=-1)
def reshape(x, shape):
x = tf.convert_to_tensor(x)
shape = [x.shape[i] if shape[i] == 0 else shape[i] for i in range(len(shape))]
shape = [tf.shape(x)[i] if shape[i] is None else shape[i] for i in range(len(shape))]
return tf.reshape(x, shape)
def minmax_norm(x, epsilon=1e-12):
x = tf.to_float(x)
min_val = tf.reduce_min(x)
max_val = tf.reduce_max(x)
norm_x = (x - min_val) / tf.maximum((max_val - min_val), epsilon)
return norm_x
def gram_schmidt(vectors):
"""Gram-Schmidt process. Modified from https://stackoverflow.com/questions/48119473.
Parameters
----------
vectors: 2D tensor - [v1, v2, ...]
"""
basis = (vectors[0:1, :] / tf.norm(vectors[0:1, :]))
for i in range(1, vectors.shape[0]):
v = vectors[i:i + 1, :]
w = v - tf.matmul(tf.matmul(v, basis, transpose_b=True), basis)
basis = tf.concat([basis, w / tf.norm(w)], axis=0)
return basis
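# Hedged usage sketch (not part of the original module; assumes static shapes are known,
# as in the TF1-style graph code above): tile_concat broadcasts per-sample condition
# vectors of shape (N, C_b) across the spatial grid of a feature map of shape
# (N, H, W, C_a) and concatenates along channels, yielding (N, H, W, C_a + C_b).
if __name__ == "__main__":
    feat = tf.zeros([4, 8, 8, 16])   # (N, H, W, C_a)
    cond = tf.ones([4, 3])           # (N, C_b)
    out = tile_concat(feat, cond)
    print(out.shape)                 # expected: (4, 8, 8, 19)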
|
humor/scripts/process_amass_data.py
|
DalhousieAI/humor
| 143 |
108894
|
<reponame>DalhousieAI/humor
import sys, os
cur_file_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(cur_file_path, '..'))
import glob
import os
import argparse
import time
import numpy as np
import torch
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from body_model.body_model import BodyModel
from body_model.utils import SMPL_JOINTS, KEYPT_VERTS
from viz.utils import viz_smpl_seq
from utils.torch import copy2cpu as c2c
from utils.transforms import batch_rodrigues, compute_world2aligned_mat, compute_world2aligned_joints_mat, axisangle2matrots
#
# Processing options
#
OUT_FPS = 30
SAVE_KEYPT_VERTS = True # save vertex locations of certain keypoints
SAVE_HAND_POSE = False # save joint angles for the hand
SAVE_VELOCITIES = True # save all parameter velocities available
SAVE_ALIGN_ROT = True # save rot mats that go from world root orient to aligned root orient
DISCARD_TERRAIN_SEQUENCES = True # throw away sequences where the person steps onto objects (determined by a heuristic)
# optional viz during processing
VIZ_PLOTS = False
VIZ_SEQ = False
ALL_DATASETS = ['ACCAD', 'BMLmovi', 'BioMotionLab_NTroje', 'BMLhandball', 'CMU', 'DanceDB', 'DFaust_67',
'EKUT', 'Eyes_Japan_Dataset', 'HumanEva', 'KIT', 'MPI_HDM05',
'MPI_Limits', 'MPI_mosh', 'SFU', 'SSM_synced', 'TCD_handMocap',
'TotalCapture', 'Transitions_mocap'] # everything in AMASS
TRAIN_DATASETS = ['CMU', 'MPI_Limits', 'TotalCapture', 'Eyes_Japan_Dataset', 'KIT', 'BioMotionLab_NTroje', 'BMLmovi',
'EKUT', 'ACCAD'] # HuMoR training dataset
TEST_DATASETS = ['Transitions_mocap', 'HumanEva'] # HuMoR test datasets
VAL_DATASETS = ['MPI_HDM05', 'SFU', 'MPI_mosh'] # HuMoR validation datasets
# if sequence is longer than this, splits into sequences of this size to avoid running out of memory
# ~ 4000 for 12 GB GPU, ~2000 for 8 GB
SPLIT_FRAME_LIMIT = 2000
NUM_BETAS = 16 # size of SMPL shape parameter to use
DISCARD_SHORTER_THAN = 1.0 # seconds
# for determining floor height
FLOOR_VEL_THRESH = 0.005
FLOOR_HEIGHT_OFFSET = 0.01
# for determining contacts
CONTACT_VEL_THRESH = 0.005 #0.015
CONTACT_TOE_HEIGHT_THRESH = 0.04
CONTACT_ANKLE_HEIGHT_THRESH = 0.08
# for determining terrain interaction
TERRAIN_HEIGHT_THRESH = 0.04 # if static toe is above this height
ROOT_HEIGHT_THRESH = 0.04 # if maximum "static" root height is more than this + root_floor_height
CLUSTER_SIZE_THRESH = 0.25 # if cluster has more than this fraction of fps (30 for 120 fps)
#
# Processing
#
def debug_viz_seq(body, fps, contacts=None):
viz_smpl_seq(body, imw=1080, imh=1080, fps=fps, contacts=contacts,
render_body=False, render_joints=True, render_skeleton=False, render_ground=True)
def get_body_model_sequence(smplh_path, gender, num_frames,
pose_body, pose_hand, betas, root_orient, trans):
gender = str(gender)
bm_path = os.path.join(smplh_path, gender + '/model.npz')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
bm = BodyModel(bm_path=bm_path, num_betas=NUM_BETAS, batch_size=num_frames).to(device)
pose_body = torch.Tensor(pose_body).to(device)
pose_hand = torch.Tensor(pose_hand).to(device)
betas = torch.Tensor(np.repeat(betas[:NUM_BETAS][np.newaxis], num_frames, axis=0)).to(device)
root_orient = torch.Tensor(root_orient).to(device)
trans = torch.Tensor(trans).to(device)
body = bm(pose_body=pose_body, pose_hand=pose_hand, betas=betas, root_orient=root_orient, trans=trans)
return body
def determine_floor_height_and_contacts(body_joint_seq, fps):
'''
    Input: body_joint_seq (N x num_joints x 3) numpy array of SMPL joint positions.
    Returns the estimated floor height, an (N x num_joints) contact array marking which
    joints (heels, toes, hands, knees) are in contact at each frame, and a discard flag.
'''
num_frames = body_joint_seq.shape[0]
# compute toe velocities
root_seq = body_joint_seq[:, SMPL_JOINTS['hips'], :]
left_toe_seq = body_joint_seq[:, SMPL_JOINTS['leftToeBase'], :]
right_toe_seq = body_joint_seq[:, SMPL_JOINTS['rightToeBase'], :]
left_toe_vel = np.linalg.norm(left_toe_seq[1:] - left_toe_seq[:-1], axis=1)
left_toe_vel = np.append(left_toe_vel, left_toe_vel[-1])
right_toe_vel = np.linalg.norm(right_toe_seq[1:] - right_toe_seq[:-1], axis=1)
right_toe_vel = np.append(right_toe_vel, right_toe_vel[-1])
if VIZ_PLOTS:
fig = plt.figure()
steps = np.arange(num_frames)
plt.plot(steps, left_toe_vel, '-r', label='left vel')
plt.plot(steps, right_toe_vel, '-b', label='right vel')
plt.legend()
plt.show()
plt.close()
# now foot heights (z is up)
left_toe_heights = left_toe_seq[:, 2]
right_toe_heights = right_toe_seq[:, 2]
root_heights = root_seq[:, 2]
if VIZ_PLOTS:
fig = plt.figure()
steps = np.arange(num_frames)
plt.plot(steps, left_toe_heights, '-r', label='left toe height')
plt.plot(steps, right_toe_heights, '-b', label='right toe height')
plt.plot(steps, root_heights, '-g', label='root height')
plt.legend()
plt.show()
plt.close()
# filter out heights when velocity is greater than some threshold (not in contact)
all_inds = np.arange(left_toe_heights.shape[0])
left_static_foot_heights = left_toe_heights[left_toe_vel < FLOOR_VEL_THRESH]
left_static_inds = all_inds[left_toe_vel < FLOOR_VEL_THRESH]
right_static_foot_heights = right_toe_heights[right_toe_vel < FLOOR_VEL_THRESH]
right_static_inds = all_inds[right_toe_vel < FLOOR_VEL_THRESH]
all_static_foot_heights = np.append(left_static_foot_heights, right_static_foot_heights)
all_static_inds = np.append(left_static_inds, right_static_inds)
if VIZ_PLOTS:
fig = plt.figure()
steps = np.arange(left_static_foot_heights.shape[0])
plt.plot(steps, left_static_foot_heights, '-r', label='left static height')
plt.legend()
plt.show()
plt.close()
# fig = plt.figure()
# plt.hist(all_static_foot_heights)
# plt.show()
# plt.close()
discard_seq = False
if all_static_foot_heights.shape[0] > 0:
cluster_heights = []
cluster_root_heights = []
cluster_sizes = []
# cluster foot heights and find one with smallest median
clustering = DBSCAN(eps=0.005, min_samples=3).fit(all_static_foot_heights.reshape(-1, 1))
all_labels = np.unique(clustering.labels_)
# print(all_labels)
if VIZ_PLOTS:
plt.figure()
min_median = min_root_median = float('inf')
for cur_label in all_labels:
cur_clust = all_static_foot_heights[clustering.labels_ == cur_label]
cur_clust_inds = np.unique(all_static_inds[clustering.labels_ == cur_label]) # inds in the original sequence that correspond to this cluster
if VIZ_PLOTS:
plt.scatter(cur_clust, np.zeros_like(cur_clust), label='foot %d' % (cur_label))
# get median foot height and use this as height
cur_median = np.median(cur_clust)
cluster_heights.append(cur_median)
cluster_sizes.append(cur_clust.shape[0])
# get root information
cur_root_clust = root_heights[cur_clust_inds]
cur_root_median = np.median(cur_root_clust)
cluster_root_heights.append(cur_root_median)
if VIZ_PLOTS:
plt.scatter(cur_root_clust, np.zeros_like(cur_root_clust), label='root %d' % (cur_label))
# update min info
if cur_median < min_median:
min_median = cur_median
min_root_median = cur_root_median
# print(cluster_heights)
# print(cluster_root_heights)
# print(cluster_sizes)
if VIZ_PLOTS:
plt.show()
plt.close()
floor_height = min_median
offset_floor_height = floor_height - FLOOR_HEIGHT_OFFSET # toe joint is actually inside foot mesh a bit
if DISCARD_TERRAIN_SEQUENCES:
# print(min_median + TERRAIN_HEIGHT_THRESH)
# print(min_root_median + ROOT_HEIGHT_THRESH)
for cluster_root_height, cluster_height, cluster_size in zip (cluster_root_heights, cluster_heights, cluster_sizes):
root_above_thresh = cluster_root_height > (min_root_median + ROOT_HEIGHT_THRESH)
toe_above_thresh = cluster_height > (min_median + TERRAIN_HEIGHT_THRESH)
cluster_size_above_thresh = cluster_size > int(CLUSTER_SIZE_THRESH*fps)
if root_above_thresh and toe_above_thresh and cluster_size_above_thresh:
discard_seq = True
print('DISCARDING sequence based on terrain interaction!')
break
else:
floor_height = offset_floor_height = 0.0
# now find contacts (feet are below certain velocity and within certain range of floor)
# compute heel velocities
left_heel_seq = body_joint_seq[:, SMPL_JOINTS['leftFoot'], :]
right_heel_seq = body_joint_seq[:, SMPL_JOINTS['rightFoot'], :]
left_heel_vel = np.linalg.norm(left_heel_seq[1:] - left_heel_seq[:-1], axis=1)
left_heel_vel = np.append(left_heel_vel, left_heel_vel[-1])
right_heel_vel = np.linalg.norm(right_heel_seq[1:] - right_heel_seq[:-1], axis=1)
right_heel_vel = np.append(right_heel_vel, right_heel_vel[-1])
left_heel_contact = left_heel_vel < CONTACT_VEL_THRESH
right_heel_contact = right_heel_vel < CONTACT_VEL_THRESH
left_toe_contact = left_toe_vel < CONTACT_VEL_THRESH
right_toe_contact = right_toe_vel < CONTACT_VEL_THRESH
# compute heel heights
left_heel_heights = left_heel_seq[:, 2] - floor_height
right_heel_heights = right_heel_seq[:, 2] - floor_height
left_toe_heights = left_toe_heights - floor_height
right_toe_heights = right_toe_heights - floor_height
left_heel_contact = np.logical_and(left_heel_contact, left_heel_heights < CONTACT_ANKLE_HEIGHT_THRESH)
right_heel_contact = np.logical_and(right_heel_contact, right_heel_heights < CONTACT_ANKLE_HEIGHT_THRESH)
left_toe_contact = np.logical_and(left_toe_contact, left_toe_heights < CONTACT_TOE_HEIGHT_THRESH)
right_toe_contact = np.logical_and(right_toe_contact, right_toe_heights < CONTACT_TOE_HEIGHT_THRESH)
contacts = np.zeros((num_frames, len(SMPL_JOINTS)))
contacts[:,SMPL_JOINTS['leftFoot']] = left_heel_contact
contacts[:,SMPL_JOINTS['leftToeBase']] = left_toe_contact
contacts[:,SMPL_JOINTS['rightFoot']] = right_heel_contact
contacts[:,SMPL_JOINTS['rightToeBase']] = right_toe_contact
# hand contacts
left_hand_contact = detect_joint_contact(body_joint_seq, 'leftHand', floor_height, CONTACT_VEL_THRESH, CONTACT_ANKLE_HEIGHT_THRESH)
right_hand_contact = detect_joint_contact(body_joint_seq, 'rightHand', floor_height, CONTACT_VEL_THRESH, CONTACT_ANKLE_HEIGHT_THRESH)
contacts[:,SMPL_JOINTS['leftHand']] = left_hand_contact
contacts[:,SMPL_JOINTS['rightHand']] = right_hand_contact
# knee contacts
left_knee_contact = detect_joint_contact(body_joint_seq, 'leftLeg', floor_height, CONTACT_VEL_THRESH, CONTACT_ANKLE_HEIGHT_THRESH)
right_knee_contact = detect_joint_contact(body_joint_seq, 'rightLeg', floor_height, CONTACT_VEL_THRESH, CONTACT_ANKLE_HEIGHT_THRESH)
contacts[:,SMPL_JOINTS['leftLeg']] = left_knee_contact
contacts[:,SMPL_JOINTS['rightLeg']] = right_knee_contact
return offset_floor_height, contacts, discard_seq
def detect_joint_contact(body_joint_seq, joint_name, floor_height, vel_thresh, height_thresh):
# calc velocity
joint_seq = body_joint_seq[:, SMPL_JOINTS[joint_name], :]
joint_vel = np.linalg.norm(joint_seq[1:] - joint_seq[:-1], axis=1)
joint_vel = np.append(joint_vel, joint_vel[-1])
# determine contact by velocity
joint_contact = joint_vel < vel_thresh
# compute heights
joint_heights = joint_seq[:, 2] - floor_height
# compute contact by vel + height
joint_contact = np.logical_and(joint_contact, joint_heights < height_thresh)
return joint_contact
def compute_align_mats(root_orient):
''' compute world to canonical frame for each timestep (rotation around up axis) '''
num_frames = root_orient.shape[0]
# convert aa to matrices
root_orient_mat = batch_rodrigues(torch.Tensor(root_orient).reshape(-1, 3)).numpy().reshape((num_frames, 9))
# return compute_world2aligned_mat(torch.Tensor(root_orient_mat).reshape((num_frames, 3, 3))).numpy()
# rotate root so aligning local body right vector (-x) with world right vector (+x)
# with a rotation around the up axis (+z)
body_right = -root_orient_mat.reshape((num_frames, 3, 3))[:,:,0] # in body coordinates body x-axis is to the left
world2aligned_mat, world2aligned_aa = compute_align_from_right(body_right)
return world2aligned_mat
def compute_joint_align_mats(joint_seq):
'''
Compute world to canonical frame for each timestep (rotation around up axis)
from the given joint seq (T x J x 3)
'''
left_idx = SMPL_JOINTS['leftUpLeg']
right_idx = SMPL_JOINTS['rightUpLeg']
body_right = joint_seq[:, right_idx] - joint_seq[:, left_idx]
body_right = body_right / np.linalg.norm(body_right, axis=1)[:,np.newaxis]
world2aligned_mat, world2aligned_aa = compute_align_from_right(body_right)
return world2aligned_mat
def compute_align_from_right(body_right):
world2aligned_angle = np.arccos(body_right[:,0] / (np.linalg.norm(body_right[:,:2], axis=1) + 1e-8)) # project to world x axis, and compute angle
body_right[:,2] = 0.0
world2aligned_axis = np.cross(body_right, np.array([[1.0, 0.0, 0.0]]))
world2aligned_aa = (world2aligned_axis / (np.linalg.norm(world2aligned_axis, axis=1)[:,np.newaxis]+ 1e-8)) * world2aligned_angle[:,np.newaxis]
world2aligned_mat = batch_rodrigues(torch.Tensor(world2aligned_aa).reshape(-1, 3)).numpy()
return world2aligned_mat, world2aligned_aa
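# Worked example for the helper above (values illustrative): if the body right vector is
# +y in world coordinates, the projected angle is arccos(0) = pi/2 and the axis is
# cross([0, 1, 0], [1, 0, 0]) = [0, 0, -1], so the returned rotation is -90 degrees about
# the up axis, which maps the body right vector onto world +x as intended.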
def estimate_velocity(data_seq, h):
'''
Given some data sequence of T timesteps in the shape (T, ...), estimates
the velocity for the middle T-2 steps using a second order central difference scheme.
- h : step size
'''
data_tp1 = data_seq[2:]
data_tm1 = data_seq[0:-2]
data_vel_seq = (data_tp1 - data_tm1) / (2*h)
return data_vel_seq
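# Worked example for the second order central difference above (h = 1, values illustrative):
# for a data_seq of [0, 1, 4, 9] the estimate is [(4 - 0) / 2, (9 - 1) / 2] = [2, 4], i.e.
# one velocity sample for each of the T - 2 interior timesteps.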
def estimate_angular_velocity(rot_seq, h):
'''
Given a sequence of T rotation matrices, estimates angular velocity at T-2 steps.
Input sequence should be of shape (T, ..., 3, 3)
'''
# see https://en.wikipedia.org/wiki/Angular_velocity#Calculation_from_the_orientation_matrix
dRdt = estimate_velocity(rot_seq, h)
R = rot_seq[1:-1]
RT = np.swapaxes(R, -1, -2)
# compute skew-symmetric angular velocity tensor
w_mat = np.matmul(dRdt, RT)
# pull out angular velocity vector
# average symmetric entries
w_x = (-w_mat[..., 1, 2] + w_mat[..., 2, 1]) / 2.0
w_y = (w_mat[..., 0, 2] - w_mat[..., 2, 0]) / 2.0
w_z = (-w_mat[..., 0, 1] + w_mat[..., 1, 0]) / 2.0
w = np.stack([w_x, w_y, w_z], axis=-1)
return w
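# Sanity check for the helper above (stated as a comment, values illustrative): for a pure
# rotation about the up axis at constant rate w, dR/dt @ R^T is skew-symmetric with
# off-diagonal entries +/- w, and averaging the symmetric entries recovers approximately
# [0, 0, w] for the interior T - 2 timesteps.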
def process_seq(data_paths):
start_t = time.time()
input_file_path, output_file_path, smplh_path = data_paths
print(input_file_path)
# load in input data
# we leave out "dmpls" and "marker_data"/"marker_label" which are not present in all datasets
bdata = np.load(input_file_path)
gender = np.array(bdata['gender'], ndmin=1)[0]
gender = str(gender, 'utf-8') if isinstance(gender, bytes) else str(gender)
fps = bdata['mocap_framerate']
num_frames = bdata['poses'].shape[0]
trans = bdata['trans'][:] # global translation
root_orient = bdata['poses'][:, :3] # global root orientation (1 joint)
pose_body = bdata['poses'][:, 3:66] # body joint rotations (21 joints)
pose_hand = bdata['poses'][:, 66:] # finger articulation joint rotations
betas = bdata['betas'][:] # body shape parameters
# correct mislabeled data
if input_file_path.find('BMLhandball') >= 0:
fps = 240
if input_file_path.find('20160930_50032') >= 0 or input_file_path.find('20161014_50033') >= 0:
fps = 59
print(gender)
print('fps: %d' % (fps))
print(trans.shape)
print(root_orient.shape)
print(pose_body.shape)
print(pose_hand.shape)
print(betas.shape)
    # only keep the middle 80% of each sequence to avoid redundant static poses
trim_data = [trans, root_orient, pose_body, pose_hand]
for i, data_seq in enumerate(trim_data):
trim_data[i] = data_seq[int(0.1*num_frames):int(0.9*num_frames)]
trans, root_orient, pose_body, pose_hand = trim_data
num_frames = trans.shape[0]
print(trans.shape)
print(root_orient.shape)
print(pose_body.shape)
print(pose_hand.shape)
print(betas.shape)
# discard if shorter than threshold
if num_frames < DISCARD_SHORTER_THAN*fps:
print('Sequence shorter than %f s, discarding...' % (DISCARD_SHORTER_THAN))
return
# must do SMPL forward pass to get joints
# split into manageable chunks to avoid running out of GPU memory for SMPL
body_joint_seq = []
body_vtx_seq = []
process_inds = [0, min([num_frames, SPLIT_FRAME_LIMIT])]
while process_inds[0] < num_frames:
print(process_inds)
sidx, eidx = process_inds
body = get_body_model_sequence(smplh_path, gender, process_inds[1] - process_inds[0],
pose_body[sidx:eidx], pose_hand[sidx:eidx], betas, root_orient[sidx:eidx], trans[sidx:eidx])
cur_joint_seq = c2c(body.Jtr)
cur_body_joint_seq = cur_joint_seq[:, :len(SMPL_JOINTS), :]
body_joint_seq.append(cur_body_joint_seq)
# save specific vertices if desired
if SAVE_KEYPT_VERTS:
cur_vtx_seq = c2c(body.v)
cur_mojo_seq = cur_vtx_seq[:,KEYPT_VERTS,:]
body_vtx_seq.append(cur_mojo_seq)
process_inds[0] = process_inds[1]
process_inds[1] = min([num_frames, process_inds[1] + SPLIT_FRAME_LIMIT])
joint_seq = np.concatenate(body_joint_seq, axis=0)
print(joint_seq.shape)
vtx_seq = None
if SAVE_KEYPT_VERTS:
vtx_seq = np.concatenate(body_vtx_seq, axis=0)
print(vtx_seq.shape)
# determine floor height and foot contacts
floor_height, contacts, discard_seq = determine_floor_height_and_contacts(joint_seq, fps)
print('Floor height: %f' % (floor_height))
# translate so floor is at z=0
trans[:,2] -= floor_height
joint_seq[:,:,2] -= floor_height
if SAVE_KEYPT_VERTS:
vtx_seq[:,:,2] -= floor_height
# need the joint transform at all steps to find the angular velocity
joints_world2aligned_rot = compute_joint_align_mats(joint_seq)
# estimate various velocities based on full frame rate
# with second order central difference.
joint_vel_seq = vtx_vel_seq = trans_vel_seq = root_orient_vel_seq = pose_body_vel_seq = None
if SAVE_VELOCITIES:
h = 1.0 / fps
# joints
joint_vel_seq = estimate_velocity(joint_seq, h)
if SAVE_KEYPT_VERTS:
# vertices
vtx_vel_seq = estimate_velocity(vtx_seq, h)
# translation
trans_vel_seq = estimate_velocity(trans, h)
# root orient
root_orient_mat = axisangle2matrots(root_orient.reshape(num_frames, 1, 3)).reshape((num_frames, 3, 3))
root_orient_vel_seq = estimate_angular_velocity(root_orient_mat, h)
# body pose
pose_body_mat = axisangle2matrots(pose_body.reshape(num_frames, len(SMPL_JOINTS)-1, 3)).reshape((num_frames, len(SMPL_JOINTS)-1, 3, 3))
pose_body_vel_seq = estimate_angular_velocity(pose_body_mat, h)
# joint up-axis angular velocity (need to compute joint frames first...)
joint_orient_vel_seq = -estimate_angular_velocity(joints_world2aligned_rot, h)
# only need around z
joint_orient_vel_seq = joint_orient_vel_seq[:,2]
# exit()
# throw out edge frames for other data so velocities are accurate
num_frames = num_frames - 2
contacts = contacts[1:-1]
trans = trans[1:-1]
root_orient = root_orient[1:-1]
pose_body = pose_body[1:-1]
pose_hand = pose_hand[1:-1]
joint_seq = joint_seq[1:-1]
if SAVE_KEYPT_VERTS:
vtx_seq = vtx_seq[1:-1]
# downsample before saving
if OUT_FPS != fps:
if OUT_FPS > fps:
print('Cannot supersample data, saving at data rate!')
else:
fps_ratio = float(OUT_FPS) / fps
print('Downsamp ratio: %f' % (fps_ratio))
new_num_frames = int(fps_ratio*num_frames)
print('Downsamp num frames: %d' % (new_num_frames))
# print(cur_num_frames)
# print(new_num_frames)
downsamp_inds = np.linspace(0, num_frames-1, num=new_num_frames, dtype=int)
# print(downsamp_inds)
# update data to save
fps = OUT_FPS
num_frames = new_num_frames
contacts = contacts[downsamp_inds]
trans = trans[downsamp_inds]
root_orient = root_orient[downsamp_inds]
pose_body = pose_body[downsamp_inds]
pose_hand = pose_hand[downsamp_inds]
joint_seq = joint_seq[downsamp_inds]
if SAVE_KEYPT_VERTS:
vtx_seq = vtx_seq[downsamp_inds]
if SAVE_VELOCITIES:
joint_vel_seq = joint_vel_seq[downsamp_inds]
if SAVE_KEYPT_VERTS:
vtx_vel_seq = vtx_vel_seq[downsamp_inds]
trans_vel_seq = trans_vel_seq[downsamp_inds]
root_orient_vel_seq = root_orient_vel_seq[downsamp_inds]
pose_body_vel_seq = pose_body_vel_seq[downsamp_inds]
# joint up-axis angular velocity (need to compute joint frames first...)
joint_orient_vel_seq = joint_orient_vel_seq[downsamp_inds]
world2aligned_rot = None
if SAVE_ALIGN_ROT:
# compute rotation to canonical frame (forward facing +y) for every frame
world2aligned_rot = compute_align_mats(root_orient)
# NOTE: debug viz
if VIZ_SEQ:
body = get_body_model_sequence(smplh_path, gender, num_frames,
pose_body, pose_hand, betas, root_orient, trans)
# debug_viz_seq(body, fps, contacts=contacts)
viz_smpl_seq(body, imw=1080, imh=1080, fps=fps, contacts=contacts,
render_body=True, render_joints=True, render_skeleton=False, render_ground=True,
joints_seq=joint_seq) #,
# joints_vel=root_orient_vel_seq.reshape((-1, 1, 3)).repeat(22, axis=1))
# points_seq=vtx_seq,
# points_vel_seq=vtx_vel_seq)
# root_orient_vel_seq.reshape((-1, 1, 3)).repeat(22, axis=1)
if discard_seq:
print('Terrain interaction detected, discarding...')
return
if not SAVE_HAND_POSE:
pose_hand = None
# save
    # add number of frames and framerate to the file path for ease of loading
output_file_path = output_file_path[:-4] + '_%d_frames_%d_fps.npz' % (num_frames, int(fps))
np.savez(output_file_path, fps=fps,
gender=str(gender),
floor_height=floor_height,
contacts=contacts,
trans=trans,
root_orient=root_orient,
pose_body=pose_body,
pose_hand=pose_hand,
betas=betas,
joints=joint_seq,
mojo_verts=vtx_seq,
joints_vel=joint_vel_seq,
mojo_verts_vel=vtx_vel_seq,
trans_vel=trans_vel_seq,
root_orient_vel=root_orient_vel_seq,
joint_orient_vel_seq=joint_orient_vel_seq,
pose_body_vel=pose_body_vel_seq,
world2aligned_rot=world2aligned_rot)
print('Seq process time: %f s' % (time.time() - start_t))
def main(config):
start_time = time.time()
out_folder = config.out
if not os.path.exists(out_folder):
os.mkdir(out_folder)
# get all available datasets
all_dataset_dirs = [os.path.join(config.amass_root, f) for f in sorted(os.listdir(config.amass_root)) if f[0] != '.']
all_dataset_dirs = [f for f in all_dataset_dirs if os.path.isdir(f)]
print('Found %d available datasets from raw AMASS data source.' % (len(all_dataset_dirs)))
all_dataset_names = [f.split('/')[-1] for f in all_dataset_dirs]
print(all_dataset_names)
# requested datasets
dataset_dirs = [os.path.join(config.amass_root, f) for f in config.datasets]
dataset_names = config.datasets
print('Requested datasets:')
print(dataset_dirs)
print(dataset_names)
# go through each dataset to set up directory structure before processing
all_seq_in_files = []
all_seq_out_files = []
for data_dir, data_name in zip(dataset_dirs, dataset_names):
if not os.path.exists(data_dir):
print('Could not find dataset %s in available raw AMASS data!' % (data_name))
return
cur_output_dir = os.path.join(out_folder, data_name)
if not os.path.exists(cur_output_dir):
os.mkdir(cur_output_dir)
# first create subject structure in output
cur_subject_dirs = [f for f in sorted(os.listdir(data_dir)) if f[0] != '.' and os.path.isdir(os.path.join(data_dir, f))]
print(cur_subject_dirs)
for subject_dir in cur_subject_dirs:
cur_subject_out = os.path.join(cur_output_dir, subject_dir)
if not os.path.exists(cur_subject_out):
os.mkdir(cur_subject_out)
# then collect all sequence input files
input_seqs = glob.glob(os.path.join(data_dir, '*/*_poses.npz'))
print(len(input_seqs))
# and create output sequence file names
output_file_names = ['/'.join(f.split('/')[-2:]) for f in input_seqs]
output_seqs = [os.path.join(cur_output_dir, f) for f in output_file_names]
print(len(output_seqs))
already_processed = [i for i in range(len(output_seqs)) if len(glob.glob(output_seqs[i][:-4] + '*.npz')) == 1]
already_processed_output_names = [output_file_names[i] for i in already_processed]
print('Already processed these sequences, skipping:')
print(already_processed_output_names)
not_already_processed = [i for i in range(len(output_seqs)) if len(glob.glob(output_seqs[i][:-4] + '*.npz')) == 0]
input_seqs = [input_seqs[i] for i in not_already_processed]
output_seqs = [output_seqs[i] for i in not_already_processed]
all_seq_in_files += input_seqs
all_seq_out_files += output_seqs
smplh_paths = [config.smplh_root]*len(all_seq_in_files)
data_paths = list(zip(all_seq_in_files, all_seq_out_files, smplh_paths))
for data_in in data_paths:
process_seq(data_in)
total_time = time.time() - start_time
print('TIME TO PROCESS: %f min' % (total_time / 60.0))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--amass-root', type=str, default='./data/amass_raw', help='Root directory of raw AMASS dataset.')
parser.add_argument('--datasets', type=str, nargs='+', default=ALL_DATASETS, help='Which datasets to process. By default processes all.')
parser.add_argument('--out', type=str, default='./data/amass_processed', help='Root directory to save processed output to.')
parser.add_argument('--smplh-root', type=str, default='./body_models/smplh', help='Root directory of the SMPL+H body model.')
config = parser.parse_known_args()
config = config[0]
main(config)
# python caspr_humans/scripts/process_amass_data.py --out ./data/amass_full_processed
|
PythonAPI/examples/rss/manual_control_rss.py
|
cpc/carla
| 7,883 |
108896
|
<filename>PythonAPI/examples/rss/manual_control_rss.py
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
# Copyright (c) 2019-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
# Allows controlling a vehicle with a keyboard. For a simpler and more
# documented example, please take a look at tutorial.py.
"""
Welcome to CARLA manual control.
Use ARROWS or WASD keys for control.
W : throttle
S : brake
AD : steer
Q : toggle reverse
Space : hand-brake
P : toggle autopilot
TAB : change view
Backspace : change vehicle
R : toggle recording images to disk
F2 : toggle RSS visualization mode
B : toggle RSS Road Boundaries Mode
G : RSS check drop current route
T : toggle RSS
N : pause simulation
F1 : toggle HUD
H/? : toggle help
ESC : quit
"""
from __future__ import print_function
# ==============================================================================
# -- find carla module ---------------------------------------------------------
# ==============================================================================
import glob
import os
import sys
import signal
try:
sys.path.append(glob.glob(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + '/carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================
import carla
from carla import ColorConverter as cc
import argparse
import logging
import math
import random
import weakref
from rss_sensor import RssSensor # pylint: disable=relative-import
from rss_visualization import RssUnstructuredSceneVisualizer, RssBoundingBoxVisualizer, RssStateVisualizer # pylint: disable=relative-import
try:
import pygame
from pygame.locals import KMOD_CTRL
from pygame.locals import KMOD_SHIFT
from pygame.locals import K_BACKSPACE
from pygame.locals import K_TAB
from pygame.locals import K_DOWN
from pygame.locals import K_ESCAPE
from pygame.locals import K_F1
from pygame.locals import K_F2
from pygame.locals import K_LEFT
from pygame.locals import K_RIGHT
from pygame.locals import K_SLASH
from pygame.locals import K_SPACE
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_b
from pygame.locals import K_d
from pygame.locals import K_g
from pygame.locals import K_h
from pygame.locals import K_n
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_r
from pygame.locals import K_s
from pygame.locals import K_w
from pygame.locals import K_l
from pygame.locals import K_i
from pygame.locals import K_z
from pygame.locals import K_x
from pygame.locals import MOUSEBUTTONDOWN
from pygame.locals import MOUSEBUTTONUP
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class World(object):
def __init__(self, carla_world, args):
self.world = carla_world
self.actor_role_name = args.rolename
self.dim = (args.width, args.height)
try:
self.map = self.world.get_map()
except RuntimeError as error:
print('RuntimeError: {}'.format(error))
print(' The server could not send the OpenDRIVE (.xodr) file:')
print(' Make sure it exists, has the same name of your town, and is correct.')
sys.exit(1)
self.external_actor = args.externalActor
self.hud = HUD(args.width, args.height, carla_world)
self.recording_frame_num = 0
self.recording = False
self.recording_dir_num = 0
self.player = None
self.actors = []
self.rss_sensor = None
self.rss_unstructured_scene_visualizer = None
self.rss_bounding_box_visualizer = None
self._actor_filter = args.filter
if not self._actor_filter.startswith("vehicle."):
print('Error: RSS only supports vehicles as ego.')
sys.exit(1)
self.restart()
self.world_tick_id = self.world.on_tick(self.on_world_tick)
def on_world_tick(self, world_snapshot):
self.hud.on_world_tick(world_snapshot)
def toggle_pause(self):
settings = self.world.get_settings()
self.pause_simulation(not settings.synchronous_mode)
def pause_simulation(self, pause):
settings = self.world.get_settings()
if pause and not settings.synchronous_mode:
settings.synchronous_mode = True
settings.fixed_delta_seconds = 0.05
self.world.apply_settings(settings)
elif not pause and settings.synchronous_mode:
settings.synchronous_mode = False
settings.fixed_delta_seconds = None
self.world.apply_settings(settings)
def restart(self):
if self.external_actor:
# Check whether there is already an actor with defined role name
for actor in self.world.get_actors():
if actor.attributes.get('role_name') == self.actor_role_name:
self.player = actor
break
else:
# Get a random blueprint.
blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))
blueprint.set_attribute('role_name', self.actor_role_name)
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
if blueprint.has_attribute('driver_id'):
driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
blueprint.set_attribute('driver_id', driver_id)
if blueprint.has_attribute('is_invincible'):
blueprint.set_attribute('is_invincible', 'true')
# Spawn the player.
if self.player is not None:
spawn_point = self.player.get_transform()
spawn_point.location.z += 2.0
spawn_point.rotation.roll = 0.0
spawn_point.rotation.pitch = 0.0
self.destroy()
self.player = self.world.try_spawn_actor(blueprint, spawn_point)
while self.player is None:
if not self.map.get_spawn_points():
print('There are no spawn points available in your map/town.')
print('Please add some Vehicle Spawn Point to your UE4 scene.')
sys.exit(1)
spawn_points = self.map.get_spawn_points()
spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()
self.player = self.world.try_spawn_actor(blueprint, spawn_point)
if self.external_actor:
ego_sensors = []
for actor in self.world.get_actors():
if actor.parent == self.player:
ego_sensors.append(actor)
for ego_sensor in ego_sensors:
if ego_sensor is not None:
ego_sensor.destroy()
# Set up the sensors.
self.camera = Camera(self.player, self.dim)
self.rss_unstructured_scene_visualizer = RssUnstructuredSceneVisualizer(self.player, self.world, self.dim)
self.rss_bounding_box_visualizer = RssBoundingBoxVisualizer(self.dim, self.world, self.camera.sensor)
self.rss_sensor = RssSensor(self.player, self.world,
self.rss_unstructured_scene_visualizer, self.rss_bounding_box_visualizer, self.hud.rss_state_visualizer)
def tick(self, clock):
self.hud.tick(self.player, clock)
def toggle_recording(self):
if not self.recording:
dir_name = "_out%04d" % self.recording_dir_num
while os.path.exists(dir_name):
self.recording_dir_num += 1
dir_name = "_out%04d" % self.recording_dir_num
self.recording_frame_num = 0
os.mkdir(dir_name)
else:
self.hud.notification('Recording finished (folder: _out%04d)' % self.recording_dir_num)
self.recording = not self.recording
def render(self, display):
self.camera.render(display)
self.rss_bounding_box_visualizer.render(display, self.camera.current_frame)
self.rss_unstructured_scene_visualizer.render(display)
self.hud.render(display)
if self.recording:
pygame.image.save(display, "_out%04d/%08d.bmp" % (self.recording_dir_num, self.recording_frame_num))
self.recording_frame_num += 1
def destroy(self):
# stop from ticking
if self.world_tick_id:
self.world.remove_on_tick(self.world_tick_id)
if self.camera:
self.camera.destroy()
if self.rss_sensor:
self.rss_sensor.destroy()
if self.rss_unstructured_scene_visualizer:
self.rss_unstructured_scene_visualizer.destroy()
if self.player:
self.player.destroy()
# ==============================================================================
# -- Camera --------------------------------------------------------------------
# ==============================================================================
class Camera(object):
def __init__(self, parent_actor, display_dimensions):
self.surface = None
self._parent = parent_actor
self.current_frame = None
bp_library = self._parent.get_world().get_blueprint_library()
bp = bp_library.find('sensor.camera.rgb')
bp.set_attribute('image_size_x', str(display_dimensions[0]))
bp.set_attribute('image_size_y', str(display_dimensions[1]))
self.sensor = self._parent.get_world().spawn_actor(bp, carla.Transform(carla.Location(
x=-5.5, z=2.5), carla.Rotation(pitch=8.0)), attach_to=self._parent, attachment_type=carla.AttachmentType.SpringArm)
# We need to pass the lambda a weak reference to self to avoid
# circular reference.
weak_self = weakref.ref(self)
self.sensor.listen(lambda image: Camera._parse_image(weak_self, image))
def destroy(self):
self.sensor.stop()
self.sensor.destroy()
self.sensor = None
def render(self, display):
if self.surface is not None:
display.blit(self.surface, (0, 0))
@staticmethod
def _parse_image(weak_self, image):
self = weak_self()
if not self:
return
self.current_frame = image.frame
image.convert(cc.Raw)
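        # The sensor delivers 32-bit BGRA pixels; the slicing below drops the alpha
        # channel and reverses the channel order to RGB before building the pygame surface.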
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
# ==============================================================================
# -- VehicleControl -----------------------------------------------------------
# ==============================================================================
class VehicleControl(object):
    """Class that handles keyboard input."""
    MOUSE_STEERING_RANGE = 200
    signal_received = False
def __init__(self, world, start_in_autopilot):
self._autopilot_enabled = start_in_autopilot
self._world = world
self._control = carla.VehicleControl()
self._lights = carla.VehicleLightState.NONE
world.player.set_autopilot(self._autopilot_enabled)
self._restrictor = carla.RssRestrictor()
self._vehicle_physics = world.player.get_physics_control()
world.player.set_light_state(self._lights)
self._steer_cache = 0.0
self._mouse_steering_center = None
self._surface = pygame.Surface((self.MOUSE_STEERING_RANGE * 2, self.MOUSE_STEERING_RANGE * 2))
self._surface.set_colorkey(pygame.Color('black'))
self._surface.set_alpha(60)
line_width = 2
pygame.draw.polygon(self._surface,
(0, 0, 255),
[
(0, 0),
(0, self.MOUSE_STEERING_RANGE * 2 - line_width),
(self.MOUSE_STEERING_RANGE * 2 - line_width,
self.MOUSE_STEERING_RANGE * 2 - line_width),
(self.MOUSE_STEERING_RANGE * 2 - line_width, 0),
(0, 0)
], line_width)
pygame.draw.polygon(self._surface,
(0, 0, 255),
[
(0, self.MOUSE_STEERING_RANGE),
(self.MOUSE_STEERING_RANGE * 2, self.MOUSE_STEERING_RANGE)
], line_width)
pygame.draw.polygon(self._surface,
(0, 0, 255),
[
(self.MOUSE_STEERING_RANGE, 0),
(self.MOUSE_STEERING_RANGE, self.MOUSE_STEERING_RANGE * 2)
], line_width)
world.hud.notification("Press 'H' or '?' for help.", seconds=4.0)
def render(self, display):
if self._mouse_steering_center:
display.blit(
self._surface, (self._mouse_steering_center[0] - self.MOUSE_STEERING_RANGE, self._mouse_steering_center[1] - self.MOUSE_STEERING_RANGE))
@staticmethod
def signal_handler(signum, _):
print('\nReceived signal {}. Trigger stopping...'.format(signum))
VehicleControl.signal_received = True
def parse_events(self, world, clock):
if VehicleControl.signal_received:
print('\nAccepted signal. Stopping loop...')
return True
if isinstance(self._control, carla.VehicleControl):
current_lights = self._lights
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.KEYUP:
if self._is_quit_shortcut(event.key):
return True
elif event.key == K_BACKSPACE:
if self._autopilot_enabled:
world.player.set_autopilot(False)
world.restart()
world.player.set_autopilot(True)
else:
world.restart()
elif event.key == K_F1:
world.hud.toggle_info()
elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):
world.hud.help.toggle()
elif event.key == K_TAB:
world.rss_unstructured_scene_visualizer.toggle_camera()
elif event.key == K_n:
world.toggle_pause()
elif event.key == K_r:
world.toggle_recording()
elif event.key == K_F2:
if self._world and self._world.rss_sensor:
self._world.rss_sensor.toggle_debug_visualization_mode()
elif event.key == K_b:
if self._world and self._world.rss_sensor:
if self._world.rss_sensor.sensor.road_boundaries_mode == carla.RssRoadBoundariesMode.Off:
self._world.rss_sensor.sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.On
print("carla.RssRoadBoundariesMode.On")
else:
self._world.rss_sensor.sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.Off
print("carla.RssRoadBoundariesMode.Off")
elif event.key == K_g:
if self._world and self._world.rss_sensor:
self._world.rss_sensor.drop_route()
if isinstance(self._control, carla.VehicleControl):
if event.key == K_q:
self._control.gear = 1 if self._control.reverse else -1
elif event.key == K_p and not pygame.key.get_mods() & KMOD_CTRL:
self._autopilot_enabled = not self._autopilot_enabled
world.player.set_autopilot(self._autopilot_enabled)
world.hud.notification(
'Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))
elif event.key == K_l and pygame.key.get_mods() & KMOD_CTRL:
current_lights ^= carla.VehicleLightState.Special1
elif event.key == K_l and pygame.key.get_mods() & KMOD_SHIFT:
current_lights ^= carla.VehicleLightState.HighBeam
elif event.key == K_l:
# Use 'L' key to switch between lights:
# closed -> position -> low beam -> fog
if not self._lights & carla.VehicleLightState.Position:
world.hud.notification("Position lights")
current_lights |= carla.VehicleLightState.Position
else:
world.hud.notification("Low beam lights")
current_lights |= carla.VehicleLightState.LowBeam
if self._lights & carla.VehicleLightState.LowBeam:
world.hud.notification("Fog lights")
current_lights |= carla.VehicleLightState.Fog
if self._lights & carla.VehicleLightState.Fog:
world.hud.notification("Lights off")
current_lights ^= carla.VehicleLightState.Position
current_lights ^= carla.VehicleLightState.LowBeam
current_lights ^= carla.VehicleLightState.Fog
elif event.key == K_i:
current_lights ^= carla.VehicleLightState.Interior
elif event.key == K_z:
current_lights ^= carla.VehicleLightState.LeftBlinker
elif event.key == K_x:
current_lights ^= carla.VehicleLightState.RightBlinker
elif event.type == MOUSEBUTTONDOWN:
# store current mouse position for mouse-steering
if event.button == 1:
self._mouse_steering_center = event.pos
elif event.type == MOUSEBUTTONUP:
if event.button == 1:
self._mouse_steering_center = None
if not self._autopilot_enabled:
prev_steer_cache = self._steer_cache
self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
if pygame.mouse.get_pressed()[0]:
self._parse_mouse(pygame.mouse.get_pos())
self._control.reverse = self._control.gear < 0
vehicle_control = self._control
world.hud.original_vehicle_control = vehicle_control
world.hud.restricted_vehicle_control = vehicle_control
# limit speed to 30kmh
v = self._world.player.get_velocity()
if (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)) > 30.0:
self._control.throttle = 0
# if self._world.rss_sensor and self._world.rss_sensor.ego_dynamics_on_route and not self._world.rss_sensor.ego_dynamics_on_route.ego_center_within_route:
# print ("Not on route!" + str(self._world.rss_sensor.ego_dynamics_on_route))
if self._restrictor:
rss_proper_response = self._world.rss_sensor.proper_response if self._world.rss_sensor and self._world.rss_sensor.response_valid else None
if rss_proper_response:
if not (pygame.key.get_mods() & KMOD_CTRL):
vehicle_control = self._restrictor.restrict_vehicle_control(
vehicle_control, rss_proper_response, self._world.rss_sensor.ego_dynamics_on_route, self._vehicle_physics)
world.hud.restricted_vehicle_control = vehicle_control
world.hud.allowed_steering_ranges = self._world.rss_sensor.get_steering_ranges()
if world.hud.original_vehicle_control.steer != world.hud.restricted_vehicle_control.steer:
self._steer_cache = prev_steer_cache
# Set automatic control-related vehicle lights
if vehicle_control.brake:
current_lights |= carla.VehicleLightState.Brake
else: # Remove the Brake flag
current_lights &= carla.VehicleLightState.All ^ carla.VehicleLightState.Brake
if vehicle_control.reverse:
current_lights |= carla.VehicleLightState.Reverse
else: # Remove the Reverse flag
current_lights &= carla.VehicleLightState.All ^ carla.VehicleLightState.Reverse
if current_lights != self._lights: # Change the light state only if necessary
self._lights = current_lights
world.player.set_light_state(carla.VehicleLightState(self._lights))
world.player.apply_control(vehicle_control)
def _parse_vehicle_keys(self, keys, milliseconds):
if keys[K_UP] or keys[K_w]:
self._control.throttle = min(self._control.throttle + 0.2, 1)
else:
self._control.throttle = max(self._control.throttle - 0.2, 0)
if keys[K_DOWN] or keys[K_s]:
self._control.brake = min(self._control.brake + 0.2, 1)
else:
self._control.brake = max(self._control.brake - 0.2, 0)
steer_increment = 5e-4 * milliseconds
if keys[K_LEFT] or keys[K_a]:
if self._steer_cache > 0:
self._steer_cache = 0
else:
self._steer_cache -= steer_increment
elif keys[K_RIGHT] or keys[K_d]:
if self._steer_cache < 0:
self._steer_cache = 0
else:
self._steer_cache += steer_increment
elif self._steer_cache > 0:
self._steer_cache = max(self._steer_cache - steer_increment, 0.0)
elif self._steer_cache < 0:
self._steer_cache = min(self._steer_cache + steer_increment, 0.0)
else:
self._steer_cache = 0
self._steer_cache = min(1.0, max(-1.0, self._steer_cache))
self._control.steer = round(self._steer_cache, 1)
self._control.hand_brake = keys[K_SPACE]
def _parse_mouse(self, pos):
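        # Mouse steering: the horizontal offset from the press point maps to steering, and
        # the vertical offset maps to throttle (above the press point) or brake (below it),
        # each normalized by MOUSE_STEERING_RANGE.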
if not self._mouse_steering_center:
return
lateral = float(pos[0] - self._mouse_steering_center[0])
longitudinal = float(pos[1] - self._mouse_steering_center[1])
max_val = self.MOUSE_STEERING_RANGE
lateral = -max_val if lateral < -max_val else max_val if lateral > max_val else lateral
longitudinal = -max_val if longitudinal < -max_val else max_val if longitudinal > max_val else longitudinal
self._control.steer = lateral / max_val
if longitudinal < 0.0:
self._control.throttle = -longitudinal / max_val
self._control.brake = 0.0
elif longitudinal > 0.0:
self._control.throttle = 0.0
self._control.brake = longitudinal / max_val
@staticmethod
def _is_quit_shortcut(key):
return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)
# ==============================================================================
# -- HUD -----------------------------------------------------------------------
# ==============================================================================
class HUD(object):
def __init__(self, width, height, world):
self.dim = (width, height)
self._world = world
self.map_name = world.get_map().name
font = pygame.font.Font(pygame.font.get_default_font(), 20)
font_name = 'courier' if os.name == 'nt' else 'mono'
fonts = [x for x in pygame.font.get_fonts() if font_name in x]
default_font = 'ubuntumono'
mono = default_font if default_font in fonts else fonts[0]
mono = pygame.font.match_font(mono)
self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)
self._notifications = FadingText(font, (width, 40), (0, height - 40))
self.help = HelpText(pygame.font.Font(mono, 16), width, height)
self.server_fps = 0
self.frame = 0
self.simulation_time = 0
self.original_vehicle_control = None
self.restricted_vehicle_control = None
self.allowed_steering_ranges = []
self._show_info = True
self._info_text = []
self._server_clock = pygame.time.Clock()
self.rss_state_visualizer = RssStateVisualizer(self.dim, self._font_mono, self._world)
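    # Called on every server tick to track the simulation frame, elapsed time and server FPS.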
def on_world_tick(self, timestamp):
self._server_clock.tick()
self.server_fps = self._server_clock.get_fps()
self.frame = timestamp.frame
self.simulation_time = timestamp.elapsed_seconds
def tick(self, player, clock):
self._notifications.tick(clock)
if not self._show_info:
return
t = player.get_transform()
v = player.get_velocity()
c = player.get_control()
self._info_text = [
'Server: % 16.0f FPS' % self.server_fps,
'Client: % 16.0f FPS' % clock.get_fps(),
'Map: % 20s' % self.map_name,
'',
'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),
'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),
'Heading: % 20.2f' % math.radians(t.rotation.yaw),
'']
if self.original_vehicle_control:
orig_control = self.original_vehicle_control
restricted_control = self.restricted_vehicle_control
allowed_steering_ranges = self.allowed_steering_ranges
self._info_text += [
('Throttle:', orig_control.throttle, 0.0, 1.0, restricted_control.throttle),
('Steer:', orig_control.steer, -1.0, 1.0, restricted_control.steer, allowed_steering_ranges),
('Brake:', orig_control.brake, 0.0, 1.0, restricted_control.brake)]
self._info_text += [
('Reverse:', c.reverse),
'']
def toggle_info(self):
self._show_info = not self._show_info
def notification(self, text, seconds=2.0):
self._notifications.set_text(text, seconds=seconds)
def error(self, text):
self._notifications.set_text('Error: %s' % text, (255, 0, 0))
def render(self, display):
if self._show_info:
info_surface = pygame.Surface((220, self.dim[1]))
info_surface.set_alpha(100)
display.blit(info_surface, (0, 0))
v_offset = 4
bar_h_offset = 100
bar_width = 106
for item in self._info_text:
text_color = (255, 255, 255)
if v_offset + 18 > self.dim[1]:
break
if isinstance(item, list):
if len(item) > 1:
points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]
pygame.draw.lines(display, (255, 136, 0), False, points, 2)
item = None
v_offset += 18
elif isinstance(item, tuple):
if isinstance(item[1], bool):
rect = pygame.Rect((bar_h_offset, v_offset + 2), (10, 10))
pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)
else:
# draw allowed steering ranges
if len(item) == 6 and item[2] < 0.0:
for steering_range in item[5]:
starting_value = min(steering_range[0], steering_range[1])
length = (max(steering_range[0], steering_range[1]) -
min(steering_range[0], steering_range[1])) / 2
rect = pygame.Rect(
(bar_h_offset + (starting_value + 1) * (bar_width / 2), v_offset + 2), (length * bar_width, 14))
pygame.draw.rect(display, (0, 255, 0), rect)
# draw border
rect_border = pygame.Rect((bar_h_offset, v_offset + 2), (bar_width, 14))
pygame.draw.rect(display, (255, 255, 255), rect_border, 1)
# draw value / restricted value
input_value_rect_fill = 0
if len(item) >= 5:
if item[1] != item[4]:
input_value_rect_fill = 1
f = (item[4] - item[2]) / (item[3] - item[2])
if item[2] < 0.0:
rect = pygame.Rect(
(bar_h_offset + 1 + f * (bar_width - 6), v_offset + 3), (12, 12))
else:
rect = pygame.Rect((bar_h_offset + 1, v_offset + 3), (f * bar_width, 12))
pygame.draw.rect(display, (255, 0, 0), rect)
f = (item[1] - item[2]) / (item[3] - item[2])
rect = None
if item[2] < 0.0:
rect = pygame.Rect((bar_h_offset + 2 + f * (bar_width - 14), v_offset + 4), (10, 10))
else:
if item[1] != 0:
rect = pygame.Rect((bar_h_offset + 2, v_offset + 4), (f * (bar_width - 4), 10))
if rect:
pygame.draw.rect(display, (255, 255, 255), rect, input_value_rect_fill)
item = item[0]
if item: # At this point has to be a str.
surface = self._font_mono.render(item, True, text_color)
display.blit(surface, (8, v_offset))
v_offset += 18
self.rss_state_visualizer.render(display, v_offset)
self._notifications.render(display)
self.help.render(display)
# ==============================================================================
# -- FadingText ----------------------------------------------------------------
# ==============================================================================
class FadingText(object):
def __init__(self, font, dim, pos):
self.font = font
self.dim = dim
self.pos = pos
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
def set_text(self, text, color=(255, 255, 255), seconds=2.0):
text_texture = self.font.render(text, True, color)
self.surface = pygame.Surface(self.dim)
self.seconds_left = seconds
self.surface.fill((0, 0, 0, 0))
self.surface.blit(text_texture, (10, 11))
def tick(self, clock):
delta_seconds = 1e-3 * clock.get_time()
self.seconds_left = max(0.0, self.seconds_left - delta_seconds)
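        # Fade the notification out as its remaining display time approaches zero.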
self.surface.set_alpha(500.0 * self.seconds_left)
def render(self, display):
display.blit(self.surface, self.pos)
# ==============================================================================
# -- HelpText ------------------------------------------------------------------
# ==============================================================================
class HelpText(object):
"""Helper class to handle text output using pygame"""
def __init__(self, font, width, height):
lines = __doc__.split('\n')
self.font = font
self.line_space = 18
self.dim = (780, len(lines) * self.line_space + 12)
self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
self.surface.fill((0, 0, 0, 0))
for n, line in enumerate(lines):
text_texture = self.font.render(line, True, (255, 255, 255))
self.surface.blit(text_texture, (22, n * self.line_space))
self._render = False
self.surface.set_alpha(220)
def toggle(self):
self._render = not self._render
def render(self, display):
if self._render:
display.blit(self.surface, self.pos)
# ==============================================================================
# -- game_loop() ---------------------------------------------------------------
# ==============================================================================
def game_loop(args):
pygame.init()
pygame.font.init()
world = None
try:
client = carla.Client(args.host, args.port)
client.set_timeout(2.0)
display = pygame.display.set_mode(
(args.width, args.height),
pygame.HWSURFACE | pygame.DOUBLEBUF)
world = World(client.get_world(), args)
controller = VehicleControl(world, args.autopilot)
clock = pygame.time.Clock()
while True:
clock.tick_busy_loop(60)
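            # parse_events() returns True when the user quits; leave the loop and clean up in the finally block.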
if controller.parse_events(world, clock):
return
world.tick(clock)
world.render(display)
controller.render(display)
pygame.display.flip()
finally:
if world is not None:
print('Destroying the world...')
world.destroy()
print('Destroyed!')
pygame.quit()
# ==============================================================================
# -- main() --------------------------------------------------------------------
# ==============================================================================
def main():
argparser = argparse.ArgumentParser(
description='CARLA Manual Control Client RSS')
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'--host',
metavar='H',
default='127.0.0.1',
help='IP of the host server (default: 127.0.0.1)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to listen to (default: 2000)')
argparser.add_argument(
'-a', '--autopilot',
action='store_true',
help='enable autopilot')
argparser.add_argument(
'--res',
metavar='WIDTHxHEIGHT',
default='1280x720',
help='window resolution (default: 1280x720)')
argparser.add_argument(
'--filter',
metavar='PATTERN',
default='vehicle.*',
help='actor filter (default: "vehicle.*")')
argparser.add_argument(
'--rolename',
metavar='NAME',
default='hero',
help='actor role name (default: "hero")')
argparser.add_argument(
'--externalActor',
action='store_true',
help='attaches to externally created actor by role name')
args = argparser.parse_args()
args.width, args.height = [int(x) for x in args.res.split('x')]
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('listening to server %s:%s', args.host, args.port)
print(__doc__)
signal.signal(signal.SIGINT, VehicleControl.signal_handler)
try:
game_loop(args)
except KeyboardInterrupt:
print('\nCancelled by user. Bye!')
if __name__ == '__main__':
main()
|
test-crates/pyo3-mixed/test_pyo3_mixed.py
|
thedrow/maturin
| 854 |
108932
|
#!/usr/bin/env python3
import pyo3_mixed
def test_get_42():
assert pyo3_mixed.get_42() == 42
|