max_stars_repo_path (string, length 4–245) | max_stars_repo_name (string, length 7–115) | max_stars_count (int64, 101–368k) | id (string, length 2–8) | content (string, length 6–1.03M) |
---|---|---|---|---|
Bot/cogs/mod.py | Shuri2060/Nurevam | 145 | 91061 |
from discord.ext import commands
from .utils import utils
import datetime
import asyncio
import discord
import logging
log = logging.getLogger(__name__)
def check_roles(ctx):
if ctx.message.author.id == 1<PASSWORD>:
return True
return utils.check_roles(ctx, "Mod", "admin_roles")
def has_mute_role(ctx):
return utils.redis.smembers("{}:Mod:mute_roles".format(ctx.message.guild.id))
class get_person(commands.MemberConverter):
def __init__(self, *, lower=False):
self.lower = lower
super().__init__()
class Mod(commands.Cog):
"""
A mod tool for Mods.
"""
def __init__(self, bot):
self.bot = bot
self.redis = bot.db.redis
self.bot.say_edit = bot.say
def __local_check(self,ctx):
return utils.is_enable(ctx,"mod") or ctx.message.author.id == self.bot.owner.id
def delete_mine(self,m):
return m.author.id == self.bot.user.id
#########################################
# _____ _ #
# / ____| | | #
# | | | | ___ __ _ _ __ #
# | | | | / _ \ / _` | | '_ \ #
# | |____ | | | __/ | (_| | | | | | #
# \_____| |_| \___| \__,_| |_| |_| #
#########################################
@commands.group(brief="Allow to clean bot itself, have subcommand",invoke_without_command=True)
@commands.check(check_roles)
# async def clean(self, ctx, *, limit:int=100):
async def clean(self, ctx, limit: int = 100, user: discord.Member = None):
"""
Clears the bot's own messages.
Mention a user to clear that user's messages instead.
"""
if limit > 2000:
return await self.bot.say(ctx, content="Unable to delete: {limit} messages requested, but the limit is 2000.".format(limit=limit))
if user:
counter = await ctx.message.channel.purge(check=lambda m:m.author.id == user.id,limit=limit)
await self.bot.say(ctx,content = "```py\nI cleared {} posts from {}```".format(len(counter),user.name))
else:
counter = await ctx.message.channel.purge(limit = limit,check=self.delete_mine)
await self.bot.say(ctx,content = "```py\nI cleared {} posts of mine\n```".format(len(counter)))
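# Illustrative usage (added note, not in the original; "!" stands for whatever prefix this bot uses):
#   !clean 50          -> scans the last 50 messages and deletes the bot's own
#   !clean 50 @someone -> scans the last 50 messages and deletes those written by the mentioned member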
@clean.command(brief= "Is able to clear a certain role's messages",pass_context=True, invoke_without_command=True)
@commands.check(check_roles)
@commands.bot_has_permissions(manage_messages=True)
async def role(self,ctx,roles : discord.Role,limit : int=100):
"""
<prefix> role <the role> <optional, number of messages, default: 100>
Is able to clear messages of all users who have this role.
"""
def delete_role(m):
return roles.id in [r.id for r in m.author.roles]
counter = await ctx.message.channel.purge(limit=limit,check=delete_role)
await self.bot.say(ctx, content="```py\nI cleared {} posts from users with the role {}\n```".format(len(counter), roles.name))
@clean.command(brief="Is able to clear a certain user's messages",invoke_without_command=True)
@commands.check(check_roles)
@commands.bot_has_permissions(manage_messages=True)
async def person(self,ctx,user: discord.Member,*,limit: int = 100):
"""
<prefix> person <the person> <optional, number of messages, default 100>
Is able to clear the messages of a certain person.
"""
def delete_player(m):
return m.author.id == user.id
counter = await ctx.message.channel.purge(check=delete_player,limit=limit)
await self.bot.say(ctx,content = "```py\nI cleared {} posts from {}```".format(len(counter),user.name))
@clean.command(name = "all",brief="Allow to clear all message", invoke_without_command=True)
@commands.check(check_roles)
@commands.bot_has_permissions(manage_messages=True)
async def _all(self,ctx,*,limit: int=100):
"""
<prefix> all <optional, number of messages, default 100>
Clears all messages in the channel, regardless of author.
"""
counter = await ctx.message.channel.purge(limit =limit)
await self.bot.say(ctx,content = "```py\nI cleared {} posts```".format(len(counter)))
#############################################################
# _ __ _ _ __ ____ #
# | |/ / (_) | | / / | _ \ #
# | ' / _ ___ | | __ / / | |_) | __ _ _ __ #
# | < | | / __| | |/ / / / | _ < / _` | | '_ \ #
# | . \ | | | (__ | < / / | |_) | | (_| | | | | |#
# |_|\_\ |_| \___| |_|\_\ /_/ |____/ \__,_| |_| |_|#
#############################################################
def format_reason(self,ctx,reason):
if reason is None:
reason = "Request by {}".format(ctx.message.author)
else:
reason += " Request by {}".format(ctx.message.author)
return reason
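# For example (added note, not in the original): with a command author "Alice#0001",
#   format_reason(ctx, None)   -> "Request by Alice#0001"
#   format_reason(ctx, "spam") -> "spam Request by Alice#0001"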
@commands.command(brief="Is able to kick a user")
@commands.check(check_roles)
@commands.bot_has_permissions(kick_members=True)
async def kick(self,ctx,user:discord.Member,*,reason:str = None):
"""
<prefix> kick <user name>
Mentioning is a faster way to get the user.
Is able to kick a user from guild.
"""
await ctx.message.guild.kick(user,reason = self.format_reason(ctx,reason))
await self.bot.say(ctx,content = "I have kicked {}".format(user.name))
@commands.command(brief="Is able to ban a user")
@commands.check(check_roles)
@commands.bot_has_permissions( ban_members=True)
async def ban(self,ctx,user:discord.Member,*,reason:str = None):
"""
<prefix> ban <user name> <optional reason>
Mentioning is a faster way to get the user.
Bans a user from the guild.
"""
await ctx.message.guild.ban(user,reason = self.format_reason(ctx,reason))
await self.bot.say(ctx,content = "I have banned {}".format(user.name))
@commands.command(brief="Is able to softban a user which is equal to kicking him and deleting his messages")
@commands.check(check_roles)
@commands.bot_has_permissions( ban_members=True)
async def softban(self,ctx,user:discord.Member,*,reason:str = None):
"""
<prefix> softban <user name> <optional reason>
This is a kick plus message cleanup:
the user is banned and immediately unbanned, which removes their recent messages.
"""
await ctx.message.guild.ban(user,reason = self.format_reason(ctx,reason))
await ctx.message.guild.unban(user)
await self.bot.say(ctx,content = "I have softbanned {}".format(user.name))
#################################
# _____ _ #
# | __ \ | | #
# | |__) | ___ | | ___ #
# | _ / / _ \ | | / _ \ #
# | | \ \ | (_) | | | | __/ #
# |_| \_\ \___/ |_| \___| #
#################################
@commands.group(name = "role",brief="Multi subcommand related to role",invoke_without_command=True)
@commands.check(check_roles)
@commands.bot_has_permissions(manage_roles=True)
async def _role(self, ctx):
"""
A command group for role management.
Run
<prefix> help role
to see more information about its subcommands.
"""
return
@_role.command(brief="Is able to add a role to a user")
@commands.check(check_roles)
@commands.bot_has_permissions(manage_roles=True)
async def add(self,ctx,user:discord.Member,*role:discord.Role):
"""
<prefix> add <user name> <the role>
Adds a role to a member; useful for people on mobile.
You can also add multiple roles to a member at the same time.
Note: role names are case-sensitive; include any capital letters when typing the role name.
"""
await user.add_roles(*role,reason = "Request by {}".format(ctx.message.author))
await self.bot.say(ctx,content = "Added a role to {}".format(user.name))
@_role.command(brief="Is able to remove a role from a user")
@commands.check(check_roles)
@commands.bot_has_permissions(manage_roles=True)
async def remove(self,ctx,user:discord.Member,*role:discord.Role):
"""
<prefix> remove <user name> <the role>
Removes a role from a member; useful for people on mobile.
You can also remove multiple roles from a member at the same time.
Note: role names are case-sensitive; include any capital letters when typing the role name.
"""
await user.remove_roles(*role,reason ="Request by {}".format(ctx.message.author))
await self.bot.say(ctx,content = "Remove role from {}".format(user.name))
##########################
# __ __ _ #
# | \/ |_ _| |_ ___ #
# | |\/| | || | _/ -_)#
# |_| |_|\_,_|\__\___|#
##########################
@commands.command(brief="Mute user")
@commands.check(check_roles)
@commands.check(has_mute_role)
@commands.bot_has_permissions(manage_roles=True)
async def mute(self,ctx,user:discord.Member):
mute_role = await self.redis.smembers("{}:Mod:mute_roles".format(ctx.message.guild.id))
guild = ctx.message.guild
if guild.me.top_role.permissions.manage_roles: # if got Manage roles permission, can grant roles
role = [x for x in guild.roles if str(x.id) in mute_role]
await user.add_roles(*role,reason = "{} requests with mute command".format(ctx.message.author))
await self.bot.say(ctx,content = "Done muting {}".format(user.mention))
@commands.command(brief="Unmute user")
@commands.check(check_roles)
@commands.check(has_mute_role)
@commands.bot_has_permissions(manage_roles=True)
async def unmute(self,ctx,user:discord.Member):
mute_role = await self.redis.smembers("{}:Mod:mute_roles".format(ctx.message.guild.id))
guild = ctx.message.guild
if guild.me.top_role.permissions.manage_roles: # if got Manage roles permission, can grant roles
role = [x for x in guild.roles if str(x.id) in mute_role]
try:
await user.remove_roles(*role,reason = "{} requests with unmute command".format(ctx.message.author))
await self.bot.say(ctx,content = "Done unmuting {}".format(user.mention))
except:
pass
def setup(bot):
bot.add_cog(Mod(bot))
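# Added note (not part of the original file): with discord.py's extension machinery, this cog
# would typically be loaded from the bot's entry point, e.g.
#   bot.load_extension("cogs.mod")  # module path is an assumption; adjust to this project's layout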
| examples/graph_learning/train_robot_value_prediction.py | ONLYA/RoboGrammar | 156 | 91069 |
'''
train_robot_value_prediction.py
use Graph Neural Network to learn a value function for robot designs.
Learned GNN:
input: a graph of the robot design with all nodes being terminal nodes.
output: the predicted reward for the design.
Argument:
--dataset-name: the name of the dataset
--use-cuda: whether to use the GPU for training [default: False]
'''
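# Example invocation (added note, not in the original; the dataset name below is just the script's default):
#   python train_robot_value_prediction.py --dataset-name flat_mar23 --use-cuda
# This expects data/<dataset-name>.csv under this script's directory, or a previously built
# train/val/test pickle split in data/<dataset-name>/, as handled by the loading code below.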
import sys
import os
project_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../')
current_dir = os.path.dirname(os.path.abspath(__file__))
# import python packages
from math import ceil
import time
import numpy as np
import pickle
import argparse
import random
from copy import deepcopy
# import third-party packages
import torch
import torch.nn.functional as F
from torch_geometric.datasets import TUDataset
import torch_geometric.transforms as T
from torch_geometric.data import DenseDataLoader
from torch_geometric.nn import DenseSAGEConv, dense_diff_pool
from torch_geometric.data import InMemoryDataset
from torch_geometric.data.data import Data
torch.set_printoptions(threshold=sys.maxsize)
import IPython
# import our packages
import parse_log_file
from common import *
# parse argument
parser = argparse.ArgumentParser()
parser.add_argument('--dataset-name', type = str, default = 'flat_mar23')
parser.add_argument('--use-cuda', default = False, action = 'store_true')
parser.add_argument('--save-interval', type = int, default = 10)
parser.add_argument('--load-path', type = str, default = None)
parser.add_argument('--test', default = False, action = 'store_true')
args = parser.parse_args()
dataset_name = args.dataset_name
save_dir = os.path.join(current_dir, 'trained_models', 'value_function', dataset_name, get_time_stamp())
# whether to construct the dataset from raw data
load_data = False
# dataset path
dataset_dir = os.path.join(current_dir, 'data', dataset_name)
testset_path = os.path.join(dataset_dir, 'test_loader')
valset_path = os.path.join(dataset_dir, 'val_loader')
trainset_path = os.path.join(dataset_dir, 'train_loader')
testset_path = os.path.join(dataset_dir, 'test_loader')
# load the pre-processed dataset; build it if it does not exist
if os.path.isfile(testset_path) and os.path.isfile(valset_path) and os.path.isfile(trainset_path):
with open(testset_path, 'rb') as test_file, open(valset_path, 'rb') as val_file, open(trainset_path, 'rb') as train_file:
test_dataset = pickle.load(test_file)
train_dataset = pickle.load(train_file)
val_dataset = pickle.load(val_file)
max_nodes = 19
num_features = 32 # 31 for previous configuration and 32 for current one (including torque)
else:
os.makedirs(dataset_dir, exist_ok = True)
raw_dataset_path = os.path.join(current_dir, 'data', args.dataset_name + '.csv')
all_link_features, all_link_adj, all_rewards \
= parse_log_file.main(raw_dataset_path, os.path.join(project_dir, 'data/designs/grammar_jan21.dot'))
# experimental postprocessing
# step 1: make symmetric
all_link_adj_symmetric = [link_adj + np.transpose(link_adj) for link_adj in all_link_adj]
# step 2: Add blank rows, pad with 0s, and fill out mask:
max_nodes = max([feat.shape[0] for feat in all_link_features])
def pad(array, shape):
"""
array: Array to be padded
reference: Reference array with the desired shape
offsets: list of offsets (number of elements must be equal to the dimension of the array)
"""
# Create an array of zeros with the reference shape
result = np.zeros(shape)
if len(shape) == 1:
result[:array.shape[0]] = array  # 1-D case: copy into the first array.shape[0] entries
elif len(shape) == 2:
result[:array.shape[0], :array.shape[1]] = array
else:
raise Exception('only 1 and 2d supported for now')
return result
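# For instance (added note, not in the original): pad(np.ones((2, 3)), (4, 5)) returns a 4x5
# array whose top-left 2x3 block is ones and whose remaining entries are zeros.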
all_link_adj_symmetric_pad = [pad(adj, (max_nodes, max_nodes)) for adj in all_link_adj_symmetric]
all_features_pad = [pad(feat, (max_nodes, feat.shape[1])) for feat in all_link_features]
def create_mask(feat, max_nodes):
return np.array([True if i < feat.shape[0] else False for i in range(max_nodes)])
all_masks = [create_mask(feat, max_nodes) for feat in all_link_features]
num_features = all_features_pad[0].shape[1]
print('max-nodes = ', max_nodes, ', num-features = ', num_features)
#step 3: Create dataset object
data = [Data(adj=torch.from_numpy(adj).float(),
mask=torch.from_numpy(mask),
x=torch.from_numpy(x[:, :num_features]).float(),
y=torch.from_numpy(np.array([y])).float() ) for adj, mask, x, y in zip(all_link_adj_symmetric_pad, all_masks, all_features_pad, all_rewards)]
random.shuffle(data)
n_test = (len(data) + 2) // 3
random.shuffle(data)
known_dataset = data[:-n_test]
unknown_dataset = data[-n_test:]
# random.shuffle(known_dataset)
n_val = (len(known_dataset) + 9) // 10
train_dataset = known_dataset[:-n_val]
val_dataset = known_dataset[-n_val:]
test_dataset = unknown_dataset
with open(testset_path, 'wb') as test_file, open(valset_path, 'wb') as val_file, open(trainset_path, 'wb') as train_file:
pickle.dump(test_dataset, test_file)
pickle.dump(train_dataset, train_file)
pickle.dump(val_dataset, val_file)
# print dataset info
print_info('dataset loaded:')
print_info('size of training set: {}'.format(len(train_dataset)))
print_info('size of validation set: {}'.format(len(val_dataset)))
print_info('size of testing set: {}'.format(len(test_dataset)))
y_min, y_max = 10000000.0, -10000000.0
for data in train_dataset:
y_min = min(y_min, data.y[0])
y_max = max(y_max, data.y[0])
print('training set: y min = ', y_min, ', y max = ', y_max)
y_min, y_max = 10000000.0, -10000000.0
for data in test_dataset:
y_min = min(y_min, data.y[0])
y_max = max(y_max, data.y[0])
print('testing set: y min = ', y_min, ', y max = ', y_max)
# construct batches
test_loader = DenseDataLoader(test_dataset, batch_size=20)
val_loader = DenseDataLoader(val_dataset, batch_size=20)
# random.shuffle(train_dataset)
train_loader = DenseDataLoader(train_dataset, batch_size=20)
class GNN(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels,
normalize=False, batch_normalization=False, add_loop=False, lin=True):
super(GNN, self).__init__()
self.add_loop = add_loop
self.batch_normalization = batch_normalization
self.conv1 = DenseSAGEConv(in_channels, hidden_channels, normalize)
self.conv2 = DenseSAGEConv(hidden_channels, hidden_channels, normalize)
self.conv3 = DenseSAGEConv(hidden_channels, out_channels, normalize)
if self.batch_normalization:
self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
self.bn2 = torch.nn.BatchNorm1d(hidden_channels)
self.bn3 = torch.nn.BatchNorm1d(out_channels)
if lin is True:
self.lin = torch.nn.Linear(2 * hidden_channels + out_channels,
out_channels)
else:
self.lin = None
def bn(self, i, x):
batch_size, num_nodes, num_features = x.size()
x = x.view(-1, num_features)
x = getattr(self, 'bn{}'.format(i))(x)
x = x.view(batch_size, num_nodes, num_features)
return x
def forward(self, x, adj, mask=None):
batch_size, num_nodes, in_channels = x.size()
x0 = x
#IPython.embed()
if self.batch_normalization:
x1 = self.bn(1, F.relu(self.conv1(x0, adj, mask, self.add_loop)))
x2 = self.bn(2, F.relu(self.conv2(x1, adj, mask, self.add_loop)))
x3 = self.bn(3, F.relu(self.conv3(x2, adj, mask, self.add_loop)))
else:
x1 = F.relu(self.conv1(x0, adj, mask, self.add_loop))
x2 = F.relu(self.conv2(x1, adj, mask, self.add_loop))
x3 = F.relu(self.conv3(x2, adj, mask, self.add_loop))
x = torch.cat([x1, x2, x3], dim=-1)
if self.lin is not None:
x = F.relu(self.lin(x))
return x
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
batch_normalization = False
num_nodes = ceil(0.25 * max_nodes)
self.gnn1_pool = GNN(num_features, 64, num_nodes, batch_normalization = batch_normalization, add_loop=True)
self.gnn1_embed = GNN(num_features, 64, 64, batch_normalization = batch_normalization, add_loop=True, lin=False)
num_nodes = ceil(0.25 * num_nodes)
self.gnn2_pool = GNN(3 * 64, 64, num_nodes, batch_normalization = batch_normalization)
self.gnn2_embed = GNN(3 * 64, 64, 64, batch_normalization = batch_normalization, lin=False)
self.gnn3_embed = GNN(3 * 64, 64, 64, batch_normalization = batch_normalization, lin=False)
self.lin1 = torch.nn.Linear(3 * 64, 64)
self.lin2 = torch.nn.Linear(64, 1)
def forward(self, x, adj, mask=None):
s = self.gnn1_pool(x, adj, mask)
x = self.gnn1_embed(x, adj, mask)
x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)
s = self.gnn2_pool(x, adj)
x = self.gnn2_embed(x, adj)
x, adj, l2, e2 = dense_diff_pool(x, adj, s)
x = self.gnn3_embed(x, adj)
x = x.mean(dim=1)
x = F.relu(self.lin1(x))
x = self.lin2(x)
return x, l1 + l2, e1 + e2
device = torch.device('cuda' if torch.cuda.is_available() and args.use_cuda else 'cpu')
model = Net().to(device)
if args.load_path is not None and os.path.isfile(args.load_path):
model.load_state_dict(torch.load(args.load_path))
print_info('Successfully loaded the GNN model from {}'.format(args.load_path))
else:
print_info('Train with random initial GNN model')
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
def train(epoch):
model.train()
loss_all = 0
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
output, loss_link, loss_entropy = model(data.x, data.adj, data.mask)
loss = F.mse_loss(output[:, 0], data.y.view(-1))
# loss = loss + loss_link + loss_entropy
loss.backward()
loss_all += loss.item()
optimizer.step()
return loss_all / len(train_loader)
@torch.no_grad()
def test(loader, size, debug = False):
model.eval()
error = 0.
idx = 0
for data in loader:
data = data.to(device)
pred = model(data.x, data.adj, data.mask)[0]
error += F.mse_loss(pred[:, 0], data.y.view(-1))
if idx == 0 and debug:
print_info('predict = {}'.format(pred[:, 0]))
print_info('y = {}'.format(data.y.view(-1)))
idx += 1
return error / size
if not args.test:
os.makedirs(save_dir, exist_ok = True)
best_val_error = test_error = 10000.0
best_model = None
for epoch in range(1, 151):
t_start = time.time()
train_loss = train(epoch)
train_error = test(train_loader, len(train_loader))
val_error = test(val_loader, len(val_loader))
t_end = time.time()
if val_error < best_val_error:
test_error = test(test_loader, len(test_loader), debug = True)
best_val_error = val_error
best_model = deepcopy(model)
print('Epoch: {:03d}, Epoch Time: {:.1f}s, Train Loss: {:.7f}, Train Error: {:.7f}, Val Error: {:.7f}, Test Error: {:.7f}'\
.format(epoch, t_end - t_start, train_loss, train_error, val_error, test_error))
# save model with fixed interval
if args.save_interval > 0 and epoch % args.save_interval == 0:
save_path = os.path.join(save_dir, 'model_state_dict_{}.pt'.format(epoch))
torch.save(model.state_dict(), save_path)
# save the final model
save_path = os.path.join(save_dir, 'model_state_dict_final.pt')
torch.save(model.state_dict(), save_path)
else:
train_error = test(train_loader, len(train_loader))
val_error = test(val_loader, len(val_loader))
test_error = test(test_loader, len(test_loader), debug = True)
print('Train Error: {:.7f}, Val Error: {:.7f}, Test Error: {:.7f}'\
.format(train_error, val_error, test_error))
| Chapter03/demoTabWidget.py | houdinii/Qt5-Python-GUI-Programming-Cookbook | 131 | 91087 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'demoTabWidget.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(574, 300)
self.tabWidget = QtWidgets.QTabWidget(Dialog)
self.tabWidget.setGeometry(QtCore.QRect(10, 10, 481, 271))
font = QtGui.QFont()
font.setPointSize(12)
self.tabWidget.setFont(font)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.checkBox = QtWidgets.QCheckBox(self.tab)
self.checkBox.setGeometry(QtCore.QRect(30, 30, 191, 17))
self.checkBox.setObjectName("checkBox")
self.checkBox_2 = QtWidgets.QCheckBox(self.tab)
self.checkBox_2.setGeometry(QtCore.QRect(30, 70, 141, 17))
self.checkBox_2.setObjectName("checkBox_2")
self.checkBox_3 = QtWidgets.QCheckBox(self.tab)
self.checkBox_3.setGeometry(QtCore.QRect(30, 110, 161, 17))
self.checkBox_3.setObjectName("checkBox_3")
self.checkBox_4 = QtWidgets.QCheckBox(self.tab)
self.checkBox_4.setGeometry(QtCore.QRect(30, 150, 171, 17))
self.checkBox_4.setObjectName("checkBox_4")
self.pushButton = QtWidgets.QPushButton(self.tab)
self.pushButton.setGeometry(QtCore.QRect(40, 190, 141, 23))
self.pushButton.setObjectName("pushButton")
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.radioButton = QtWidgets.QRadioButton(self.tab_2)
self.radioButton.setGeometry(QtCore.QRect(40, 30, 131, 17))
self.radioButton.setObjectName("radioButton")
self.radioButton_2 = QtWidgets.QRadioButton(self.tab_2)
self.radioButton_2.setGeometry(QtCore.QRect(40, 80, 111, 17))
self.radioButton_2.setObjectName("radioButton_2")
self.radioButton_3 = QtWidgets.QRadioButton(self.tab_2)
self.radioButton_3.setGeometry(QtCore.QRect(40, 130, 121, 21))
self.radioButton_3.setObjectName("radioButton_3")
self.radioButton_4 = QtWidgets.QRadioButton(self.tab_2)
self.radioButton_4.setGeometry(QtCore.QRect(40, 180, 181, 21))
self.radioButton_4.setObjectName("radioButton_4")
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.lineEdit = QtWidgets.QLineEdit(self.tab_3)
self.lineEdit.setGeometry(QtCore.QRect(160, 10, 291, 20))
self.lineEdit.setObjectName("lineEdit")
self.lineEdit_2 = QtWidgets.QLineEdit(self.tab_3)
self.lineEdit_2.setGeometry(QtCore.QRect(160, 50, 291, 20))
self.lineEdit_2.setObjectName("lineEdit_2")
self.lineEdit_3 = QtWidgets.QLineEdit(self.tab_3)
self.lineEdit_3.setGeometry(QtCore.QRect(160, 90, 291, 20))
self.lineEdit_3.setObjectName("lineEdit_3")
self.lineEdit_4 = QtWidgets.QLineEdit(self.tab_3)
self.lineEdit_4.setGeometry(QtCore.QRect(160, 130, 291, 20))
self.lineEdit_4.setObjectName("lineEdit_4")
self.lineEdit_5 = QtWidgets.QLineEdit(self.tab_3)
self.lineEdit_5.setGeometry(QtCore.QRect(160, 170, 291, 20))
self.lineEdit_5.setObjectName("lineEdit_5")
self.lineEdit_6 = QtWidgets.QLineEdit(self.tab_3)
self.lineEdit_6.setGeometry(QtCore.QRect(160, 210, 291, 20))
self.lineEdit_6.setObjectName("lineEdit_6")
self.label = QtWidgets.QLabel(self.tab_3)
self.label.setGeometry(QtCore.QRect(10, 10, 91, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.tab_3)
self.label_2.setGeometry(QtCore.QRect(10, 50, 71, 16))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.tab_3)
self.label_3.setGeometry(QtCore.QRect(10, 90, 47, 13))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.tab_3)
self.label_4.setGeometry(QtCore.QRect(10, 130, 71, 21))
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.tab_3)
self.label_5.setGeometry(QtCore.QRect(10, 170, 71, 16))
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.tab_3)
self.label_6.setGeometry(QtCore.QRect(10, 210, 121, 16))
self.label_6.setObjectName("label_6")
self.tabWidget.addTab(self.tab_3, "")
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(2)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.checkBox.setText(_translate("Dialog", "Cell Phone $150"))
self.checkBox_2.setText(_translate("Dialog", "Laptop $500"))
self.checkBox_3.setText(_translate("Dialog", "Camera $250"))
self.checkBox_4.setText(_translate("Dialog", "Shoes $200"))
self.pushButton.setText(_translate("Dialog", "Add to Cart"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Dialog", "Products Listing"))
self.radioButton.setText(_translate("Dialog", "Debit Card"))
self.radioButton_2.setText(_translate("Dialog", "Credit Card"))
self.radioButton_3.setText(_translate("Dialog", "Net Banking"))
self.radioButton_4.setText(_translate("Dialog", "Cash On Delivery"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Dialog", "Payment Method"))
self.label.setText(_translate("Dialog", "Address 1"))
self.label_2.setText(_translate("Dialog", "Address 2"))
self.label_3.setText(_translate("Dialog", "State"))
self.label_4.setText(_translate("Dialog", "Country"))
self.label_5.setText(_translate("Dialog", "Zip Code"))
self.label_6.setText(_translate("Dialog", "Contact Number"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("Dialog", "Delivery Address"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| bcbio/bam/ref.py | a113n/bcbio-nextgen | 418 | 91126 |
"""Manipulation functionality to deal with reference files.
"""
import collections
from bcbio import utils
from bcbio.pipeline import config_utils
from bcbio.provenance import do
def fasta_idx(in_file, config=None):
"""Retrieve samtools style fasta index.
"""
fasta_index = in_file + ".fai"
if not utils.file_exists(fasta_index):
samtools = config_utils.get_program("samtools", config) if config else "samtools"
cmd = "{samtools} faidx {in_file}"
do.run(cmd.format(**locals()), "samtools faidx")
return fasta_index
def file_contigs(ref_file, config=None):
"""Iterator of reference contigs and lengths from a reference file.
"""
ContigInfo = collections.namedtuple("ContigInfo", "name size")
with open(fasta_idx(ref_file, config)) as in_handle:
for line in (l for l in in_handle if l.strip()):
name, size = line.split()[:2]
yield ContigInfo(name, int(size))
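# Usage sketch (added note, not in the original; the FASTA path is a placeholder):
#   for contig in file_contigs("/path/to/genome.fa"):
#       print(contig.name, contig.size)
# fasta_idx() shells out to `samtools faidx` the first time, if genome.fa.fai does not already exist.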
| dataviva/apps/embed/views.py | joelvisroman/dataviva-site | 126 | 91136 |
# -*- coding: utf-8 -*-
import requests
from datetime import datetime
from sqlalchemy import func
from flask import Blueprint, request, render_template, g, Response, make_response, jsonify
from flask.ext.babel import gettext
from dataviva import db, datavivadir, __year_range__, view_cache
from dataviva.api.attrs.models import Bra, Cnae, Hs, Cbo, Wld, University, Course_hedu, Course_sc, Search
from dataviva.apps.general.views import get_locale
from dataviva.apps.data.forms import DownloadForm
from dataviva.apps.user.models import Starred
from dataviva.apps.embed.models import Build, UI, App, Crosswalk_oc, Crosswalk_pi
from dataviva.apps.general.models import Short
from dataviva.utils.gzip_data import gzip_data
from dataviva.utils.cached_query import cached_query
from dataviva.utils.title_format import title_format
import json
import urllib2
import urllib
from config import FACEBOOK_OAUTH_ID, basedir, GZIP_DATA
import os
import zipfile
mod = Blueprint('embed', __name__,
template_folder='templates',
url_prefix='/<lang_code>/embed')
@mod.before_request
def before_request():
g.page_type = mod.name
g.color = "#af1f24"
g.sabrina = {
"outfit": "lab",
"face": "smirk",
"hat": "glasses"
}
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
def filler(dataset, filter1, filter2):
'''Since the "builds" are held in the database with placeholders for
attributes i.e. <cbo>, <hs>, <cnae> we need to convert the IDs given
in the URL to these placeholders. i.e.
- a0111 = <cnae>
- 010101 = <hs>
- all = all
'''
filler1 = filter1
if filler1 != "all":
if dataset == "rais":
filler1 = "cnae"
elif dataset == "secex":
filler1 = "hs"
elif dataset == "hedu":
filler1 = "university"
filler2 = filter2
if filler2 != "all":
if dataset == "rais":
filler2 = "cbo"
elif dataset == "secex":
filler2 = "wld"
elif dataset == "hedu":
filler2 = "course_hedu"
elif dataset == "sc":
filler2 = "course_sc"
return filler1, filler2
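# For example (added note, not in the original), using the IDs from the docstring above:
#   filler("rais", "a0111", "all")   -> ("cnae", "all")
#   filler("secex", "010101", "all") -> ("hs", "all")
# Any non-"all" second filter is mapped per dataset in the same way: rais -> "cbo",
# secex -> "wld", hedu -> "course_hedu", sc -> "course_sc".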
def is_xhr():
return request.is_xhr
@mod.route("/")
@mod.route("/<app_name>/<dataset>/<bra_id>/<filter1>/<filter2>/<output>/")
<EMAIL>(key_prefix=api_cache_key("apps:embed"), unless=is_xhr)
def embed(app_name="tree_map", dataset="rais", bra_id="4mg",
filter1="all", filter2="all", output="cbo"):
prefix = "apps:embed:xhr:"
lang = request.args.get('lang', None) or g.locale
global_vars = {x[0]: x[1] for x in request.args.items()}
imports = False
if "size" in global_vars:
if global_vars["size"] == "import_val":
imports = True
if "y" in global_vars:
if global_vars["y"] == "import_val":
imports = True
if "axes" in global_vars:
if global_vars["axes"] == "import_val":
imports = True
if (g.user is None or not g.user.is_authenticated) and request.is_xhr:
cache_id = prefix + request.path + lang
if imports:
cache_id = cache_id + "imports"
cached_q = cached_query(cache_id)
if cached_q:
ret = make_response(cached_q)
ret.headers['Content-Encoding'] = 'gzip'
ret.headers['Content-Length'] = str(len(ret.data))
return ret
build_filter1, build_filter2 = filler(dataset, filter1, filter2)
'''Grab attrs for bra and filters
'''
if bra_id == "all":
bra_attr = Wld.query.get_or_404("sabra")
else:
bra_attr = [Bra.query.get_or_404(b) for b in bra_id.split("_")]
filter1_attr = filter1
filter2_attr = filter2
if filter1 != "all":
filter1_attr = globals()[build_filter1.capitalize()].query.get_or_404(
filter1)
if filter2 != "all":
filter2_attr = globals()[build_filter2.capitalize()].query.get_or_404(
filter2)
if build_filter1 != "all":
build_filter1 = "<{}>".format(build_filter1)
if build_filter2 != "all":
build_filter2 = "<{}>".format(build_filter2)
'''This is an instance of the Build class for the selected app,
determined by the combination of app_type, dataset, filters and output.
'''
current_app = App.query.filter_by(type=app_name).first_or_404()
current_build = Build.query.filter_by(
app=current_app, dataset=dataset, filter1=build_filter1, filter2=build_filter2, output=output).first_or_404()
current_build.set_filter1(filter1_attr)
current_build.set_filter2(filter2_attr)
current_build.set_bra(bra_attr)
'''Every possible build, required by the embed page for building the build
dropdown.
'''
# all_builds = Build.query.all()
# all_builds.sort(key=lambda x: x.dataset)
# for build in all_builds:
# build.set_filter1(filter1_attr)
# build.set_filter2(filter2_attr)
# build.set_bra(bra_attr)
'''Get URL query parameters from the request.args object to return to the view.
'''
if "controls" not in global_vars:
global_vars["controls"] = "true"
'''If user is logged in see if they have starred this app.'''
starred = 0
app_id = "/".join([app_name, dataset, bra_id, filter1, filter2, output])
if g.user and g.user.is_authenticated:
is_starred = Starred.query.filter_by(
user=g.user, app_id=app_id).first()
starred = 1 if is_starred else -1
if imports:
current_build.set_import()
if request.is_xhr:
ret = jsonify({
"current_build": current_build.serialize(),
# "all_builds": [b.json() for b in all_builds],
"starred": starred
})
ret.data = gzip_data(ret.data)
ret.headers['Content-Encoding'] = 'gzip'
ret.headers['Content-Length'] = str(len(ret.data))
if starred == 0 and cached_q is None:
cached_query(cache_id, ret.data)
else:
current_build.set_import()
year_range_dict = __year_range__.copy()
if current_build.app.type in ['network', 'rings', 'scatter']:
year_range_dict["secex"] = ["2000-1", "2017-12"]
year_range = json.dumps(year_range_dict)
ret = make_response(render_template("embed/embed.html",
# apps = App.query.all(),
# all_builds = all_builds,
starred=starred,
form=DownloadForm(),
current_build=current_build,
global_vars=json.dumps(
global_vars),
facebook_id=FACEBOOK_OAUTH_ID,
year_range=year_range))
ret.data = gzip_data(ret.data)
ret.headers['Content-Encoding'] = 'gzip'
ret.headers['Content-Length'] = str(len(ret.data))
ret.headers.add('Last-Modified', datetime.now())
ret.headers.add(
'Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0')
ret.headers.add('Pragma', 'no-cache')
return ret
@mod.route('/star/<app_name>/<data_type>/<bra_id>/<filter1>/<filter2>/<output>/', methods=['GET', 'POST'])
def app_star(app_name, data_type, bra_id, filter1, filter2, output):
app_id = "/".join([app_name, data_type, bra_id, filter1, filter2, output])
# if request.method == 'POST' and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
# g.user = User.query.get(request.form["user"])
if g.user is None or not g.user.is_authenticated:
return jsonify({"error": gettext("You need to be logged in to star visualizations.")})
starred = Starred.query.filter_by(user=g.user, app_id=app_id).first()
if request.method == 'POST':
# if "user" not in request.form:
# form_json = {"user": g.user.id, "title": request.form['title'].encode('utf-8')}
# try:
# opener = urllib2.urlopen("{0}{1}".format(SITE_MIRROR,request.path[1:]),urllib.urlencode(form_json),5)
# except:
# return jsonify({"error": gettext("The server is not responding.
# Please try again later.")})
if starred:
db.session.delete(starred)
db.session.commit()
return jsonify({"success": -1})
else:
app_name = request.form['title'].encode('utf-8')
timestamp = datetime.utcnow()
new_star = Starred(
user=g.user, app_id=app_id, app_name=app_name, timestamp=timestamp)
db.session.add(new_star)
db.session.commit()
return jsonify({"success": 1})
if starred:
return jsonify({"success": 1})
else:
return jsonify({"success": -1})
def get_builds(bra_attr, dataset, profile1, filter1, profile2, filter2, kwargs):
builds = Build.query.filter_by(
dataset=dataset, filter1=profile1, filter2=profile2).all()
build_list = []
for b in builds:
# -- when looking at all Brazil, skip Occugrid/Rings
if bra_attr and ((b.output == 'bra' and len(bra_attr.id) == 9) or (bra_attr.id == "sabra" and b.id in [48, 51])):
continue
if bra_attr:
b.set_bra(bra_attr)
if filter1 != 'all':
b.set_filter1(filter1)
if filter2 != 'all':
b.set_filter2(filter2)
build_list.append(b.json(**kwargs))
return build_list
@mod.route('/recommend/', methods=['GET', 'POST'])
@mod.route('/recommend/<app_name>/<dataset>/<bra_id>/<filter1>/<filter2>/<output>/', methods=['GET', 'POST'])
<EMAIL>(key_prefix=api_cache_key("apps:recommend"))
def recommend(app_name=None, dataset=None, bra_id="4mg", filter1=None, filter2=None, output=None):
recommended = {}
build_filter1, build_filter2 = filler(dataset, filter1, filter2)
'''Grab attrs for bra and filters
'''
bra_all = [Wld.query.get_or_404("sabra")]
if bra_id == "all":
bra_attr = bra_all
else:
bra_attr = [Bra.query.get_or_404(b) for b in bra_id.split("_")]
filter1_attr = filter1
filter2_attr = filter2
profile = False
if filter1 != "all":
filter1_attr = globals()[build_filter1.capitalize()].query.get_or_404(
filter1)
if output == build_filter1:
profile = filter1_attr
recommended["crosswalk"] = crosswalk_recs(
dataset, build_filter1, filter1)
if filter2 != "all":
filter2_attr = globals()[build_filter2.capitalize()].query.get_or_404(
filter2)
if output == build_filter2:
profile = filter2_attr
recommended["crosswalk"] = crosswalk_recs(
dataset, build_filter2, filter2)
if profile == False and output == "bra":
profile = bra_attr[0]
if profile and output != "school":
if g.locale == "pt":
title = u"Perfil <{0}_para> <{0}>".format(output)
else:
title = u"Profile for <{0}>".format(output)
recommended["profile"] = {
"title": title_format(title, profile),
"url": profile.url()
}
if build_filter1 != "all":
build_filter1 = "<{}>".format(build_filter1)
if build_filter2 != "all":
build_filter2 = "<{}>".format(build_filter2)
kwargs = {k: v for k, v in request.args.items()}
if app_name == "geo_map" and len(bra_id) < 9:
custom = Build.query.filter_by(
app_id=3, dataset=dataset, filter1=build_filter1, filter2=build_filter2, output=output).first()
custom.set_bra(bra_attr)
custom.set_filter1(filter1_attr)
custom.set_filter2(filter2_attr)
recommended["custom"] = custom.json(**kwargs)
for bra in bra_attr:
recommended['builds'] = get_builds(
bra, dataset, build_filter1, filter1_attr, build_filter2, filter2_attr, kwargs)
if bra_id != "all" and output != "bra":
recommended['builds'] += get_builds(
bra_all[0], dataset, build_filter1, filter1_attr, build_filter2, filter2_attr, kwargs)
return jsonify(recommended)
def get_geo_location(ip):
req = urllib2.Request("http://freegeoip.net/json/" + ip)
opener = urllib2.build_opener()
try:
f = opener.open(req)
except:
return None
json_resp = json.loads(f.read())
city = json_resp["city"]
# city = "Viana"
state = json_resp["region_name"]
# state = "Espírito Santo"
# state = "Maranhão"
# first try to find the exact city within the state
bra_state = Bra.query.filter_by(name_pt=state).filter(
func.char_length(Bra.id) == 3).first()
bra_cities = Bra.query.filter_by(name_pt=city).filter(
func.char_length(Bra.id) == 9)
if bra_state:
if bra_cities.count() == 1:
return bra_cities.first()
elif bra_cities.count() > 1:
return bra_cities.filter(Bra.id.like(bra_state.id+'%')).first()
return None
return None
@mod.route('/download/', methods=['GET', 'POST'])
def download():
import tempfile
import subprocess
import random
import base64
form = DownloadForm()
data = form.data.data
format = form.output_format.data
title = form.title.data
downloadToken = form.downloadToken.data
max_length = 250 - (len(downloadToken) + 1)
title_safe = title[:max_length]
filenameDownload = title_safe + "-" + downloadToken
if format == "png":
mimetype = 'image/png'
elif format == "pdf":
mimetype = 'application/pdf'
elif format == "svg":
mimetype = 'application/octet-stream'
elif format == "csv":
mimetype = "text/csv;charset=UTF-16"
elif format == "url2csv":
mimetype = "text/csv;charset=UTF-16"
response_data = data.encode("utf-16")
content_disposition = "attachment;filename=%s.%s" % (title_safe, format)
content_disposition = content_disposition.replace(",", "_")
download_file = make_response(Response(response_data,
mimetype=mimetype,
headers={"Content-Disposition": content_disposition}))
with open(os.path.join(basedir, "dataviva/static/downloads/" + title_safe + "." + format), "wb") as fo:
fo.write(response_data)
zf = zipfile.ZipFile(os.path.join(
basedir, "dataviva/static/downloads/" + filenameDownload + ".zip"), mode='w')
try:
zf.write(os.path.join(basedir, "dataviva/static/downloads/" +
title_safe + "." + format), title_safe + "." + format)
finally:
zf.close()
os.remove(os.path.join(basedir, "dataviva/static/downloads/" + title_safe + "." + format))
return "/static/downloads/" + filenameDownload + ".zip"
@mod.route('/info/<app_name>/')
def info(app_name="tree_map"):
return render_template("embed/info.html", app_name=app_name)
@mod.route('/coords/<id>/')
def coords(id="all"):
if GZIP_DATA:
fileext = ".gz"
filetype = "gzip"
else:
fileext = ""
filetype = "json"
if id == "all":
file_name = "bra_states.json"+fileext
else:
file_name = ("{0}_munic.json"+fileext).format(id)
cached_q = cached_query(file_name)
if cached_q:
ret = make_response(cached_q)
else:
path = datavivadir+"/static/json/coords/{0}".format(file_name)
gzip_file = open(path).read()
cached_query(file_name, gzip_file)
ret = make_response(gzip_file)
ret.headers['Content-Encoding'] = filetype
ret.headers['Content-Length'] = str(len(ret.data))
return ret
@mod.route('/networks/<type>/')
def networks(type="hs"):
if GZIP_DATA:
fileext = ".gz"
filetype = "gzip"
else:
fileext = ""
filetype = "json"
file_name = ("network_{0}.json"+fileext).format(type)
cached_q = cached_query(file_name)
if cached_q:
ret = make_response(cached_q)
else:
path = datavivadir+"/static/json/networks/{0}".format(file_name)
gzip_file = open(path).read()
cached_query(file_name, gzip_file)
ret = make_response(gzip_file)
ret.headers['Content-Encoding'] = filetype
ret.headers['Content-Length'] = str(len(ret.data))
return ret
@mod.route('/shorten/', methods=['GET', 'POST'])
def shorten_url():
if request.method == 'POST':
response = request.form['url'] if 'url' in request.form else request.json['url']
long_url = urllib.unquote(response.encode('utf-8')).decode('utf-8')
short = Short.query.filter_by(long_url=long_url).first()
if short is None:
slug = Short.make_unique_slug(long_url)
short = Short(slug=slug, long_url=long_url)
db.session.add(short)
db.session.commit()
return jsonify({"slug": short.slug})
return jsonify({"error": "No URL given."})
def crosswalk_recs(dataset, filter, id):
crosswalk = []
attr_swap = {"hs": "cnae", "cnae": "hs",
"cbo": "course_hedu", "course_hedu": "cbo"}
crosswalk_table = {
"hs": "pi", "cnae": "pi", "cbo": "oc", "course_hedu": "oc"}
if filter in attr_swap and id != "all":
table = globals()["Crosswalk_{}".format(crosswalk_table[filter])]
col = getattr(table, "{}_id".format(filter))
results = table.query.filter(col == id)
ids = [row.get_id(dataset) for row in results]
if ids:
ids = Search.query.filter(Search.id.in_(ids)).filter(
Search.kind == attr_swap[filter]).all()
ids = [a.id for a in ids]
table = globals()[attr_swap[filter].capitalize()]
attrs = table.query.filter(table.id.in_(ids)).all()
crosswalk = [
{"title": a.name(), "url": a.url(), "type": attr_swap[filter]} for a in attrs]
return crosswalk
@mod.route('/image', methods=['GET'])
def image():
url = request.args.get('link')
code = requests.get(url).status_code
return Response(str(code), status=200)
| nsff_scripts/run_midas.py | frankhome61/nsff | 330 | 91140 |
"""
Compute depth maps for images in the input folder.
"""
import os
import glob
import torch
import cv2
import numpy as np
from torchvision.transforms import Compose
from models.midas_net import MidasNet
from models.transforms import Resize, NormalizeImage, PrepareForNet
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
VIZ = True
def read_image(path):
"""Read image and output RGB image (0-1).
Args:
path (str): path to file
Returns:
array: RGB image (0-1)
"""
img = cv2.imread(path)
if img.ndim == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
return img
def _minify(basedir, factors=[], resolutions=[]):
'''
Minify the images to small resolution for training
'''
needtoload = False
for r in factors:
imgdir = os.path.join(basedir, 'images_{}'.format(r))
if not os.path.exists(imgdir):
needtoload = True
for r in resolutions:
imgdir = os.path.join(basedir, 'images_{}x{}'.format(r[1], r[0]))
if not os.path.exists(imgdir):
needtoload = True
if not needtoload:
return
from shutil import copy
from subprocess import check_output
import glob
imgdir = os.path.join(basedir, 'images')
imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))]
imgs = [f for f in imgs if any([f.endswith(ex) for ex in ['JPG', 'jpg', 'png', 'jpeg', 'PNG']])]
imgdir_orig = imgdir
wd = os.getcwd()
for r in factors + resolutions:
if isinstance(r, int):
name = 'images_{}'.format(r)
resizearg = '{}%'.format(100./r)
else:
name = 'images_{}x{}'.format(r[1], r[0])
resizearg = '{}x{}'.format(r[1], r[0])
imgdir = os.path.join(basedir, name)
if os.path.exists(imgdir):
continue
print('Minifying', r, basedir)
os.makedirs(imgdir)
check_output('cp {}/* {}'.format(imgdir_orig, imgdir), shell=True)
ext = imgs[0].split('.')[-1]
print(ext)
# sys.exit()
img_path_list = glob.glob(os.path.join(imgdir, '*.%s'%ext))
for img_path in img_path_list:
save_path = img_path.replace('.jpg', '.png')
img = cv2.imread(img_path)
print(img.shape, r)
cv2.imwrite(save_path,
cv2.resize(img,
(r[1], r[0]),
interpolation=cv2.INTER_AREA))
if ext != 'png':
check_output('rm {}/*.{}'.format(imgdir, ext), shell=True)
print('Removed duplicates')
print('Done')
to8b = lambda x : (255*np.clip(x,0,1)).astype(np.uint8)
import imageio
def run(basedir,
input_path,
output_path,
model_path,
resize_height=288):
"""Run MonoDepthNN to compute depth maps.
Args:
input_path (str): path to input folder
output_path (str): path to output folder
model_path (str): path to saved model
"""
print("initialize")
img0 = [os.path.join(basedir, 'images', f) \
for f in sorted(os.listdir(os.path.join(basedir, 'images'))) \
if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')][0]
sh = cv2.imread(img0).shape
height = resize_height
factor = sh[0] / float(height)
width = int(round(sh[1] / factor))
_minify(basedir, resolutions=[[height, width]])
# select device
device = torch.device("cuda")
print("device: %s" % device)
small_img_dir = input_path + '_*x' + str(resize_height) + '/'
print(small_img_dir)
small_img_path = sorted(glob.glob(glob.glob(small_img_dir)[0] + '/*.png'))[0]
small_img = cv2.imread(small_img_path)
print('small_img', small_img.shape)
# Portrait Orientation
if small_img.shape[0] > small_img.shape[1]:
input_h = 640
input_w = int(round( float(input_h) / small_img.shape[0] * small_img.shape[1]))
# Landscape Orientation
else:
input_w = 640
input_h = int(round( float(input_w) / small_img.shape[1] * small_img.shape[0]))
print('Monocular depth input_w %d input_h %d '%(input_w, input_h))
# load network
model = MidasNet(model_path, non_negative=True)
transform_1 = Compose(
[
Resize(
input_w,
input_h,
resize_target=None,
keep_aspect_ratio=True,
ensure_multiple_of=32,
resize_method="upper_bound",
image_interpolation_method=cv2.INTER_AREA,
),
NormalizeImage(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
PrepareForNet(),
]
)
model.to(device)
model.eval()
# get input
img_names = sorted(glob.glob(os.path.join(input_path, "*")))
num_images = len(img_names)
# create output folder
os.makedirs(output_path, exist_ok=True)
print("start processing")
for ind in range(len(img_names)):
img_name = img_names[ind]
print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))
# input
img = read_image(img_name)
img_input_1 = transform_1({"image": img})["image"]
# compute
with torch.no_grad():
sample_1 = torch.from_numpy(img_input_1).to(device).unsqueeze(0)
prediction = model.forward(sample_1)
prediction = (
torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=[small_img.shape[0],
small_img.shape[1]],
mode="nearest",
)
.squeeze()
.cpu()
.numpy()
)
# output
filename = os.path.join(
output_path, os.path.splitext(os.path.basename(img_name))[0]
)
if VIZ:
if not os.path.exists('./midas_outputs'):
os.makedirs('./midas_outputs')
plt.figure(figsize=(12, 6))
plt.subplot(1,2,1)
plt.imshow(img)
plt.subplot(1,2,2)
plt.imshow(prediction, cmap='jet')
plt.savefig('./midas_outputs/%s'%(img_name.split('/')[-1]))
plt.close()
print(filename + '.npy')
np.save(filename + '.npy', prediction.astype(np.float32))
print("finished")
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str,
help='COLMAP Directory')
# parser.add_argument("--input_w", type=int, default=640,
# help='input image width for monocular depth network')
# parser.add_argument("--input_h", type=int, default=360,
# help='input image height for monocular depth network')
parser.add_argument("--resize_height", type=int, default=288,
help='resized image height for training \
(width will be resized based on original aspect ratio)')
args = parser.parse_args()
BASE_DIR = args.data_path
INPUT_PATH = BASE_DIR + "/images"
OUTPUT_PATH = BASE_DIR + "/disp"
MODEL_PATH = "model.pt"
if not os.path.exists(OUTPUT_PATH):
os.makedirs(OUTPUT_PATH)
# set torch options
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
# compute depth maps
run(BASE_DIR, INPUT_PATH,
OUTPUT_PATH, MODEL_PATH,
args.resize_height)
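# Example invocation (added note, not in the original; the COLMAP directory path is a placeholder):
#   python run_midas.py --data_path /path/to/colmap_scene --resize_height 288
# Depth predictions are written as .npy files into <data_path>/disp.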
| angr/exploration_techniques/dfs.py | Kyle-Kyle/angr | 6,132 | 91144 |
from . import ExplorationTechnique
import random
class DFS(ExplorationTechnique):
"""
Depth-first search.
Will only keep one path active at a time, any others will be stashed in the 'deferred' stash.
When we run out of active paths to step, we take the longest one from deferred and continue.
"""
def __init__(self, deferred_stash='deferred'):
super(DFS, self).__init__()
self._random = random.Random()
self._random.seed(10)
self.deferred_stash = deferred_stash
def setup(self, simgr):
if self.deferred_stash not in simgr.stashes:
simgr.stashes[self.deferred_stash] = []
def step(self, simgr, stash='active', **kwargs):
simgr = simgr.step(stash=stash, **kwargs)
if len(simgr.stashes[stash]) > 1:
self._random.shuffle(simgr.stashes[stash])
simgr.split(from_stash=stash, to_stash=self.deferred_stash, limit=1)
if len(simgr.stashes[stash]) == 0:
if len(simgr.stashes[self.deferred_stash]) == 0:
return simgr
simgr.stashes[stash].append(simgr.stashes[self.deferred_stash].pop())
return simgr
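# Usage sketch (added note, not in the original; the binary path is a placeholder):
#   import angr
#   proj = angr.Project("/path/to/binary", auto_load_libs=False)
#   simgr = proj.factory.simulation_manager(proj.factory.entry_state())
#   simgr.use_technique(DFS())  # one state stays active; the rest wait in the 'deferred' stash
#   simgr.run()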
| app/tests/uploads_tests/test_models.py | kaczmarj/grand-challenge.org | 101 | 91146 |
import pytest
from django.conf import settings
from requests import put
from grandchallenge.uploads.models import UserUpload
from tests.algorithms_tests.factories import AlgorithmImageFactory
from tests.factories import UserFactory
from tests.verification_tests.factories import VerificationFactory
@pytest.mark.django_db
def test_user_upload_flow():
# Create User Upload
u = UserFactory()
filename = "foo.bat"
# Create User Upload File
upload = UserUpload.objects.create(creator=u, filename=filename)
assert upload.status == UserUpload.StatusChoices.INITIALIZED
assert upload.s3_upload_id != ""
# Get the presigned url
presigned_url = upload.generate_presigned_url(part_number=0)
assert presigned_url != ""
# PUT the file
response = put(presigned_url, data=b"123")
assert response.status_code == 200
assert response.headers["ETag"] != ""
# Finish the upload
upload.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 0}]
)
assert upload.status == UserUpload.StatusChoices.COMPLETED
def test_create_multipart_upload():
user = UserFactory.build()
upload = UserUpload(creator=user)
assert upload.s3_upload_id == ""
assert upload.status == UserUpload.StatusChoices.PENDING
upload.create_multipart_upload()
assert upload.s3_upload_id != ""
assert upload.status == UserUpload.StatusChoices.INITIALIZED
assert upload.key == f"uploads/{user.pk}/{upload.pk}"
def test_generate_presigned_urls():
upload = UserUpload(creator=UserFactory.build())
upload.create_multipart_upload()
presigned_urls = upload.generate_presigned_urls(part_numbers=[1, 13, 26])
assert set(presigned_urls.keys()) == {"1", "13", "26"}
assert presigned_urls["1"].startswith(
f"{settings.AWS_S3_ENDPOINT_URL}/{upload.bucket}/{upload.key}?uploadId={upload.s3_upload_id}&partNumber=1&"
)
assert presigned_urls["13"].startswith(
f"{settings.AWS_S3_ENDPOINT_URL}/{upload.bucket}/{upload.key}?uploadId={upload.s3_upload_id}&partNumber=13&"
)
assert presigned_urls["26"].startswith(
f"{settings.AWS_S3_ENDPOINT_URL}/{upload.bucket}/{upload.key}?uploadId={upload.s3_upload_id}&partNumber=26&"
)
def test_abort_multipart_upload():
upload = UserUpload(creator=UserFactory.build())
upload.create_multipart_upload()
assert upload.status == UserUpload.StatusChoices.INITIALIZED
assert upload.s3_upload_id != ""
upload.abort_multipart_upload()
assert upload.status == UserUpload.StatusChoices.ABORTED
assert upload.s3_upload_id == ""
def test_list_parts():
upload = UserUpload(creator=UserFactory.build())
upload.create_multipart_upload()
url = upload.generate_presigned_url(part_number=1)
response = put(url, data=b"123")
parts = upload.list_parts()
assert len(parts) == 1
assert parts[0]["ETag"] == response.headers["ETag"]
assert parts[0]["Size"] == 3
assert parts[0]["PartNumber"] == 1
def test_list_parts_empty():
upload = UserUpload(creator=UserFactory.build())
upload.create_multipart_upload()
parts = upload.list_parts()
assert parts == []
def test_list_parts_truncation():
upload = UserUpload(creator=UserFactory.build())
upload.create_multipart_upload()
presigned_urls = upload.generate_presigned_urls(part_numbers=[1, 2])
responses = {}
for part_number, url in presigned_urls.items():
responses[part_number] = put(url, data=b"123")
upload.LIST_MAX_ITEMS = 1
parts = upload.list_parts()
assert len(parts) == 2
assert parts[0]["ETag"] == responses["1"].headers["ETag"]
assert parts[0]["Size"] == 3
assert parts[0]["PartNumber"] == 1
assert parts[1]["ETag"] == responses["2"].headers["ETag"]
assert parts[1]["Size"] == 3
assert parts[1]["PartNumber"] == 2
@pytest.mark.django_db
def test_upload_copy():
user = UserFactory()
upload = UserUpload.objects.create(creator=user, filename="test.tar.gz")
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
response = put(presigned_urls["1"], data=b"123")
upload.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 1}]
)
upload.save()
ai = AlgorithmImageFactory(creator=user, image=None)
assert not ai.image
upload.copy_object(to_field=ai.image)
assert (
ai.image.name
== f"docker/images/algorithms/algorithmimage/{ai.pk}/test.tar.gz"
)
assert ai.image.storage.exists(name=ai.image.name)
with ai.image.open() as f:
assert f.read() == b"123"
@pytest.mark.django_db
def test_file_deleted_with_object():
u = UserFactory()
upload = UserUpload.objects.create(creator=u)
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
response = put(presigned_urls["1"], data=b"123")
upload.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 1}]
)
upload.save()
bucket = upload.bucket
key = upload.key
assert upload._client.head_object(Bucket=bucket, Key=key)
UserUpload.objects.filter(pk=upload.pk).delete()
with pytest.raises(upload._client.exceptions.ClientError):
upload._client.head_object(Bucket=bucket, Key=key)
@pytest.mark.django_db
def test_incomplete_deleted_with_object():
u = UserFactory()
upload = UserUpload.objects.create(creator=u)
bucket = upload.bucket
key = upload.key
assert "Uploads" in upload._client.list_multipart_uploads(
Bucket=bucket, Prefix=key
)
UserUpload.objects.filter(pk=upload.pk).delete()
assert "Uploads" not in upload._client.list_multipart_uploads(
Bucket=bucket, Prefix=key
)
def test_size_of_creators_completed_uploads():
def upload_files_for_user(user, n=1):
for _ in range(n):
ul = UserUpload(creator=user)
ul.create_multipart_upload()
presigned_urls = ul.generate_presigned_urls(part_numbers=[1])
response = put(presigned_urls["1"], data=b"123")
ul.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 1}]
)
u = UserFactory.build(pk=42)
upload = UserUpload(creator=u)
upload.LIST_MAX_ITEMS = 1
initial_upload_size = upload.size_of_creators_completed_uploads
assert type(initial_upload_size) == int
upload_files_for_user(user=u, n=upload.LIST_MAX_ITEMS + 1)
# another users files should not be considered
upload_files_for_user(user=UserFactory.build(pk=u.pk + 1))
assert (
upload.size_of_creators_completed_uploads
== initial_upload_size + (upload.LIST_MAX_ITEMS + 1) * 3
)
def test_size_incomplete():
u = UserFactory.build(pk=42)
upload = UserUpload(creator=u)
upload.create_multipart_upload()
upload.LIST_MAX_ITEMS = 1
assert upload.size == 0
parts = [1, 2]
presigned_urls = upload.generate_presigned_urls(part_numbers=parts)
for part in parts:
put(presigned_urls[str(part)], data=b"123")
assert upload.size == (upload.LIST_MAX_ITEMS + 1) * 3
def test_size_complete():
u = UserFactory.build(pk=42)
upload = UserUpload(creator=u)
upload.create_multipart_upload()
assert upload.size == 0
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
response = put(presigned_urls["1"], data=b"123")
upload.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 1}]
)
assert upload.size == 3
@pytest.mark.django_db
def test_can_upload_more_unverified(settings):
upload = UserUpload.objects.create(creator=UserFactory())
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
put(presigned_urls["1"], data=b"123")
assert upload.can_upload_more is True
settings.UPLOADS_MAX_SIZE_UNVERIFIED = 2
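    # 3 bytes are already uploaded, so a 2 byte unverified limit must now
    # block further uploads.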
assert upload.can_upload_more is False
@pytest.mark.django_db
def test_can_upload_more_verified(settings):
user = UserFactory()
upload = UserUpload.objects.create(creator=user)
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
put(presigned_urls["1"], data=b"123")
settings.UPLOADS_MAX_SIZE_UNVERIFIED = 2
assert upload.can_upload_more is False
VerificationFactory(user=user, is_verified=True)
assert upload.can_upload_more is True
settings.UPLOADS_MAX_SIZE_VERIFIED = 2
assert upload.can_upload_more is False
@pytest.mark.django_db
def test_can_upload_more_other_objects(settings):
user = UserFactory()
new_upload = UserUpload.objects.create(creator=user)
settings.UPLOADS_MAX_SIZE_UNVERIFIED = 2
assert new_upload.can_upload_more is True
upload = UserUpload.objects.create(creator=user)
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
response = put(presigned_urls["1"], data=b"123")
upload.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 1}]
)
assert upload.can_upload_more is False
assert new_upload.can_upload_more is False
|
analysis/flight_data/generate_wind_db_from_flight_log.py
|
leozz37/makani
| 1,178 |
91159
|
#!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Produces a wind database for simulation from a flight log."""
import contextlib
import ctypes
import os
import sys
import gflags
import h5py
from makani.config import mconfig
from makani.control import system_types
from makani.lib.python import h5log_loader
from makani.sim.physics import wind_frame
import numpy as np
from scipy import signal
gflags.DEFINE_string('output_file', None,
'Output wind database.')
gflags.DEFINE_enum('log_type', 'wing', ('wing', 'cc', 'weather'),
'Type of log (wing, command center, or weather station)'
' being read.')
gflags.DEFINE_enum('wind_source', 'est', ('est', 'ws'),
                   'Source of wind data: '
                   'the controller estimate or raw weather station.')
gflags.DEFINE_float('cutoff_freq', None,
'If specified, wind data will be low-pass filtered with '
'this cutoff frequency [Hz].')
FLAGS = gflags.FLAGS
def ExtractLogData(log_file_names, log_type, wind_source):
"""Extracts data from log files.
Args:
log_file_names: Names of HDF5 log files.
log_type: 'wing', 'cc', or 'weather'.
wind_source: 'est' or 'ws'
Returns:
(wind_vel_ws, duration):
wind_vel_ws: [:, 3] array of wind velocity measurements [m/s] in the wind
sensor frame.
duration: Length of time [s] spanned by wind_vel_ws.
Raises:
ValueError: If log_type is invalid.
"""
log = h5log_loader.H5LogLoader()
log.Open(log_file_names)
if log_type == 'weather':
telemetry_path = 'PlatformSensorsA/GroundStationWeather'
elif log_type == 'wing':
telemetry_path = 'ControllerA/ControlDebug'
elif log_type == 'cc':
telemetry_path = 'ControllerA/ControlTelemetry'
else:
raise ValueError('Invalid value of --log_type: %s.' % FLAGS.log_type)
t = log.capture_time[telemetry_path]
if wind_source == 'ws':
if log_type == 'weather':
wind_vel = log[telemetry_path + '/wind/wind_velocity'].astype(np.float64)
elif log_type == 'wing' or log_type == 'cc':
# Each wind_ws point is stored as a Vec3. Use a view to reinterpret it as
# an array.
wind_vel = log[telemetry_path + '/control_input/wind_ws'].view(
np.dtype(('>f8', 3)))
elif wind_source == 'est':
if log_type == 'weather':
raise ValueError('Cannot use estimator wind_g from weather log.')
elif log_type == 'wing' or log_type == 'cc':
# Each wind_g point is stored as a Vec3. Use a view to reinterpret it as
# an array.
wind_vel = log[telemetry_path + '/state_est/wind_g/vector'].view(
np.dtype(('>f8', 3)))
else:
raise ValueError('Invalid value of --wind_source: %s.' % wind_source)
log.Close()
return wind_vel, t[-1] - t[0]
def CalcWindVelMwAndMeanWind(wind_vel, wind_source):
"""Converts wind to the mean wind frame, and provides mean speed/direction.
Args:
wind_vel: Wind velocity [m/s] in the wind sensor frame or the ground frame,
depending on wind_source (ws and est, respectively).
wind_source: 'est' or 'ws'
Returns:
(wind_vel_mw, mean_wind_speed, mean_wind_dir), defined as:
wind_vel_mw: Wind velocity [m/s] in the mean wind frame.
mean_wind_speed: Mean wind speed [m/s] in the horizontal plane.
mean_wind_dir: Direction [rad] of the mean wind.
Raises:
ValueError: The specified wind source was invalid.
"""
# Transform from weather station to ground coordinates, if needed
if wind_source == 'ws':
system_params = mconfig.MakeParams('m600.system_params')
assert (system_params['gs_model'] ==
system_types.kGroundStationModelTopHat), (
'This script assumes a top hat configuration.')
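    # dcm_parent2ws maps parent (ground) coordinates to the wind sensor frame,
    # so its transpose maps wind sensor coordinates back to ground.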
dcm_ws2g = np.array(system_params['wind_sensor']['dcm_parent2ws']['d']).T
wind_vel_g = np.dot(dcm_ws2g, wind_vel.T).T
elif wind_source == 'est':
wind_vel_g = wind_vel
else:
raise ValueError('Invalid value of --wind_source: %s.' % wind_source)
# Calculate mean wind xy-speed and direction.
mean_wind_g = np.mean(wind_vel_g, axis=0)
mean_wind_speed = np.hypot(mean_wind_g[0], mean_wind_g[1])
mean_wind_dir = np.arctan2(-mean_wind_g[1], -mean_wind_g[0])
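  # The negated components make mean_wind_dir the direction the wind blows
  # *from*, i.e. the upwind direction.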
# Rotate wind velocity to the mean wind frame.
dcm_mw2g = wind_frame.Mat3()
wind_frame.CalcDcmMwToG(mean_wind_dir, ctypes.byref(dcm_mw2g))
dcm_mw2g = np.array([row[:] for row in dcm_mw2g.d])
wind_vel_mw = np.dot(dcm_mw2g.T, wind_vel_g.T).T
return wind_vel_mw, mean_wind_speed, mean_wind_dir
def main(argv):
def PrintUsageAndExit():
print 'Usage: %s <log_1.h5> [log_2.h5] [log_3.h5] ...\n%s' % (
os.path.basename(argv[0]), FLAGS)
sys.exit(1)
# Parse flags and validate inputs.
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '\nError: %s\n\n' % e
PrintUsageAndExit()
if len(argv) < 2:
PrintUsageAndExit()
log_files = argv[1:]
wind_vel, duration = ExtractLogData(log_files, FLAGS.log_type,
FLAGS.wind_source)
wind_vel_mw, mean_wind_speed, mean_wind_dir = CalcWindVelMwAndMeanWind(
wind_vel, FLAGS.wind_source)
nt = wind_vel_mw.shape[0]
# Filter measured wind if requested.
if FLAGS.cutoff_freq is not None:
nyquist_freq = float(nt) / duration / 2.0
nyquist_frac = FLAGS.cutoff_freq / nyquist_freq
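    # signal.butter expects the cutoff expressed as a fraction of the Nyquist
    # frequency.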
    assert 0.0 < nyquist_frac <= 1.0
b, a = signal.butter(1, nyquist_frac, 'lowpass')
for i in range(3):
wind_vel_mw[:, i] = signal.lfilter(b, a, wind_vel_mw[:, i])
# Each timepoint in the wind database defines a wind field in the mean wind
# yz-plane (vw-plane) that advects downwind. This field can be inhomogeneous
# in general.
#
# However, we only have one reading at the wind sensor to work with.
# Consequently, we make this field constant at each timepoint in the simplest
# way possible - by using a 2x2 grid that fills a "large" region.
ny = 2
nz = 2
width = 5000.0
height = 1000.0
with contextlib.closing(h5py.File(FLAGS.output_file, 'w')) as f:
f.create_dataset('num_t', data=np.array([nt]), dtype='<i4')
f.create_dataset('num_y', data=np.array([ny]), dtype='<i4')
f.create_dataset('num_z', data=np.array([nz]), dtype='<i4')
f.create_dataset('mean_wind_speed', data=np.array([mean_wind_speed]))
f.create_dataset('mean_wind_direction', data=np.array([mean_wind_dir]))
f.create_dataset('duration', data=np.array([duration]))
f.create_dataset('width', data=np.array([width]))
f.create_dataset('height', data=np.array([height]))
# The velocity field is uniform over y and z for each point in time.
# Each dataset corresponds to an array over (t, y, z) in row-major
# order, so each data point should be repeated ny*nz times.
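    # e.g. with ny * nz == 4, np.repeat([u0, u1], 4) gives
    # [u0, u0, u0, u0, u1, u1, u1, u1].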
f.create_dataset('u', data=np.repeat(wind_vel_mw[:, 0], ny * nz),
compression='gzip', compression_opts=5)
f.create_dataset('v', data=np.repeat(wind_vel_mw[:, 1], ny * nz),
compression='gzip', compression_opts=5)
f.create_dataset('w', data=np.repeat(wind_vel_mw[:, 2], ny * nz),
compression='gzip', compression_opts=5)
if __name__ == '__main__':
gflags.MarkFlagAsRequired('output_file')
main(sys.argv)
|
test/util.py
|
pyrige/greedypacker
| 108 |
91202
|
import sys
import contextlib
@contextlib.contextmanager
def stdout_redirect(stringIO):
sys.stdout = stringIO
try:
yield stringIO
finally:
sys.stdout = sys.__stdout__
stringIO.seek(0)
|
snape/test/test_make_image_dataset.py
|
AhmadKSA/snape
| 182 |
91259
|
<reponame>AhmadKSA/snape
import shutil
from snape.make_image_dataset import *
from snape.make_image_dataset import _ImageNet, _ImageGrabber
from snape.utils import get_random_state
import glob
import pytest
conf = {
"n_classes": 2,
"n_samples": 11,
"out_path": "./test_images/",
"weights": [.8, .2],
"image_source": "imagenet",
"random_seed": 42
}
random_state = get_random_state(conf["random_seed"])
def test_make_image_dataset():
os.mkdir(conf["out_path"])
try:
make_image_dataset(conf)
subdirs = glob.glob(f'{conf["out_path"]}/*')
print(f"Subdirs: {subdirs}")
assert len(subdirs) == conf["n_classes"], \
f'Expected {conf["n_classes"]} classes, but got {len(subdirs)}'
n_samples = conf["n_samples"]
weights = conf["weights"]
exp_class_counts = sorted([int(w * n_samples) for w in weights])
        # sort subdirs by the number of images present under each
subdir_counts = sorted([len(glob.glob(f'{s}/*')) for s in subdirs])
assert exp_class_counts == subdir_counts, \
f"\nExpected class counts: {exp_class_counts}" \
f"\nActual class counts: {subdir_counts}"
finally:
shutil.rmtree(conf["out_path"])
@pytest.mark.parametrize(
'cfg', [
# missing an arg
{
"n_samples": 11,
"out_path": "./test_images/",
"weights": [.8, .2],
"image_source": "imagenet",
"random_seed": 42
},
# wrong arg
{
"nclasses": 2,
"n_samples": 11,
"out_path": "./test_images/",
"weights": [.8, .2],
"image_source": "imagenet",
"random_seed": 42
}
]
)
def test_check_configuration(cfg):
with pytest.raises(AssertionError):
check_configuration(cfg)
class TestImageNet:
def __init__(self):
self.image_net = _ImageNet(n_classes=conf["n_classes"],
weights=conf["weights"],
n_samples=conf["n_samples"],
output_dir=conf["out_path"],
random_state=random_state)
def test_get_images(self):
os.mkdir(conf["out_path"])
try:
self.image_net.get_images()
sub_dir1 = conf["out_path"] + os.listdir(conf["out_path"])[0]
sub_dir2 = conf["out_path"] + os.listdir(conf["out_path"])[1]
n_images1 = len(os.listdir(sub_dir1))
n_images2 = len(os.listdir(sub_dir2))
class1_size = int(conf["n_samples"] * conf["weights"][0])
assert (class1_size == n_images1) or (class1_size == n_images2), "Did not download n images"
except:
raise
finally:
shutil.rmtree(conf["out_path"])
def test_sample_synset_links(self):
n = 5
wnid = 'n02114855'
os.mkdir(conf["out_path"])
try:
self.image_net.sample_synset_links(wnid, n, conf["out_path"])
n_images = len(os.listdir(conf["out_path"] + '/' + wnid))
assert n == n_images, "Did not download n images"
assert wnid in os.listdir(conf["out_path"]), "Did not get the requested synset"
except:
raise
finally:
shutil.rmtree(conf["out_path"])
def test_get_ilsvrc_1000_synsets(self):
synsets = self.image_net.get_ilsvrc_1000_synsets()
assert len(synsets) == 1000, "ILSVRC page parsed incorrectly"
def test_get_synset_image_links(self):
wnid = 'n02114855'
links = self.image_net.get_synset_image_links(wnid)
assert len(links) > 0, "Did not return any image links"
def test_retrieve_class_counts(self):
class_counts = self.image_net.retrieve_class_counts()
assert isinstance(class_counts, pd.core.frame.DataFrame), "Class counts not returned in a dataframe"
class TestImageGrabber:
def test_download_image(self):
good_url = "http://farm4.static.flickr.com/3290/2998414960_01dd35d094.jpg"
good_im_path = "ducky.jpg"
_ImageGrabber().download_image(good_url, good_im_path)
good_im_type = imghdr.what(good_im_path)
os.remove(good_im_path)
assert good_im_type is not None
bad_url = "https://mckinleyleather.com/image/130963084.jpg"
bad_im_path = "no_ducky.jpg"
_ImageGrabber().download_image(bad_url, bad_im_path)
is_file = os.path.isfile(bad_im_path)
assert not is_file
def test_catch_unavailable_image(self):
good_url = "http://farm4.static.flickr.com/3290/2998414960_01dd35d094.jpg"
good_img_data = requests.get(good_url)
assert not _ImageGrabber.catch_unavailable_image(good_img_data), "The good image tested was found to be bad"
stale_url = "https://mckinleyleather.com/image/130963084.jpg"
stale_img_data = requests.get(stale_url)
assert _ImageGrabber.catch_unavailable_image(stale_img_data), "The stale image tested was found to be good"
junk_url = "http://farm4.static.flickr.com/3225/2806850016_9bf939037e.jpg"
junk_img_data = requests.get(junk_url)
assert _ImageGrabber.catch_unavailable_image(junk_img_data), "The junk image tested was found to be good"
class TestOpenImages:
pass
class TestGoogleSearch:
pass
|
scripts/kitti_submission.py
|
xingruiy/RAFT-3D
| 133 |
91268
|
<reponame>xingruiy/RAFT-3D<filename>scripts/kitti_submission.py
import sys
sys.path.append('.')
from tqdm import tqdm
import os
import numpy as np
import cv2
import argparse
import torch
from lietorch import SE3
import raft3d.projective_ops as pops
from utils import show_image, normalize_image
from data_readers.kitti import KITTIEval
import torch.nn.functional as F
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from glob import glob
from data_readers.frame_utils import *
def display(img, tau, phi):
""" display se3 fields """
fig, (ax1, ax2, ax3) = plt.subplots(1,3)
ax1.imshow(img[:, :, ::-1] / 255.0)
tau_img = np.clip(tau, -0.1, 0.1)
tau_img = (tau_img + 0.1) / 0.2
phi_img = np.clip(phi, -0.1, 0.1)
phi_img = (phi_img + 0.1) / 0.2
ax2.imshow(tau_img)
ax3.imshow(phi_img)
plt.show()
def prepare_images_and_depths(image1, image2, depth1, depth2, depth_scale=1.0):
""" padding, normalization, and scaling """
ht, wd = image1.shape[-2:]
pad_h = (-ht) % 8
pad_w = (-wd) % 8
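    # (-d) % 8 is the smallest non-negative padding that rounds d up to a
    # multiple of 8 (e.g. ht=370 -> pad_h=6), presumably so the network's
    # downsampling layers divide the input evenly.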
image1 = F.pad(image1, [0,pad_w,0,pad_h], mode='replicate')
image2 = F.pad(image2, [0,pad_w,0,pad_h], mode='replicate')
depth1 = F.pad(depth1[:,None], [0,pad_w,0,pad_h], mode='replicate')[:,0]
depth2 = F.pad(depth2[:,None], [0,pad_w,0,pad_h], mode='replicate')[:,0]
depth1 = (depth_scale * depth1).float()
depth2 = (depth_scale * depth2).float()
image1 = normalize_image(image1.float())
image2 = normalize_image(image2.float())
depth1 = depth1.float()
depth2 = depth2.float()
return image1, image2, depth1, depth2, (pad_w, pad_h)
@torch.no_grad()
def make_kitti_submission(model):
loader_args = {'batch_size': 1, 'shuffle': False, 'num_workers': 1, 'drop_last': False}
test_loader = DataLoader(KITTIEval(), **loader_args)
DEPTH_SCALE = .1
for i_batch, data_blob in enumerate(test_loader):
image1, image2, disp1, disp2, intrinsics = [item.cuda() for item in data_blob]
img1 = image1[0].permute(1,2,0).cpu().numpy()
depth1 = DEPTH_SCALE * (intrinsics[0,0] / disp1)
depth2 = DEPTH_SCALE * (intrinsics[0,0] / disp2)
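        # Stereo disparity is inversely proportional to depth, so a scaled
        # depth is recovered as DEPTH_SCALE * focal_length / disparity.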
ht, wd = image1.shape[2:]
image1, image2, depth1, depth2, _ = \
prepare_images_and_depths(image1, image2, depth1, depth2)
Ts = model(image1, image2, depth1, depth2, intrinsics, iters=16)
tau_phi = Ts.log()
        # uncomment to display the motion field
# tau, phi = Ts.log().split([3,3], dim=-1)
# tau = tau[0].cpu().numpy()
# phi = phi[0].cpu().numpy()
# display(img1, tau, phi)
# compute optical flow
flow, _, _ = pops.induced_flow(Ts, depth1, intrinsics)
flow = flow[0, :ht, :wd, :2].cpu().numpy()
# compute disparity change
coords, _ = pops.projective_transform(Ts, depth1, intrinsics)
disp2 = intrinsics[0,0] * coords[:,:ht,:wd,2] * DEPTH_SCALE
disp1 = disp1[0].cpu().numpy()
disp2 = disp2[0].cpu().numpy()
KITTIEval.write_prediction(i_batch, disp1, disp2, flow)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='path to the model weights')
parser.add_argument('--network', default='raft3d.raft3d', help='network architecture')
parser.add_argument('--radius', type=int, default=32)
args = parser.parse_args()
import importlib
RAFT3D = importlib.import_module(args.network).RAFT3D
model = torch.nn.DataParallel(RAFT3D(args))
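    # Wrap in DataParallel before loading, presumably because the checkpoint
    # keys are prefixed with 'module.' from multi-GPU training.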
model.load_state_dict(torch.load(args.model))
model.cuda()
model.eval()
if not os.path.isdir('kitti_submission'):
os.mkdir('kitti_submission')
os.mkdir('kitti_submission/disp_0')
os.mkdir('kitti_submission/disp_1')
os.mkdir('kitti_submission/flow')
make_kitti_submission(model)
|
tests/callbacks/test_manifold_mixup.py
|
NunoEdgarGFlowHub/torchbearer
| 358 |
91279
|
from unittest import TestCase
from mock import patch, Mock
from torch import nn
from torchbearer.callbacks.manifold_mixup import ManifoldMixup
import torchbearer
import torch
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.conv = nn.Conv1d(1, 1, 1)
self.relu = nn.ReLU()
self.bn = nn.BatchNorm1d(1)
def forward(self, x):
x = self.conv(x.view(-1, 1, 1))
x = self.relu(x)
x = self.bn(x)
return x
class TestModule2(nn.Module):
def __init__(self):
super(TestModule2, self).__init__()
self.layer1 = TestModule()
def forward(self, x):
return self.layer1(x)
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(1, 1)
self.conv1 = nn.Conv1d(1, 1, 1)
self.relu = nn.ReLU()
self.layer1 = TestModule()
self.layer2 = TestModule2()
def forward(self, x):
x = self.fc1(x)
x = self.conv1(x.view(-1,1,1))
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
return x
class TestManifoldMixup(TestCase):
def setUp(self):
super(TestManifoldMixup, self).setUp()
self.model = TestModel()
def test_depth_none(self):
mm = ManifoldMixup().at_depth(None)
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
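        # 5 top-level modules + 3 inside layer1 + layer2.layer1 + 3 inside
        # layer2.layer1 = 12 candidate layers.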
self.assertTrue(len(mm._layers) == 12)
def test_depth_0(self):
mm = ManifoldMixup().at_depth(0)
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
checks = [
self.model.fc1 in mm._layers,
self.model.conv1 in mm._layers,
self.model.relu in mm._layers,
self.model.layer1 in mm._layers,
self.model.layer2 in mm._layers,
]
self.assertTrue(all(checks)) # Top level modules in
self.assertFalse(self.model.layer1.conv in mm._layers) # Depth 1 modules not in
def test_depth_1(self):
mm = ManifoldMixup().at_depth(1)
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
top_checks = [
self.model.fc1 in mm._layers,
self.model.conv1 in mm._layers,
self.model.relu in mm._layers,
self.model.layer1 in mm._layers,
self.model.layer2 in mm._layers,
]
first_checks = [
self.model.layer1.conv in mm._layers,
self.model.layer1.relu in mm._layers,
self.model.layer1.bn in mm._layers,
self.model.layer2.layer1 in mm._layers,
]
self.assertFalse(any(top_checks)) # Top level modules not in
self.assertTrue(all(first_checks)) # Depth 1 modules in
def test_depth_2(self):
mm = ManifoldMixup().at_depth(2)
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
top_checks = [
self.model.fc1 in mm._layers,
self.model.conv1 in mm._layers,
self.model.relu in mm._layers,
self.model.layer1 in mm._layers,
self.model.layer2 in mm._layers,
]
first_checks = [
self.model.layer1.conv in mm._layers,
self.model.layer1.relu in mm._layers,
self.model.layer1.bn in mm._layers,
self.model.layer2.layer1 in mm._layers,
]
second_checks = [
self.model.layer2.layer1.conv in mm._layers,
self.model.layer2.layer1.relu in mm._layers,
self.model.layer2.layer1.bn in mm._layers,
]
self.assertFalse(any(top_checks)) # Top level modules not in
self.assertFalse(any(first_checks)) # Depth 1 modules not in
self.assertTrue(all(second_checks)) # Depth 2 modules in
def test_for_layers(self):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_conv', 'layer2_layer1_conv'])
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
        self.assertTrue(self.model.conv1 in mm._layers)
        self.assertTrue(self.model.layer1.conv in mm._layers)
        self.assertTrue(self.model.layer2.layer1.conv in mm._layers)
self.assertTrue(len(mm._layers) == 3)
def test_get_selected_layers(self):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_conv', 'layer2_layer1_conv'])
found_layers = mm.get_selected_layers(self.model)
self.assertTrue(len(found_layers) == 3)
self.assertTrue('conv1' in found_layers)
self.assertTrue('layer1_conv' in found_layers)
self.assertTrue('layer2_layer1_conv' in found_layers)
def test_layer_filter(self):
mm = ManifoldMixup().at_depth(None).with_layer_filter(['conv1', 'layer1_conv', 'layer2_layer1_conv'])
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
        self.assertFalse(self.model.conv1 in mm._layers)
        self.assertFalse(self.model.layer1.conv in mm._layers)
        self.assertFalse(self.model.layer2.layer1.conv in mm._layers)
        self.assertTrue(len(mm._layers) == 12 - 3)
def test_layer_type_filter(self):
mm = ManifoldMixup().at_depth(None).with_layer_type_filter([nn.Conv1d])
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
        self.assertFalse(self.model.conv1 in mm._layers)
        self.assertFalse(self.model.layer1.conv in mm._layers)
        self.assertFalse(self.model.layer2.layer1.conv in mm._layers)
        self.assertTrue(len(mm._layers) == 12 - 3)
def test_wrap(self):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_relu', 'layer2_layer1_conv'])
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
self.model.conv1.mixup()
self.model.layer1.relu.mixup()
self.model.layer2.layer1.conv.mixup()
self.assertRaises(AttributeError, lambda: self.model.relu.mixup())
@patch('torchbearer.callbacks.manifold_mixup._mixup_inputs', side_effect=lambda x, _: x)
def test_call_mix(self, _):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_relu', 'layer2_layer1_conv'])
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
self.model.conv1.mixup()
self.assertTrue(self.model.conv1.do_mixup)
self.model(torch.rand(3, 1))
self.assertFalse(self.model.conv1.do_mixup)
@patch('torchbearer.callbacks.manifold_mixup._mixup')
def test_on_sample(self, mix):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_relu', 'layer2_layer1_conv'])
state = {torchbearer.MODEL: self.model, torchbearer.X: torch.rand(3, 1), torchbearer.Y_TRUE: torch.rand(3, 1)}
mm.on_start(state)
mm.on_sample(state)
self.assertTrue(mix.call_count == 1)
self.assertTrue(torchbearer.MIXUP_PERMUTATION in state)
self.assertTrue(torchbearer.MIXUP_LAMBDA in state)
state = {torchbearer.MODEL: self.model, torchbearer.X: torch.rand(3, 1), torchbearer.Y_TRUE: torch.rand(3, 1)}
mm.on_sample(state)
self.assertTrue(mix.call_count == 2)
@patch('torchbearer.callbacks.manifold_mixup._mixup_inputs', side_effect=lambda x, _: x)
def test_eval(self, mix):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_relu', 'layer2_layer1_conv'])
self.model.eval()
state = {torchbearer.MODEL: self.model, torchbearer.X: torch.rand(3, 1), torchbearer.Y_TRUE: torch.rand(3, 1)}
mm.on_start(state)
mm.on_sample(state)
self.model(torch.rand(3, 1))
self.assertTrue(mix.call_count == 0)
state = {torchbearer.MODEL: self.model, torchbearer.X: torch.rand(3, 1), torchbearer.Y_TRUE: torch.rand(3, 1)}
mm.on_sample(state)
self.model = self.model.train()
self.model(torch.rand(3, 1))
self.assertTrue(mix.call_count == 1)
def test_mixup_inputs(self):
from torchbearer.callbacks.manifold_mixup import _mixup_inputs
x = torch.Tensor([[1, 2], [2, 3]])
perm = torch.Tensor([1, 0]).long()
lam = torch.Tensor([0.1])
state = {torchbearer.X: x, torchbearer.MIXUP_PERMUTATION: perm, torchbearer.MIXUP_LAMBDA: lam}
mixed = _mixup_inputs(x, state)
self.assertFalse((mixed - torch.Tensor([[1.9, 2.9], [1.1, 2.1]]) > 1e-6).any())
@patch('torchbearer.callbacks.manifold_mixup.Beta')
def test_sample_lam_random(self, beta):
mm = ManifoldMixup()
sl = mm._sample_lam
sl()
self.assertTrue(beta.mock_calls[0][1] == (1., 1.))
self.assertTrue(beta.mock_calls[1][0] == '().sample')
def test_sample_lam_negative(self):
mm = ManifoldMixup(alpha=-1)
sl = mm._sample_lam
lam = sl()
self.assertTrue(lam == 1.)
def test_sample_lam_fixed(self):
mm = ManifoldMixup(lam=2.)
sl = mm._sample_lam
lam = sl()
self.assertTrue(lam == 2.)
def test_single_to_list(self):
mm = ManifoldMixup()
sl = mm._single_to_list
item = 1.
self.assertTrue(sl(item) == [item, ])
|
python/graphscope/nx/tests/algorithms/forward/traversal/test_bfs.py
|
LI-Mingyu/GraphScope-MY
| 1,521 |
91280
|
<reponame>LI-Mingyu/GraphScope-MY<gh_stars>1000+
import networkx.algorithms.traversal.tests.test_bfs
import pytest
from graphscope.nx.utils.compat import import_as_graphscope_nx
import_as_graphscope_nx(networkx.algorithms.traversal.tests.test_bfs,
decorators=pytest.mark.usefixtures("graphscope_session"))
|
corehq/motech/repeaters/migrations/0004_attempt_strings.py
|
akashkj/commcare-hq
| 471 |
91304
|
# Generated by Django 2.2.19 on 2021-04-10 14:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('repeaters', '0003_migrate_connectionsettings'),
]
operations = [
migrations.AlterField(
model_name='sqlrepeatrecordattempt',
name='message',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='sqlrepeatrecordattempt',
name='traceback',
field=models.TextField(blank=True, default=''),
),
]
|
examples/vocab.py
|
lyeoni/nlp-preprocessing
| 152 |
91306
|
import argparse
from collections import Counter, OrderedDict
from prenlp.tokenizer import *
TOKENIZER = {'nltk_moses': NLTKMosesTokenizer(),
'mecab' : Mecab()}
class Vocab:
"""Defines a vocabulary object that will be used to numericalize text.
Args:
vocab_size (int) : the maximum size of the vocabulary
pad_token (str) : token that indicates 'padding'
unk_token (str) : token that indicates 'unknown word'
bos_token (str) : token that indicates 'beginning of sentence'
eos_token (str) : token that indicates 'end of sentence'
"""
def __init__(self, vocab_size: int = 16000, pad_token: str = '[PAD]', unk_token: str = '[UNK]',
bos_token: str = '[BOS]', eos_token: str = '[EOS]'):
self.vocab_size = vocab_size
self.pad_token = pad_token
self.unk_token = unk_token
self.bos_token = bos_token
self.eos_token = eos_token
self.special_tokens = [pad_token, unk_token, bos_token, eos_token]
self.freqs = Counter()
self.vocab = OrderedDict()
# Initialize vocabulary with special tokens
for special_token in self.special_tokens:
self.vocab[special_token] = len(self.vocab)
def build(self, corpus, tokenizer, max_sentence_length=100000):
"""Build vocabulary with given corpus and tokenizer.
"""
with open(corpus, 'r', encoding='utf-8') as reader:
for i, line in enumerate(reader.readlines()):
if len(line) >= max_sentence_length:
line = line[:max_sentence_length]
tokens = tokenizer.tokenize(line.strip())
self.freqs.update(tokens)
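        # Keep only the most frequent tokens, leaving room for the special
        # tokens that were already inserted in __init__.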
for token, freq in self.freqs.most_common(self.vocab_size-len(self.special_tokens)):
self.vocab[token] = len(self.vocab)
def save(self, path, postfix='.vocab'):
"""Save vocabulary.
"""
with open(path+postfix, 'w', encoding='utf-8') as writer:
for token, id in self.vocab.items():
writer.write('{token}\t{id}\n'.format(token=token, id=id))
def __len__(self):
return len(self.vocab)
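# Example usage of Vocab (hypothetical file names):
#   vocab = Vocab(vocab_size=16000)
#   vocab.build('corpus.txt', TOKENIZER['mecab'])
#   vocab.save('my_vocab')  # writes my_vocab.vocab with one "token\tid" per line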
def build(args):
if args.tokenizer == 'sentencepiece':
tokenizer = SentencePiece.train(input = args.corpus, model_prefix = args.prefix,
vocab_size = args.vocab_size,
model_type = args.model_type,
character_coverage = args.character_coverage,
max_sentence_length = args.max_sentence_length,
pad_token = args.pad_token,
unk_token = args.unk_token,
bos_token = args.bos_token,
eos_token = args.eos_token)
else:
tokenizer = TOKENIZER[args.tokenizer]
vocab = Vocab(vocab_size = args.vocab_size,
pad_token = args.pad_token,
unk_token = args.unk_token,
bos_token = args.bos_token,
eos_token = args.eos_token)
vocab.build(args.corpus, tokenizer, args.max_sentence_length)
vocab.save(args.prefix)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--corpus', required=True, type=str, help='one-sentence-per-line corpus file')
parser.add_argument('--prefix', required=True, type=str, help='output vocab(or sentencepiece model) name prefix')
parser.add_argument('--tokenizer', default='sentencepiece', type=str, help='tokenizer to tokenize input corpus. available: sentencepiece, '+', '.join(TOKENIZER.keys()))
parser.add_argument('--vocab_size', default=16000, type=int, help='the maximum size of the vocabulary')
    parser.add_argument('--character_coverage', default=1.0, type=float,
                        help='amount of characters covered by the model; good defaults are 0.9995 for languages with a rich character set '
                             'like Japanese or Chinese, and 1.0 for other languages with a small character set')
parser.add_argument('--model_type', default='bpe', type=str, help='sentencepiece model type. Choose from unigram, bpe, char, or word')
parser.add_argument('--max_sentence_length', default=100000, type=int, help='The maximum input sequence length')
parser.add_argument('--pad_token', default='[PAD]', type=str, help='token that indicates padding')
parser.add_argument('--unk_token', default='[UNK]', type=str, help='token that indicates unknown word')
parser.add_argument('--bos_token', default='[BOS]', type=str, help='token that indicates beginning of sentence')
parser.add_argument('--eos_token', default='[EOS]', type=str, help='token that indicates end of sentence')
args = parser.parse_args()
build(args)
|
airmozilla/main/views/pages.py
|
mozilla/airmozilla
| 115 |
91307
|
<reponame>mozilla/airmozilla
import datetime
import hashlib
import urllib
import time
import collections
import urlparse
import requests
from django import http
from django.conf import settings
from django.contrib.sites.requests import RequestSite
from django.shortcuts import get_object_or_404, redirect, render
from django.core.cache import cache
from django.views.decorators.cache import never_cache
from django.views.generic.base import View
from django.db.models import Count, Q, F
from django.db import transaction
from django.core.urlresolvers import reverse
from django.template import engines
from django.utils import timezone
from jsonview.decorators import json_view
from airmozilla.main.models import (
Event,
EventOldSlug,
Tag,
Channel,
EventHitStats,
CuratedGroup,
Picture,
VidlySubmission,
EventLiveHits,
Chapter,
VidlyTagDomain,
)
from airmozilla.base.utils import (
paginate,
edgecast_tokenize,
akamai_tokenize,
)
from airmozilla.main.templatetags.jinja_helpers import thumbnail
from airmozilla.search.models import LoggedSearch
from airmozilla.comments.models import Discussion
from airmozilla.surveys.models import Survey
# from airmozilla.subtitles.models import AmaraVideo
from airmozilla.closedcaptions.models import ClosedCaptionsTranscript
from airmozilla.manage import vidly
from airmozilla.manage import related
from airmozilla.base import mozillians
from airmozilla.staticpages.views import staticpage
from airmozilla.main import cloud
from airmozilla.main.views import is_contributor, is_employee
from airmozilla.main import forms
def page(request, template):
"""Base page: renders templates bare, used for static pages."""
return render(request, template)
def home(request, page=1, channel_slug=settings.DEFAULT_CHANNEL_SLUG):
"""Paginated recent videos and live videos."""
channels = Channel.objects.filter(slug=channel_slug)
if not channels.count():
if channel_slug == settings.DEFAULT_CHANNEL_SLUG:
# then, the Main channel hasn't been created yet
Channel.objects.create(
name=settings.DEFAULT_CHANNEL_NAME,
slug=settings.DEFAULT_CHANNEL_SLUG
)
channels = Channel.objects.filter(slug=channel_slug)
else:
raise http.Http404('Channel not found')
request.channels = channels
privacy_filter = {}
privacy_exclude = {}
archived_events = Event.objects.archived()
if request.user.is_active:
if is_contributor(request.user):
privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
else:
# privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
archived_events = archived_events.approved()
if privacy_filter:
archived_events = archived_events.filter(**privacy_filter)
elif privacy_exclude:
archived_events = archived_events.exclude(**privacy_exclude)
archived_events = archived_events.order_by('-start_time')
archived_events = archived_events.select_related('picture')
found_tags = []
if request.GET.getlist('tag'):
requested_tags = request.GET.getlist('tag')
for each in requested_tags:
found_tags.extend(Tag.objects.filter(name__iexact=each))
if len(found_tags) < len(requested_tags):
# invalid tags were used in the query string
url = reverse('main:home')
if found_tags:
# some were good
url += '?%s' % urllib.urlencode({
'tag': [x.name for x in found_tags]
}, True)
return redirect(url, permanent=True)
archived_events = archived_events.filter(tags__in=found_tags)
if found_tags:
# no live events when filtering by tag
live_events = Event.objects.none()
else:
live_events = (Event.objects.live()
.order_by('start_time'))
if not request.user.is_active:
live_events = live_events.approved()
if privacy_filter:
live_events = live_events.filter(**privacy_filter)
elif privacy_exclude:
live_events = live_events.exclude(**privacy_exclude)
# apply the mandatory channels filter
# but only do this if it's not filtered by tags
live_events = live_events.filter(channels=channels)
archived_events = archived_events.filter(channels=channels)
live_events = live_events.select_related('picture')
if channels and channels[0].reverse_order:
archived_events = archived_events.reverse()
archived_paged = paginate(archived_events, page, 10)
# to simplify the complexity of the template when it tries to make the
# pagination URLs, we just figure it all out here
next_page_url = prev_page_url = None
channel = channels[0]
if archived_paged.has_next():
if channel.slug == settings.DEFAULT_CHANNEL_SLUG:
next_page_url = reverse(
'main:home',
args=(archived_paged.next_page_number(),)
)
else:
next_page_url = reverse(
'main:home_channels',
args=(channel.slug,
archived_paged.next_page_number())
)
if archived_paged.has_previous():
if channel.slug == settings.DEFAULT_CHANNEL_SLUG:
prev_page_url = reverse(
'main:home',
args=(archived_paged.previous_page_number(),)
)
else:
prev_page_url = reverse(
'main:home_channels',
args=(channel.slug,
archived_paged.previous_page_number())
)
events_qs = Event.objects.archived().all()
if request.user.is_active:
if is_contributor(request.user):
feed_privacy = 'contributors'
events_qs = events_qs.exclude(privacy=Event.PRIVACY_COMPANY)
else:
feed_privacy = 'company'
else:
events_qs = events_qs.filter(privacy=Event.PRIVACY_PUBLIC)
feed_privacy = 'public'
channel_children = []
for child in channel.get_children().order_by('name'):
channel_children.append((
child,
events_qs.filter(channels=child).count()
))
curated_groups_map = collections.defaultdict(list)
curated_groups = (
CuratedGroup.objects.all()
.values_list('event_id', 'name')
.order_by('name')
)
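    # Build an event_id -> [group names] lookup in a single query so the
    # get_curated_groups helper below avoids a per-event database hit.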
for event_id, name in curated_groups:
curated_groups_map[event_id].append(name)
def get_curated_groups(event):
return curated_groups_map.get(event.id)
context = {
'events': archived_paged,
'live_events': live_events,
'tags': found_tags,
'Event': Event,
'channel': channel,
'channel_children': channel_children,
'feed_privacy': feed_privacy,
'next_page_url': next_page_url,
'prev_page_url': prev_page_url,
'get_curated_groups': get_curated_groups,
}
return render(request, 'main/home.html', context)
def can_view_event(event, user):
"""return True if the current user has right to view this event"""
if event.privacy == Event.PRIVACY_PUBLIC:
return True
elif not user.is_active:
return False
# you're logged in
if event.privacy == Event.PRIVACY_COMPANY:
# but then it's not good enough to be contributor
if is_contributor(user):
return False
else:
if not is_contributor(user):
# staff can always see it
return True
curated_groups = [
x[0] for x in
CuratedGroup.objects.filter(event=event).values_list('name')
]
if curated_groups:
return any(
[mozillians.in_group(user.email, x) for x in curated_groups]
)
return True
class EventView(View):
"""Video, description, and other metadata."""
template_name = 'main/event.html'
def cant_view_event(self, event, request):
"""return a response appropriate when you can't view the event"""
if request.user.is_authenticated():
return redirect('main:permission_denied', event.slug)
else:
desired_url = reverse('main:event', args=(event.slug,))
url = reverse('main:login')
return redirect('%s?next=%s' % (url, urllib.quote(desired_url)))
def cant_find_event(self, request, slug):
"""return an appropriate response if no event can be found"""
return staticpage(request, slug)
def can_view_event(self, event, request):
"""wrapper on the utility function can_view_event()"""
return can_view_event(event, request.user)
def get_default_context(self, event, request):
context = {}
prefix = request.is_secure() and 'https' or 'http'
root_url = '%s://%s' % (prefix, RequestSite(request).domain)
url = reverse('main:event_video', kwargs={'slug': event.slug})
absolute_url = root_url + url
context['embed_code'] = (
'<iframe src="%s" '
'width="640" height="380" frameborder="0" allowfullscreen>'
'</iframe>'
% absolute_url
)
context['embed_code_big'] = (
'<iframe src="%s" '
'width="896" height="524" frameborder="0" allowfullscreen>'
'</iframe>'
% absolute_url
)
return context
def get_event(self, slug, request):
try:
return Event.objects.get(slug=slug)
except Event.DoesNotExist:
try:
return Event.objects.get(slug__iexact=slug)
except Event.DoesNotExist:
try:
old_slug = EventOldSlug.objects.get(slug=slug)
return redirect('main:event', slug=old_slug.event.slug)
except EventOldSlug.DoesNotExist:
# does it exist as a static page
if slug.isdigit():
# it might be the ID of the event
try:
return Event.objects.get(id=slug)
except Event.DoesNotExist:
# not that either
pass
return self.cant_find_event(request, slug)
@staticmethod
def get_vidly_information(event, tag):
cache_key = 'event_vidly_information-{}'.format(event.id)
from_cache = cache.get(cache_key)
if from_cache is not None:
return from_cache
# It was not cached, we have to figure it out
vidly_tag = hd = None
if (
not (event.is_pending() or event.is_processing()) and
event.is_public() and
event.has_vidly_template() and event.template_environment
):
if event.template_environment.get('tag'):
vidly_tag = tag or event.template_environment['tag']
hd = False # default
vidly_submissions = (
VidlySubmission.objects
.filter(event=event, tag=vidly_tag)
.order_by('-submission_time')
)
for vidly_submission in vidly_submissions.values('hd'):
hd = vidly_submission['hd']
break
cache.set(cache_key, (vidly_tag, hd), 60 * 60)
return vidly_tag, hd
def get(self, request, slug):
event = self.get_event(slug, request)
if isinstance(event, http.HttpResponse):
return event
if not self.can_view_event(event, request):
return self.cant_view_event(event, request)
tag = request.GET.get('tag')
warning = None
ok_statuses = (
Event.STATUS_SCHEDULED,
Event.STATUS_PENDING,
Event.STATUS_PROCESSING,
)
if event.status not in ok_statuses:
if not request.user.is_superuser:
self.template_name = 'main/event_not_scheduled.html'
else:
warning = "Event is not publicly visible - not scheduled."
if event.approval_set.filter(approved=False).exists():
if not request.user.is_active:
return http.HttpResponse('Event not approved')
else:
warning = "Event is not publicly visible - not yet approved."
hits = None
# assume this to false to start with
can_edit_chapters = False
template_tagged = ''
if event.template and not event.is_upcoming():
# The only acceptable way to make autoplay be on
# is to send ?autoplay=true
# All other attempts will switch it off.
autoplay = request.GET.get('autoplay', 'false') == 'true'
try:
template_tagged = get_video_tagged(
event,
request,
autoplay=autoplay,
tag=tag,
)
except VidlySubmission.DoesNotExist:
return http.HttpResponseBadRequest(
'Tag %s does not exist for this event' % (tag,)
)
stats_query = (
EventHitStats.objects.filter(event=event)
.values_list('total_hits', flat=True)
)
for total_hits in stats_query:
hits = total_hits
# if the event has a template is not upcoming
if not event.is_live() and event.is_scheduled():
# ...and is not live, then
if request.user.is_active:
can_edit_chapters = True
can_manage_edit_event = (
request.user.is_active and
request.user.is_staff and
request.user.has_perm('main.change_event')
)
can_edit_event = request.user.is_active
can_edit_discussion = (
can_edit_event and
# This is a little trick to avoid waking up the
# SimpleLazyObject on the user. If the .is_active is true
# the ID will have already been set by the session.
# So doing this comparison instead avoids causing a
# select query on the auth_user table.
request.user.pk == event.creator_id and
Discussion.objects.filter(event=event).exists()
)
request.channels = event.channels.all()
# needed for the open graph stuff
event.url = reverse('main:event', args=(event.slug,))
context = self.get_default_context(event, request)
context.update({
'event': event,
'video': template_tagged,
'warning': warning,
'can_manage_edit_event': can_manage_edit_event,
'can_edit_event': can_edit_event,
'can_edit_discussion': can_edit_discussion,
'can_edit_chapters': can_edit_chapters,
'Event': Event,
'hits': hits,
'tags': [t.name for t in event.tags.all()],
'channels': request.channels,
# needed for the _event_privacy.html template
'curated_groups': CuratedGroup.get_names(event),
})
context['chapters'] = Chapter.objects.filter(
event=event,
is_active=True,
)
# By default, we want to hint in the DOM that this is an HD
# video.
context['hd'] = event.is_scheduled() and not event.is_upcoming()
vidly_tag, vidly_hd = self.get_vidly_information(event, tag)
if vidly_tag:
context['vidly_tag'] = vidly_tag
context['vidly_hd'] = vidly_hd
if not vidly_hd:
context['hd'] = False
# If the event is in the processing state (or pending), we welcome
# people to view it but it'll say that the video isn't ready yet.
# But we'll also try to include an estimate of how long we think
# it will take until it's ready to be viewed.
context['estimated_time_left'] = None
context['time_run'] = None
if (
(event.is_processing() or event.is_pending()) and
event.duration and
event.template_environment.get('tag')
):
vidly_submissions = (
VidlySubmission.objects
.filter(event=event, tag=event.template_environment.get('tag'))
.order_by('-submission_time')
)
for vidly_submission in vidly_submissions[:1]:
context['estimated_time_left'] = (
vidly_submission.get_estimated_time_left()
)
context['time_run'] = (
(
timezone.now() - vidly_submission.submission_time
).seconds
)
if event.pin:
if (
not request.user.is_authenticated() or
not is_employee(request.user)
):
entered_pins = request.session.get('entered_pins', [])
if event.pin not in entered_pins:
self.template_name = 'main/event_requires_pin.html'
context['pin_form'] = forms.PinForm()
try:
context['discussion'] = Discussion.objects.get(event=event)
# The name of the channel we publish to fanout on when there's
# changes to this events comments.
context['subscription_channel_comments'] = 'comments-{}'.format(
event.id
)
except Discussion.DoesNotExist:
context['discussion'] = {'enabled': False}
context['subscription_channel_status'] = 'event-{}'.format(event.id)
# amara_videos = AmaraVideo.objects.filter(
# event=event,
# transcript__isnull=False,
# )
# context['amara_video'] = None
# for amara_video in amara_videos.order_by('-modified')[:1]:
# context['amara_video'] = amara_video
context['closedcaptions'] = None
for connection in ClosedCaptionsTranscript.objects.filter(event=event):
assert connection.closedcaptions.transcript
context['closedcaptions'] = connection.closedcaptions
cache_key = 'event_survey_id_%s' % event.id
context['survey_id'] = cache.get(cache_key, -1)
if context['survey_id'] == -1: # not known in cache
try:
survey = Survey.objects.get(
events=event,
active=True
)
cache.set(cache_key, survey.id, 60 * 60 * 24)
context['survey_id'] = survey.id
except Survey.DoesNotExist:
cache.set(cache_key, None, 60 * 60 * 24)
context['survey_id'] = None
if settings.LOG_SEARCHES:
if request.session.get('logged_search'):
pk, time_ago = request.session.get('logged_search')
age = time.time() - time_ago
if age <= 5:
# the search was made less than 5 seconds ago
try:
logged_search = LoggedSearch.objects.get(pk=pk)
logged_search.event_clicked = event
logged_search.save()
except LoggedSearch.DoesNotExist:
pass
response = render(request, self.template_name, context)
self._set_csp_update(response, event)
return response
def _set_csp_update(self, response, event):
"""Hack alert!
We need to, potentially, update the CSP at run time if the
video you're trying to watch is a Vid.ly video.
Vid.ly is embedded by simply using `https://vid.ly/:shortcode`
        but internally they will redirect to an AWS CloudFront domain
which we might not have prepared in our CSP settings.
So let's update that on the fly.
"""
cache_key = 'custom_csp_update:{}'.format(event.id)
update = cache.get(cache_key)
if update is not None:
# it was set, use that and exit early
if update:
response._csp_update = update
return
if not event.template:
return
if event.is_upcoming() or event.is_live() or not event.is_scheduled():
return
if 'vid.ly' not in event.template.name.lower():
return
if not event.template_environment.get('tag'):
return
tag = event.template_environment['tag']
update = get_vidly_csp_headers(tag, private=not event.is_public())
cache.set(cache_key, update, 60 * 60)
# Now we've figured out what headers to update, set it on the response
if update:
response._csp_update = update
def post(self, request, slug):
event = get_object_or_404(Event, slug=slug)
pin_form = forms.PinForm(request.POST, instance=event)
if pin_form.is_valid():
entered_pins = self.request.session.get('entered_pins', [])
pin = pin_form.cleaned_data['pin']
if pin not in entered_pins:
entered_pins.append(pin)
request.session['entered_pins'] = entered_pins
return redirect('main:event', slug=slug)
context = {
'event': event,
'pin_form': pin_form,
}
return render(request, 'main/event_requires_pin.html', context)
def get_vidly_csp_headers(tag, private=False):
token = None
if private:
token = vidly.tokenize(tag, 90)
headers = {}
def get_netloc(type_, url_format):
netloc = None
try:
found = VidlyTagDomain.objects.get(
tag=tag,
type=type_,
)
if found.private != private:
# The tag has changed!
found.delete()
raise VidlyTagDomain.DoesNotExist
elif found.domain == 'm.vid.ly': # pragma: no cover
# In a previous life, airmozilla might have attempted to
# look up what the CDN domain was and if it failed,
# Vid.ly would just redirect to 'https://m.vid.ly' which
# is NOT the right CDN domain. We shouldn't have stored
# that.
# This knowledge was added in June 2017 and from now on
# we never save this as the domain so it should cease.
found.delete()
raise VidlyTagDomain.DoesNotExist
else:
netloc = found.domain
except VidlyTagDomain.DoesNotExist:
url = url_format.format(
tag
)
if token:
url += '&token={}'.format(token)
head_response = requests.head(url)
if head_response.status_code == 302:
if head_response.headers['Location'] == 'https://m.vid.ly':
# Basically, it didn't work.
# When vid.ly can't redirect to the actual file, for
# some reason it instead redirects to the exact
# URL 'https://m.vid.ly'. For example:
#
# curl -v https://vid.ly/l1c2w5/blalbla
# ...
# < HTTP/1.1 302 Found
# ...
# < Location: https://m.vid.ly
#
                    # Odd, right? But it basically means that we were
                    # not able to do the lookup. Sorry.
return
netloc = urlparse.urlparse(
head_response.headers['Location']
).netloc
assert netloc, head_response.headers['Location']
VidlyTagDomain.objects.create(
tag=tag,
type=type_,
private=private,
domain=netloc,
)
return netloc
media_netloc = get_netloc('webm', settings.VIDLY_VIDEO_URL_FORMAT)
if media_netloc:
headers['media-src'] = media_netloc
# In almost all cases, the poster image is on the same domain
# as the video. So let's use that.
# Later we're going to try to do a specific lookup for the poster.
# If that's better/different that becomes the added domain
# for 'img-src' instead.
headers['img-src'] = media_netloc
        # There is no way to pre-look-up what the actual CDN domain
        # for the webvtt.vtt file is, so let's hope for the best and
        # reuse the media domain for the connect-src too.
headers['connect-src'] = media_netloc
lock_cache_key = 'poster_netloc_failed:{}'.format(tag)
if not cache.get(lock_cache_key):
img_netloc = get_netloc('poster', settings.VIDLY_POSTER_URL_FORMAT)
if img_netloc:
headers['img-src'] = img_netloc
else:
# If that failed, don't bother trying again. For a while.
cache.set(lock_cache_key, True, 60 * 60)
return headers
class EventByIDView(EventView):
def get(self, request, id):
event = get_object_or_404(Event, id=id)
if not self.can_view_event(event, request):
return self.cant_view_event(event, request)
return redirect('main:event', event.slug)
def get_video_tagged(event, request, autoplay=False, tag=None):
def poster_url(geometry='896x504', crop='center'):
image = event.picture and event.picture.file or event.placeholder_img
return thumbnail(image, geometry, crop=crop).url
context = {
'md5': lambda s: hashlib.md5(s).hexdigest(),
'event': event,
'request': request,
'datetime': datetime.datetime.utcnow(),
'vidly_tokenize': vidly.tokenize,
'edgecast_tokenize': edgecast_tokenize,
'akamai_tokenize': akamai_tokenize,
'popcorn_url': event.popcorn_url,
'autoplay': autoplay and 'true' or 'false', # javascript
'poster_url': poster_url,
}
if isinstance(event.template_environment, dict):
context.update(event.template_environment)
if tag:
submissions = VidlySubmission.objects.filter(
tag=tag,
event=event
)
if not submissions.exists():
raise VidlySubmission.DoesNotExist(tag)
context['tag'] = tag
template = engines['backend'].from_string(event.template.content)
try:
template_tagged = template.render(context)
except vidly.VidlyTokenizeError, msg:
template_tagged = '<code style="color:red">%s</code>' % msg
return template_tagged
class EventVideoView(EventView):
template_name = 'main/event_video.html'
def can_view_event(self, event, request):
if self.embedded:
if event.privacy != Event.PRIVACY_PUBLIC:
# If you are the owner of it, it's fine, if we don't
# want any warnings
if (
self.no_warning and
request.user.is_active and request.user == event.creator
):
return True
return False
return True
else:
return super(EventVideoView, self).can_view_event(event, request)
def cant_view_event(self, event, request):
"""return a response appropriate when you can't view the event"""
return render(request, self.template_name, {
'error': "Not a public event",
'event': None,
})
def cant_find_event(self, request, slug):
"""return an appropriate response if no event can be found"""
return render(request, self.template_name, {
'error': "Event not found",
'event': None
})
def get_default_context(self, event, request):
context = {}
prefix = request.is_secure() and 'https' or 'http'
root_url = '%s://%s' % (prefix, RequestSite(request).domain)
url = reverse('main:event', kwargs={'slug': event.slug})
context['absolute_url'] = root_url + url
context['embedded'] = self.embedded
context['no_warning'] = self.no_warning
context['no_footer'] = request.GET.get('no-footer')
return context
def get(self, request, slug):
self.embedded = request.GET.get('embedded', 'true') == 'true'
self.no_warning = request.GET.get('no-warning')
response = super(EventVideoView, self).get(request, slug)
# ALLOWALL is what YouTube uses for sharing
if self.embedded:
response['X-Frame-Options'] = 'ALLOWALL'
return response
class EventDiscussionView(EventView):
template_name = 'main/event_discussion.html'
def can_edit_discussion(self, event, request):
# this might change in the future to only be
# employees and vouched mozillians
return (
request.user.is_active and
request.user == event.creator and
Discussion.objects.filter(event=event)
)
def cant_edit_discussion(self, event, user):
return redirect('main:event', event.slug)
def get_event_safely(self, slug, request):
event = self.get_event(slug, request)
if isinstance(event, http.HttpResponse):
return event
if not self.can_view_event(event, request):
return self.cant_view_event(event, request)
if not self.can_edit_discussion(event, request):
return self.cant_edit_discussion(event, request)
return event
def get(self, request, slug, form=None):
event = self.get_event_safely(slug, request)
if isinstance(event, http.HttpResponse):
return event
discussion = Discussion.objects.get(event=event)
if form is None:
initial = {
'moderators': ', '.join(
x.email for x in discussion.moderators.all()
),
}
form = forms.EventDiscussionForm(
instance=discussion,
event=event,
initial=initial,
)
context = {
'event': event,
'form': form,
}
return render(request, self.template_name, context)
@transaction.atomic
@json_view
def post(self, request, slug):
event = self.get_event_safely(slug, request)
if isinstance(event, http.HttpResponse):
return event
if 'cancel' in request.POST:
return redirect('main:event', event.slug)
discussion = Discussion.objects.get(event=event)
form = forms.EventDiscussionForm(
request.POST,
instance=discussion,
event=event,
)
if form.is_valid():
form.save()
return redirect('main:event', event.slug)
return self.get(request, slug, form=form)
@json_view
def all_tags(request):
tags = list(Tag.objects.all().values_list('name', flat=True))
return {'tags': tags}
def related_content(request, slug):
event = get_object_or_404(Event, slug=slug)
events, __, __ = find_related_events(event, request.user)
curated_groups_map = collections.defaultdict(list)
def get_curated_groups(event):
        return curated_groups_map.get(event.id)
context = {
'events': events,
'get_curated_groups': get_curated_groups,
}
return render(request, 'main/es.html', context)
def find_related_events(
event, user, boost_title=None, boost_tags=None, size=None,
use_title=True, use_tags=True, explain=False
):
assert use_title or use_tags
if boost_title is None:
boost_title = settings.RELATED_CONTENT_BOOST_TITLE
if boost_tags is None:
boost_tags = settings.RELATED_CONTENT_BOOST_TAGS
if size is None:
size = settings.RELATED_CONTENT_SIZE
index = related.get_index()
doc_type = 'event'
es = related.get_connection()
fields = ['title']
if list(event.channels.all()) != [
Channel.objects.get(slug=settings.DEFAULT_CHANNEL_SLUG)]:
fields.append('channel')
mlt_queries = []
if use_title:
mlt_queries.append({
'more_like_this': {
'fields': ['title'],
# 'analyzer': 'snowball',
'docs': [
{
'_index': index,
'_type': doc_type,
'_id': event.id
}],
'min_term_freq': 1,
'max_query_terms': 20,
'min_doc_freq': 1,
# 'max_doc_freq': 2,
# 'stop_words': ['your', 'about'],
'boost': boost_title,
}
})
if use_tags and event.tags.all().exists():
fields.append('tags')
mlt_queries.append({
'more_like_this': {
'fields': ['tags'],
'docs': [
{
'_index': index,
'_type': doc_type,
'_id': event.id
}],
'min_term_freq': 1,
'max_query_terms': 20,
'min_doc_freq': 1,
'boost': boost_tags,
}
})
query_ = {
'bool': {
'should': mlt_queries,
}
}
if user.is_active:
if is_contributor(user):
query = {
'fields': fields,
'query': query_,
'filter': {
'bool': {
'must_not': {
'term': {
'privacy': Event.PRIVACY_COMPANY
}
}
}
}
}
else:
query = {
'fields': fields,
'query': query_
}
else:
query = {
'fields': fields,
'query': query_,
"filter": {
"bool": {
"must": {
"term": {"privacy": Event.PRIVACY_PUBLIC}
}
}
}
}
ids = []
query['from'] = 0
query['size'] = size
query['explain'] = explain
hits = es.search(query, index=index)['hits']
scores = {}
explanations = []
for doc in hits['hits']:
_id = int(doc['_id'])
scores[_id] = doc['_score']
ids.append(_id)
if explain:
explanations.append(doc['_explanation'])
events = Event.objects.scheduled_or_processing().filter(id__in=ids)
if user.is_active:
if is_contributor(user):
events = events.exclude(privacy=Event.PRIVACY_COMPANY)
else:
events = events.filter(privacy=Event.PRIVACY_PUBLIC)
events = sorted(events, key=lambda e: ids.index(e.id))
return (events, scores, explanations)
def channels(request):
channels = []
privacy_filter = {}
privacy_exclude = {}
if request.user.is_active:
if is_contributor(request.user):
feed_privacy = 'contributors'
privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
else:
feed_privacy = 'company'
else:
privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
feed_privacy = 'public'
events = Event.objects.filter(status=Event.STATUS_SCHEDULED)
if privacy_filter:
events = events.filter(**privacy_filter)
elif privacy_exclude:
events = events.exclude(**privacy_exclude)
children_channels = Channel.objects.filter(
parent__parent__isnull=True,
parent__isnull=False,
)
parents = collections.defaultdict(list)
for channel in children_channels:
parents[channel.parent_id].append(channel)
channels_qs = (
Channel.objects
.filter(parent__isnull=True)
.exclude(slug=settings.DEFAULT_CHANNEL_SLUG)
)
# make a dict of parental counts
subchannel_counts = {}
qs = (
Channel.objects
.filter(parent__isnull=False)
.values('parent_id')
.order_by() # necessary because the model has a default ordering
.annotate(Count('parent'))
)
for each in qs:
subchannel_counts[each['parent_id']] = each['parent__count']
# make a dict of events counts by channel
event_counts = {}
qs = (
Event.channels.through.objects.filter(event__in=events)
.values('channel_id')
.annotate(Count('channel'))
)
for each in qs:
event_counts[each['channel_id']] = each['channel__count']
for channel in channels_qs:
event_count = event_counts.get(channel.id, 0)
subchannel_count = subchannel_counts.get(channel.id, 0)
subchannels = parents.get(channel.id, [])
if event_count or subchannel_count:
channels.append((
channel,
event_count,
subchannel_count,
subchannels
))
data = {
'channels': channels,
'feed_privacy': feed_privacy,
}
return render(request, 'main/channels.html', data)
class _Tag(object):
def __init__(self, name, count):
self.name = name
self.count = count
def tag_cloud(request, THRESHOLD=1):
context = {}
qs = (
Event.tags.through.objects
.values('tag_id')
.annotate(Count('tag__id'))
)
if request.user.is_active:
if is_contributor(request.user):
# because of a bug in Django we can't use qs.exclude()
qs = qs.filter(
Q(event__privacy=Event.PRIVACY_CONTRIBUTORS) |
Q(event__privacy=Event.PRIVACY_PUBLIC)
)
else:
qs = qs.filter(event__privacy=Event.PRIVACY_PUBLIC)
tags_map = dict(
(x['id'], x['name'])
for x in
Tag.objects.all()
.values('id', 'name')
)
tags = []
for each in qs.values('tag__id__count', 'tag_id'):
count = each['tag__id__count']
if count > THRESHOLD:
tags.append(_Tag(tags_map[each['tag_id']], count))
context['tags'] = cloud.calculate_cloud(
tags,
steps=10
)
return render(request, 'main/tag_cloud.html', context)
def permission_denied(request, slug):
context = {}
event = get_object_or_404(Event, slug=slug)
context['event'] = event
context['is_contributor'] = is_contributor(request.user)
context['is_company_only'] = event.privacy == Event.PRIVACY_COMPANY
curated_groups = CuratedGroup.objects.filter(event=event).order_by('name')
context['curated_groups'] = []
for group in curated_groups:
context['curated_groups'].append({
'name': group.name,
'url': group.url
})
return render(request, 'main/permission_denied.html', context)
def contributors(request):
context = {}
cache_key = 'mozillians_contributors'
cache_key += hashlib.md5(str(settings.CONTRIBUTORS)).hexdigest()[:10]
users = cache.get(cache_key)
if users is None:
users = mozillians.get_contributors()
cache.set(cache_key, users, 60 * 60 * 24)
context['users'] = reversed(users)
return render(request, 'main/contributors.html', context)
@never_cache
@json_view
def event_livehits(request, id):
event = get_object_or_404(Event, id=id)
if request.method == 'POST' and event.is_live():
live_hits, _ = EventLiveHits.objects.get_or_create(event=event)
if request.user.is_authenticated():
cache_key = 'event_livehits-%d' % request.user.id
else:
cache_key = ''
for thing in (
'HTTP_USER_AGENT',
'HTTP_ACCEPT_LANGUAGE',
            'REMOTE_ADDR',
):
value = request.META.get(thing)
if value:
cache_key += value
cache_key = 'event_livehits' + hashlib.md5(cache_key).hexdigest()
cache_key = cache_key[:30]
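        # The key built above fingerprints a viewer (user id, or UA + Accept-Language
        # + IP for anonymous visitors); the cache entry below ensures each fingerprint
        # increments the counter at most once while the event is live.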
counted = cache.get(cache_key)
total_hits = live_hits.total_hits
if not counted:
# let's assume the longest possible time it's live is 12 hours
cache.set(cache_key, True, 60 * 60 * 12)
# we need to increment!
(
EventLiveHits.objects.filter(event=event)
.update(total_hits=F('total_hits') + 1)
)
total_hits += 1
else:
try:
total_hits = EventLiveHits.objects.get(event=event).total_hits
except EventLiveHits.DoesNotExist:
total_hits = 0
return {'hits': total_hits}
@json_view
def event_processing_timenails(request, slug):
event = get_object_or_404(Event, slug=slug)
if not event.duration:
return {'pictures': []}
form = forms.ProcessingTimenailsForm(request.GET)
if not form.is_valid():
return http.HttpResponseBadRequest(form.errors)
percentage = min(100.0, form.cleaned_data['percent'] or 100.0)
max_ = form.cleaned_data['max']
point = event.duration * percentage / 100.0
pictures = Picture.objects.filter(
event=event,
timestamp__isnull=False,
timestamp__lte=point,
)
pictures_ = []
for picture in pictures.order_by('-timestamp')[:max_]:
# NOTE! This code is the same as used
        # in the EventChaptersThumbnailsView.get view.
thumb = thumbnail(
picture.file, '160x90', crop='center'
)
pictures_.append({
'id': picture.id,
'timestamp': picture.timestamp,
'thumbnail': {
'url': thumb.url,
'width': thumb.width,
'height': thumb.height,
},
})
pictures_.reverse()
return {'pictures': pictures_}
@never_cache
@json_view
def event_status(request, slug):
for values in Event.objects.filter(slug=slug).values('status'):
return {'status': values['status']}
raise http.Http404(slug)
@json_view
def thumbnails(request):
form = forms.ThumbnailsForm(request.GET)
if not form.is_valid():
return http.HttpResponseBadRequest(form.errors)
id = form.cleaned_data['id']
width = form.cleaned_data['width']
height = form.cleaned_data['height']
geometry = '%sx%s' % (width, height)
event = get_object_or_404(Event, id=id)
thumbnails = []
for picture in Picture.objects.filter(event=event).order_by('created'):
thumb = thumbnail(picture.file, geometry, crop='center')
thumbnails.append(thumb.url)
return {'thumbnails': thumbnails}
|
vim/autoload/conque_term/conque_sole_shared_memory.py
|
adifinem/dotvim
| 413 |
91312
|
# FILE: autoload/conque_term/conque_sole_shared_memory.py
# AUTHOR: <NAME> <<EMAIL>>
# WEBSITE: http://conque.googlecode.com
# MODIFIED: 2011-09-02
# VERSION: 2.3, for Vim 7.0
# LICENSE:
# Conque - Vim terminal/console emulator
# Copyright (C) 2009-2011 <NAME>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Wrapper class for shared memory between Windows python processes
Adds a small amount of functionality to the standard mmap module.
"""
import mmap
import sys
# PYTHON VERSION
CONQUE_PYTHON_VERSION = sys.version_info[0]
if CONQUE_PYTHON_VERSION == 2:
import cPickle as pickle
else:
import pickle
class ConqueSoleSharedMemory():
    # whether the data being stored is fixed length
fixed_length = False
# maximum number of bytes per character, for fixed width blocks
char_width = 1
# fill memory with this character when clearing and fixed_length is true
FILL_CHAR = None
# serialize and unserialize data automatically
serialize = False
# size of shared memory, in bytes / chars
mem_size = None
    # label identifying what will be stored in this block
mem_type = None
# unique key, so multiple console instances are possible
mem_key = None
# mmap instance
shm = None
# character encoding, dammit
encoding = 'utf-8'
# pickle terminator
TERMINATOR = None
def __init__(self, mem_size, mem_type, mem_key, fixed_length=False, fill_char=' ', serialize=False, encoding='utf-8'):
""" Initialize new shared memory block instance
Arguments:
        mem_size -- Memory size in characters, depends on encoding argument to calculate byte size
mem_type -- Label to identify what will be stored
mem_key -- Unique, probably random key to identify this block
fixed_length -- If set to true, assume the data stored will always fill the memory size
fill_char -- Initialize memory block with this character, only really helpful with fixed_length blocks
serialize -- Automatically serialize data passed to write. Allows storing non-byte data
encoding -- Character encoding to use when storing character data
"""
self.mem_size = mem_size
self.mem_type = mem_type
self.mem_key = mem_key
self.fixed_length = fixed_length
self.fill_char = fill_char
self.serialize = serialize
self.encoding = encoding
self.TERMINATOR = str(chr(0)).encode(self.encoding)
if CONQUE_PYTHON_VERSION == 3:
self.FILL_CHAR = fill_char
else:
self.FILL_CHAR = unicode(fill_char)
if fixed_length and encoding == 'utf-8':
self.char_width = 4
def create(self, access='write'):
""" Create a new block of shared memory using the mmap module. """
if access == 'write':
mmap_access = mmap.ACCESS_WRITE
else:
mmap_access = mmap.ACCESS_READ
name = "conque_%s_%s" % (self.mem_type, self.mem_key)
self.shm = mmap.mmap(0, self.mem_size * self.char_width, name, mmap_access)
if not self.shm:
return False
else:
return True
def read(self, chars=1, start=0):
""" Read data from shared memory.
If this is a fixed length block, read 'chars' characters from memory.
Otherwise read up until the TERMINATOR character (null byte).
If this memory is serialized, unserialize it automatically.
"""
# go to start position
self.shm.seek(start * self.char_width)
if self.fixed_length:
chars = chars * self.char_width
else:
chars = self.shm.find(self.TERMINATOR)
if chars == 0:
return ''
shm_str = self.shm.read(chars)
# return unpickled byte object
if self.serialize:
return pickle.loads(shm_str)
        # decode bytes in python 3
if CONQUE_PYTHON_VERSION == 3:
return str(shm_str, self.encoding)
        # decode to unicode in Python 2 for non-ascii encodings
if self.encoding != 'ascii':
shm_str = unicode(shm_str, self.encoding)
return shm_str
def write(self, text, start=0):
""" Write data to memory.
If memory is fixed length, simply write the 'text' characters at 'start' position.
Otherwise write 'text' characters and append a null character.
If memory is serializable, do so first.
"""
# simple scenario, let pickle create bytes
if self.serialize:
if CONQUE_PYTHON_VERSION == 3:
tb = pickle.dumps(text, 0)
else:
tb = pickle.dumps(text, 0).encode(self.encoding)
else:
tb = text.encode(self.encoding, 'replace')
# write to memory
self.shm.seek(start * self.char_width)
if self.fixed_length:
self.shm.write(tb)
else:
self.shm.write(tb + self.TERMINATOR)
def clear(self, start=0):
""" Clear memory block using self.fill_char. """
self.shm.seek(start)
if self.fixed_length:
self.shm.write(str(self.fill_char * self.mem_size * self.char_width).encode(self.encoding))
else:
self.shm.write(self.TERMINATOR)
def close(self):
""" Close/destroy memory block. """
self.shm.close()
|
green/test/test_result.py
|
vck3000/green
| 686 |
91329
|
<reponame>vck3000/green
# encoding: utf-8
from __future__ import unicode_literals
import copy
# `from doctest import DocTestCase` causes crashes: DocTestCase is detected as a
# TestCase subclass, so unittest.TestLoader.loadTestsFromModule() (called from
# GreenTestLoader.loadTestsFromModule()) treats it as a real test definition, tries
# to run it, and crashes in very weird ways.
import doctest
from io import StringIO
import sys
import os
import unittest
import tempfile
from green.config import default_args
from green.output import Colors, GreenStream
from green.result import (
GreenTestResult,
proto_test,
ProtoTest,
proto_error,
ProtoTestResult,
BaseTestResult,
)
try:
from unittest.mock import MagicMock, patch
except ImportError:
from mock import MagicMock, patch
from coverage import coverage, CoverageException
class MyProtoTest(ProtoTest, object):
"""
For quickly making a ProtoTest
"""
def __init__(self):
super(MyProtoTest, self).__init__()
self.module = "my_module"
self.class_name = "MyClass"
self.method_name = "myMethod"
self.docstr_part = "My docstring"
self.subtest_part = ""
class TestBaseTestResult(unittest.TestCase):
def test_stdoutOutput(self):
"""
recordStdout records output.
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
o = "some output"
btr.recordStdout(pt, o)
self.assertEqual(btr.stdout_output[pt], o)
def test_stdoutNoOutput(self):
"""
recordStdout ignores empty output sent to it
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
btr.recordStdout(pt, "")
self.assertEqual(btr.stdout_output, {})
def test_displayStdout(self):
"""
displayStdout displays captured stdout
"""
stream = StringIO()
noise = "blah blah blah"
btr = BaseTestResult(stream, Colors(False))
pt = ProtoTest()
btr.stdout_output[pt] = noise
btr.displayStdout(pt)
self.assertIn(noise, stream.getvalue())
def test_stderrErrput(self):
"""
recordStderr records errput.
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
o = "some errput"
btr.recordStderr(pt, o)
self.assertEqual(btr.stderr_errput[pt], o)
def test_stderrNoErrput(self):
"""
recordStderr ignores empty errput sent to it
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
btr.recordStderr(pt, "")
self.assertEqual(btr.stderr_errput, {})
def test_displayStderr(self):
"""
displayStderr displays captured stderr
"""
stream = StringIO()
noise = "blah blah blah"
btr = BaseTestResult(stream, Colors(False))
pt = ProtoTest()
btr.stderr_errput[pt] = noise
btr.displayStderr(pt)
self.assertIn(noise, stream.getvalue())
class TestProtoTestResult(unittest.TestCase):
def test_addSuccess(self):
"""
addSuccess adds a test correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
ptr.addSuccess(test)
self.assertEqual(test, ptr.passing[0])
def test_addError(self):
"""
addError adds a test and error correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
try:
raise Exception
except:
err = proto_error(sys.exc_info())
ptr.addError(test, err)
self.assertEqual(test, ptr.errors[0][0])
self.assertEqual(err, ptr.errors[0][1])
def test_addFailure(self):
"""
addFailure adds a test and error correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
try:
raise Exception
except:
err = proto_error(sys.exc_info())
ptr.addFailure(test, err)
self.assertEqual(test, ptr.failures[0][0])
self.assertEqual(err, ptr.failures[0][1])
def test_addSkip(self):
"""
addSkip adds a test and reason correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
reason = "some plausible reason"
ptr.addSkip(test, reason)
self.assertEqual(test, ptr.skipped[0][0])
self.assertEqual(reason, ptr.skipped[0][1])
def test_addExpectedFailure(self):
"""
addExpectedFailure adds a test and error correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
try:
raise Exception
except:
err = proto_error(sys.exc_info())
ptr.addExpectedFailure(test, err)
self.assertEqual(test, ptr.expectedFailures[0][0])
self.assertEqual(err, ptr.expectedFailures[0][1])
def test_addUnexpectedSuccess(self):
"""
addUnexpectedSuccess adds a test correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
ptr.addUnexpectedSuccess(test)
self.assertEqual(test, ptr.unexpectedSuccesses[0])
@patch("green.result.ProtoTestResult.addError")
@patch("green.result.ProtoTestResult.addFailure")
def test_addSubTest_failure(self, mock_addFailure, mock_addError):
"""
addSubTest calls over to addFailure for failures
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
test.failureException = Exception
subtest = MagicMock()
err = [Exception]
ptr.addSubTest(test, subtest, err)
mock_addFailure.assert_called_with(subtest, err)
@patch("green.result.ProtoTestResult.addError")
@patch("green.result.ProtoTestResult.addFailure")
def test_addSubTest_error(self, mock_addFailure, mock_addError):
"""
addSubTest calls over to addError for errors
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
test.failureException = KeyError
subtest = MagicMock()
err = [Exception]
ptr.addSubTest(test, subtest, err)
mock_addError.assert_called_with(subtest, err)
class TestProtoError(unittest.TestCase):
def test_str(self):
"""
Running a ProtoError through str() should result in a traceback string
"""
test_str = "noetuaoe"
try:
raise Exception(test_str)
except:
err = sys.exc_info()
pe = proto_error(err)
self.assertIn(test_str, str(pe))
class TestProtoTest(unittest.TestCase):
def test_ProtoTestBlank(self):
"""
ProtoTest can be instantiated empty
"""
pt = ProtoTest()
for i in ["module", "class_name", "docstr_part", "method_name"]:
self.assertEqual("", getattr(pt, i, None))
def test_str(self):
"""
Running a ProtoTest through str() is the same as getting .dotted_name
"""
pt = ProtoTest()
pt.module = "aoeusnth"
self.assertEqual(str(pt), pt.dotted_name)
def test_ProtoTestFromTest(self):
"""
Passing a test into ProtoTest copies out the relevant info.
"""
module = "green.test.test_result"
class_name = "Small"
docstr_part = "stuff"
method_name = "test_method"
class Small(unittest.TestCase):
def test_method(self):
"stuff"
pt = ProtoTest(Small("test_method"))
for i in ["module", "class_name", "docstr_part", "method_name"]:
self.assertEqual(locals()[i], getattr(pt, i, None))
def test_getDescription(self):
"""
getDescription() returns what we expect for all verbose levels
"""
# With a docstring
class Fruit(unittest.TestCase):
def test_stuff(self):
"apple"
pass
t = proto_test(Fruit("test_stuff"))
self.assertEqual(t.getDescription(1), "")
self.assertEqual(t.getDescription(2), "test_stuff")
self.assertEqual(t.getDescription(3), "apple")
self.assertEqual(t.getDescription(4), "apple")
# Without a docstring
class Vegetable(unittest.TestCase):
def test_stuff(self):
pass
t = proto_test(Vegetable("test_stuff"))
self.assertEqual(t.getDescription(1), "")
self.assertEqual(t.getDescription(2), "test_stuff")
self.assertEqual(t.getDescription(3), "test_stuff")
self.assertEqual(t.getDescription(4), "test_stuff")
def test_newlineDocstring(self):
"""
Docstrings starting with a newline are properly handled.
"""
class MyTests(unittest.TestCase):
def test_stuff(self):
"""
tricky
"""
pass
test = proto_test(MyTests("test_stuff"))
self.assertIn("tricky", test.getDescription(3))
def test_multilineDocstring(self):
"""
        The description includes all of the docstring up to the first blank line.
"""
class LongDocs(unittest.TestCase):
def test_long(self):
"""First line is
tricky!
garbage
"""
pass
test = proto_test(LongDocs("test_long"))
self.assertIn("tricky", test.getDescription(3))
self.assertNotIn("garbage", test.getDescription(3))
def test_doctest(self):
"""
If we parse a doctest, we get all the fields we need.
"""
test = """
>>> f()
42
"""
def f():
return 42
parser = doctest.DocTestParser()
dt = parser.get_doctest(test, {"f": f}, "doctest.name", "somefile.py", 20)
dt.__module__ = "somefile"
p = proto_test(doctest.DocTestCase(dt))
# short description
self.assertEqual(p.getDescription(2), "doctest.name")
# long description
description = p.getDescription(3)
self.assertIn("doctest.name", description)
self.assertIn("somefile.py", description)
self.assertIn("20", description)
# dotted name
self.assertEqual(p.dotted_name, "doctest.name")
def test_class_or_module_failure(self):
"""
If we parse an error from a class or module failure, we get the correct result.
"""
p = ProtoTest()
p.is_class_or_module_teardown_error = True
p.name = "the thing"
self.assertEqual(p.getDescription(1), "the thing")
self.assertEqual(p.getDescription(2), "the thing")
self.assertEqual(p.getDescription(3), "the thing")
class TestGreenTestResult(unittest.TestCase):
def setUp(self):
self.args = copy.deepcopy(default_args)
self.stream = StringIO()
def tearDown(self):
del self.stream
del self.args
@patch("green.result.GreenTestResult.printErrors")
def test_stopTestRun(self, mock_printErrors):
"""
We ignore coverage's error about not having anything to cover.
"""
self.args.cov = MagicMock()
self.args.cov.stop = MagicMock(
side_effect=CoverageException("Different Exception")
)
self.args.run_coverage = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.startTestRun()
self.assertRaises(CoverageException, gtr.stopTestRun)
self.args.cov.stop = MagicMock(
side_effect=CoverageException("No data to report")
)
def test_tryRecordingStdoutStderr(self):
"""
Recording stdout and stderr works correctly.
"""
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.recordStdout = MagicMock()
gtr.recordStderr = MagicMock()
output = "apple"
test1 = MagicMock()
ptr1 = MagicMock()
ptr1.stdout_output = {test1: output}
ptr1.stderr_errput = {}
errput = "banana"
test2 = MagicMock()
ptr2 = MagicMock()
ptr2.stdout_output = {}
ptr2.stderr_errput = {test2: errput}
gtr.tryRecordingStdoutStderr(test1, ptr1)
gtr.recordStdout.assert_called_with(test1, output)
gtr.tryRecordingStdoutStderr(test2, ptr2)
gtr.recordStderr.assert_called_with(test2, errput)
def test_tryRecordingStdoutStderr_SubTest(self):
"""
Recording stdout and stderr works correctly for failed/errored SubTests.
"""
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.recordStdout = MagicMock()
gtr.recordStderr = MagicMock()
output = "apple"
test1 = MagicMock()
test1.dotted_name = "test 1"
subtest1 = MagicMock()
subtest1.dotted_name = "test 1: the subtest"
subtest1.class_name = "SubTest"
ptr1 = MagicMock()
ptr1.stdout_output = {test1: output}
ptr1.stderr_errput = {}
errput = "banana"
test2 = MagicMock()
test2.dotted_name = "test 2"
subtest2 = MagicMock()
subtest2.dotted_name = "test 2: subtests are annoying"
subtest2.class_name = "SubTest"
ptr2 = MagicMock()
ptr2.stdout_output = {}
ptr2.stderr_errput = {test2: errput}
gtr.tryRecordingStdoutStderr(subtest1, ptr1, err=True)
gtr.recordStdout.assert_called_with(subtest1, output)
gtr.tryRecordingStdoutStderr(subtest2, ptr2, err=True)
gtr.recordStderr.assert_called_with(subtest2, errput)
def test_failfastAddError(self):
"""
addError triggers failfast when it is set
"""
self.args.failfast = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.failfast, True)
try:
raise Exception
except:
err = sys.exc_info()
self.assertEqual(gtr.shouldStop, False)
gtr.addError(MyProtoTest(), proto_error(err))
self.assertEqual(gtr.shouldStop, True)
def test_failfastAddFailure(self):
"""
addFailure triggers failfast when it is set
"""
self.args.failfast = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.failfast, True)
try:
raise Exception
except:
err = sys.exc_info()
self.assertEqual(gtr.shouldStop, False)
gtr.addFailure(MyProtoTest(), proto_error(err))
self.assertEqual(gtr.shouldStop, True)
def test_failfastAddUnexpectedSuccess(self):
"""
addUnexpectedSuccess no longer triggers failfast when it is set
"""
self.args.failfast = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.failfast, True)
self.assertEqual(gtr.shouldStop, False)
gtr.addUnexpectedSuccess(MyProtoTest())
self.assertEqual(gtr.shouldStop, False)
def _outputFromVerboseTest(self):
"""
Start a test with verbose = 2 and get its output.
"""
class FakeCase(unittest.TestCase):
def runTest(self):
pass
self.args.verbose = 2
gtr = GreenTestResult(self.args, GreenStream(self.stream))
tc = FakeCase()
gtr.startTest(tc)
output = self.stream.getvalue()
return output.split("\n")
def test_startTestVerboseTerminal(self):
"""
startTest() contains output we expect in verbose mode on a terminal
"""
self.stream.isatty = lambda: True
output_lines = self._outputFromVerboseTest()
# Output should look like (I'm not putting the termcolor formatting
# here)
# green.test.test_runner
# FakeCase
# test_it
self.assertEqual(len(output_lines), 3)
self.assertNotIn(" ", output_lines[0])
self.assertIn(" ", output_lines[1])
self.assertIn(" ", output_lines[2])
def test_startTestVerbosePipe(self):
"""
startTest() contains output we expect in verbose mode on a pipe
"""
self.stream.isatty = lambda: False
output_lines = self._outputFromVerboseTest()
# Output should look like (I'm not putting the termcolor formatting
# here)
# green.test.test_runner
# FakeCase
# test_it
self.assertEqual(len(output_lines), 3)
self.assertNotIn(" ", output_lines[0])
self.assertIn(" ", output_lines[1])
# No carriage return or extra lines printed
self.assertIn("", output_lines[2])
def test_reportOutcome(self):
"""
_reportOutcome contains output we expect.
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr._reportOutcome(None, ".", lambda x: x)
self.assertIn(".", self.stream.getvalue())
@patch("green.result.proto_test")
def test_reportOutcomeCursorUp(self, mock_proto_test):
"""
_reportOutcome moves the cursor up when it needs to.
"""
mockProtoTest = MagicMock()
mockProtoTest.getDescription.return_value = "a description"
mock_proto_test.return_value = mockProtoTest
self.args.verbose = 2
def isatty():
return True
gs = GreenStream(self.stream)
gs.isatty = isatty
gtr = GreenTestResult(self.args, gs)
r = "a fake reason"
t = MagicMock()
t.__str__.return_value = "x" * 1000
gtr._reportOutcome(t, ".", lambda x: x, None, r)
self.assertIn(r, self.stream.getvalue())
self.assertLess(len(self.stream.getvalue()), 2000)
@patch("green.result.proto_test")
def test_reportOutcomeVerbose(self, mock_proto_test):
"""
_reportOutcome contains output we expect in verbose mode.
"""
mockProtoTest = MagicMock()
mockProtoTest.getDescription.return_value = "a description"
mock_proto_test.return_value = mockProtoTest
self.args.verbose = 2
def isatty():
return True
gs = GreenStream(self.stream)
gs.isatty = isatty
gtr = GreenTestResult(self.args, gs)
r = "a fake reason"
t = MagicMock()
t.__str__.return_value = "junk"
gtr._reportOutcome(t, ".", lambda x: x, None, r)
self.assertIn(r, self.stream.getvalue())
def test_printErrorsSkipreport(self):
"""
printErrors() prints the skip report.
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
reason = "dog ate homework"
gtr.addSkip(pt, reason)
gtr.printErrors()
self.assertIn(reason, self.stream.getvalue())
def test_printErrorsStdout(self):
"""
printErrors() prints out the captured stdout.
"""
self.args.verbose = 1
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test spit out to stdout"
gtr.recordStdout(pt, output)
gtr.addSuccess(pt)
gtr.printErrors()
self.assertIn(output, self.stream.getvalue())
def test_printErrorsStdoutQuietStdoutOnSuccess(self):
"""
printErrors() prints out the captured stdout
except when quiet_stdout is set to True
for successful tests.
"""
self.args.quiet_stdout = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test should not spit out to stdout"
gtr.recordStdout(pt, output)
gtr.addSuccess(pt)
gtr.printErrors()
self.assertNotIn(output, self.stream.getvalue())
def test_printErrorsStdoutQuietStdoutOnError(self):
"""
printErrors() prints out the captured stdout
except when quiet_stdout is set to True
for successful tests, but here we are on a
failing test.
"""
self.args.quiet_stdout = True
try:
raise Exception
except:
err = sys.exc_info()
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test should spit out to stdout"
gtr.recordStdout(pt, output)
gtr.addError(pt, proto_error(err))
gtr.printErrors()
self.assertIn(output, self.stream.getvalue())
def test_printErrorsStderrQuietStdoutOnSuccess(self):
"""
        printErrors() prints out the captured stderr
        except when quiet_stdout is set to True
        for successful tests.
"""
self.args.quiet_stdout = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test should not spit out to stdout"
gtr.recordStderr(pt, output)
gtr.addSuccess(pt)
gtr.printErrors()
self.assertNotIn(output, self.stream.getvalue())
def test_printErrorsNoTracebacks(self):
"""
printErrors() omits tracebacks for failures and errors when
no_tracebacks is True
"""
self.args.no_tracebacks = True
try:
raise Exception
except:
err = sys.exc_info()
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
gtr.addError(pt, proto_error(err))
gtr.printErrors()
self.assertNotIn("Exception", self.stream.getvalue())
def test_printErrorsDots(self):
"""
printErrors() looks correct in verbose=1 (dots) mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 1
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), proto_error(err))
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsDots", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_printErrorsVerbose2(self):
"""
printErrors() looks correct in verbose=2 mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 2
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), proto_error(err))
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsVerbose2", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_printErrorsVerbose3(self):
"""
printErrors() looks correct in verbose=3 mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 3
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), proto_error(err))
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsVerbose3", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_printErrorsVerbose4(self):
"""
printErrors() looks correct in verbose=4 mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 4
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), err)
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("(most recent call last)", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsVerbose4", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_addProtoTestResult(self):
"""
addProtoTestResult adds the correct things to the correct places.
"""
ptr = ProtoTestResult()
err_t = proto_test(MagicMock())
try:
raise Exception
except:
err_e = proto_error(sys.exc_info())
ptr.addError(err_t, err_e)
ef_t = proto_test(MagicMock())
try:
raise Exception
except:
ef_e = proto_error(sys.exc_info())
ptr.addExpectedFailure(ef_t, ef_e)
fail_t = proto_test(MagicMock())
try:
raise Exception
except:
fail_e = proto_error(sys.exc_info())
ptr.addFailure(fail_t, fail_e)
pass_t = proto_test(MagicMock())
ptr.addSuccess(pass_t)
skip_t = proto_test(MagicMock())
skip_r = proto_test(MagicMock())
ptr.addSkip(skip_t, skip_r)
us_t = proto_test(MagicMock())
ptr.addUnexpectedSuccess(us_t)
self.args.verbose = 0
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addProtoTestResult(ptr)
self.assertEqual(gtr.errors, [(err_t, err_e)])
self.assertEqual(gtr.expectedFailures, [(ef_t, ef_e)])
self.assertEqual(gtr.failures, [(fail_t, fail_e)])
self.assertEqual(gtr.passing, [pass_t])
self.assertEqual(gtr.skipped, [(skip_t, skip_r)])
self.assertEqual(gtr.unexpectedSuccesses, [us_t])
def test_stopTestRun_processes_message(self):
"""
StopTestRun adds number of processes used to summary
"""
self.args.processes = 4
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.startTestRun()
gtr.stopTestRun()
self.assertIn("using 4 processes\n", self.stream.getvalue())
def test_stopTestRun_singular_process_message(self):
"""
StopTestRun adds correct summary when one process is used
"""
self.args.processes = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.startTestRun()
gtr.stopTestRun()
self.assertIn("using 1 process\n", self.stream.getvalue())
class TestGreenTestResultAdds(unittest.TestCase):
def setUp(self):
self.stream = StringIO()
self.args = copy.deepcopy(default_args)
self.args.verbose = 0
self.gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.gtr._reportOutcome = MagicMock()
def tearDown(self):
del self.stream
del self.gtr
def test_addSuccess(self):
"""
addSuccess() makes the correct calls to other functions.
"""
test = MagicMock()
test.shortDescription.return_value = "a"
test.__str__.return_value = "b"
test = proto_test(test)
self.gtr.addSuccess(test)
self.gtr._reportOutcome.assert_called_with(test, ".", self.gtr.colors.passing)
def test_addSuccess_with_test_time(self):
"""
addSuccess() sets test time to correct value
"""
test = MagicMock()
test.shortDescription.return_value = "a"
test.__str__.return_value = "b"
test = proto_test(test)
self.gtr.addSuccess(test, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addError(self):
"""
addError() makes the correct calls to other functions.
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addError(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "E", self.gtr.colors.error, err
)
def test_addError_with_test_time(self):
"""
addError() sets test time to correct value
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addError(test, err, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addFailure(self):
"""
addFailure() makes the correct calls to other functions.
"""
err = None
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addFailure(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "F", self.gtr.colors.failing, err
)
def test_addFailure_with_test_time(self):
"""
        addFailure() sets test time to the correct value
"""
err = None
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addFailure(test, err, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addFailureTwistedSkip(self):
"""
Twisted's practice of calling addFailure() with their skips is detected
and redirected to addSkip()
"""
err = None
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
reason = "Twisted is odd"
err = proto_error(err)
err.traceback_lines = ["UnsupportedTrialFeature: ('skip', '{}')".format(reason)]
self.gtr.addFailure(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "s", self.gtr.colors.skipped, reason=reason
)
def test_addSkip(self):
"""
addSkip() makes the correct calls to other functions.
"""
test = proto_test(MagicMock())
reason = "skip reason"
self.gtr.addSkip(test, reason)
self.gtr._reportOutcome.assert_called_with(
test, "s", self.gtr.colors.skipped, reason=reason
)
def test_addSkip_with_test_time(self):
"""
        addSkip() sets test time to the correct value
"""
test = proto_test(MagicMock())
reason = "skip reason"
self.gtr.addSkip(test, reason, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addExpectedFailure(self):
"""
addExpectedFailure() makes the correct calls to other functions.
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addExpectedFailure(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "x", self.gtr.colors.expectedFailure, err
)
    def test_addExpectedFailure_with_test_time(self):
"""
        addExpectedFailure() sets test time to the correct value
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addExpectedFailure(test, err, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addUnexpectedSuccess(self):
"""
addUnexpectedSuccess() makes the correct calls to other functions.
"""
test = proto_test(MagicMock())
self.gtr.addUnexpectedSuccess(test)
self.gtr._reportOutcome.assert_called_with(
test, "u", self.gtr.colors.unexpectedSuccess
)
def test_addUnexpectedSuccess_with_test_time(self):
"""
        addUnexpectedSuccess() sets test time to the correct value
"""
test = proto_test(MagicMock())
self.gtr.addUnexpectedSuccess(test, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_wasSuccessful(self):
"""
wasSuccessful returns what we expect.
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.wasSuccessful(), False)
gtr.passing.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
gtr.all_errors.append("anything")
self.assertEqual(gtr.wasSuccessful(), False)
def test_wasSuccessful_expectedFailures(self):
"""
wasSuccessful returns what we expect when we only have expectedFailures
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.expectedFailures.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
def test_wasSuccessful_passing(self):
"""
wasSuccessful returns what we expect when we only have passing tests
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.passing.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
def test_wasSuccessful_skipped(self):
"""
wasSuccessful returns what we expect when we only have skipped tests
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.skipped.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
def test_wasSuccessful_unexpectedSuccesses(self):
"""
wasSuccessful returns what we expect when we only have unexpectedSuccesses
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.unexpectedSuccesses.append("anything")
self.assertEqual(gtr.wasSuccessful(), False)
def test_wasSuccessful_coverageFails(self):
"""
wasSuccessful fails if minimum coverage is not met
"""
self.args.minimum_coverage = 50
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.coverage_percent = 49
self.assertEqual(gtr.wasSuccessful(), False)
def test_wasSuccessful_coverageSucceeds(self):
"""
        wasSuccessful succeeds if minimum coverage is met
"""
self.args.minimum_coverage = 50
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.passing.append("anything")
gtr.coverage_percent = 60
self.assertEqual(gtr.wasSuccessful(), True)
class TestGreenTestRunCoverage(unittest.TestCase):
def setUp(self):
self.args = copy.deepcopy(default_args)
cov_file = tempfile.NamedTemporaryFile(delete=False)
cov_file.close()
self.args.cov = coverage(
data_file=cov_file.name,
omit=self.args.omit_patterns,
include=self.args.include_patterns,
)
self.args.cov.start()
self.stream = StringIO()
def tearDown(self):
del self.stream
del self.args
def _outputFromTest(self, args):
class FakeCase(unittest.TestCase):
def runTest(self):
pass
gtr = GreenTestResult(args, GreenStream(self.stream))
gtr.startTestRun()
gtr.startTest(FakeCase())
gtr.stopTestRun()
output = self.stream.getvalue()
return output.split("\n")
def test_coverage(self):
self.args.run_coverage = True
output = self._outputFromTest(self.args)
self.assertIn("Stmts Miss Cover Missing", "\n".join(output))
def test_quiet_coverage(self):
self.args.run_coverage = True
self.args.quiet_coverage = True
output = self._outputFromTest(self.args)
self.assertNotIn("Stmts Miss Cover Missing", "\n".join(output))
|
doc/integrations/cortx-s3-slack-bot/upload_file_to_s3.py
|
novium258/cortx-1
| 552 |
91347
|
<filename>doc/integrations/cortx-s3-slack-bot/upload_file_to_s3.py
import requests
import os
from botocore.exceptions import ClientError
from elasticsearch_connector import ElasticsearchConnector
async def upload_file_to_s3(s3_client, es_client: ElasticsearchConnector, file_data, token):
""" Gets a file from slack and uploads it to a Cortx S3 bucket
Parameters
----------
s3_client : botocore.client.S3
A low-level client representing Cortx Simple Storage Service (S3)
es_client : elasticsearch_connector.Elasticsearch
Elasticsearch low-level client. Provides a straightforward mapping from Python to ES REST endpoints.
file_data : dict
File data from slack
token: str
A user access token from slack
Returns
----------
bool: Returns true if the file was uploaded to S3
"""
    # download the file from Slack, upload it to S3 and index its metadata in Elasticsearch
try:
file_id = file_data['id']
file_name = file_data['name']
created = file_data['created']
timestamp = file_data['timestamp']
mimetype = file_data['mimetype']
filetype = file_data['filetype']
user_id = file_data['user']
size = file_data['size']
url = file_data['url_private']
file_path = os.path.join(os.getcwd(), 'uploads', file_name)
print("Saving to", file_name)
headers = {'Authorization': 'Bearer ' + token}
r = requests.get(url, headers=headers)
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
if os.path.exists(file_path):
response = s3_client.upload_file(
file_path, 'testbucket', file_name)
es_client.create_doc(file_id=file_id, file_name=file_name, created=created,
timestamp=timestamp, mimetype=mimetype, filetype=filetype, user_id=user_id, size=size)
if os.path.exists(file_path):
os.remove(path=file_path)
print("File uploaded to S3 with key {}".format(file_name))
# print(response)
except ClientError as e:
print("Couldn't upload to s3")
print(e)
return False
except Exception as e:
print(e)
return False
return True
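# Hypothetical usage sketch (illustration only; the surrounding Slack event handling
# and the shape of file_data follow the docstring above, not code from this repo):
#
#   uploaded = await upload_file_to_s3(s3_client, es_client, event['file'], token)
#   if not uploaded:
#       print("upload to S3 failed")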
|
applications/popart/transformer_transducer/training/transducer_builder.py
|
payoto/graphcore_examples
| 260 |
91351
|
<reponame>payoto/graphcore_examples<filename>applications/popart/transformer_transducer/training/transducer_builder.py
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import numpy as np
import logging_util
import transducer_blocks
# set up logging
logger = logging_util.get_basic_logger(__name__)
class TranscriptionNetwork(transducer_blocks.Block):
""" Transcription Network (or Audio Encoding network) of the Transformer-Transducer model.
:param popart builder: popart builder object
:param int in_feats: input dimension
:param int subsampling_factor: sub-sampling factor for the initial convolutional subsampling layer
:param int enc_n_hid: encoder hidden dimension
:param int num_encoder_layers: the number of transformer layers for the transcription encoding network
:param int encoder_dim: dimension of the transformer layers
:param int num_attention_heads: number of attention heads
:param float enc_dropout: dropout rate for encoder net
:param int kernel_size: kernel size of the initial convolutional subsampling layer
"""
def __init__(
self,
builder,
in_feats,
subsampling_factor,
num_encoder_layers,
encoder_dim,
num_attention_heads,
enc_dropout,
kernel_size=32,
dtype=np.float32):
super(TranscriptionNetwork, self).__init__(builder, dtype, block_name="transcription_network")
self.encoder_dim = encoder_dim
self.subsampling_factor = subsampling_factor
self.conv_subsampler = transducer_blocks.ConvolutionSubSampler(builder,
in_feats,
encoder_dim,
kernel_size,
subsampling_factor,
dtype=dtype,
block_name="transcription_net_convolution_subsampler")
self.transformer_blocks = [transducer_blocks.TransformerBlock(builder,
num_attention_heads,
encoder_dim,
dtype=dtype,
block_name="transcription_net_transformer_block_{}".format(
layer_ind),
dropout_rate=enc_dropout)
for layer_ind in range(num_encoder_layers)]
self.child_blocks = [self.conv_subsampler] + self.transformer_blocks
def __call__(self, x_in, x_lens):
return self.__build_graph(x_in, x_lens)
def __build_graph(self, x_in, x_lens):
# input shape to transcription-network must be [batch_size, channel_dim, seq_length]
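        # Shape flow (illustrative): an input of [B, in_feats, T] is subsampled to
        # [B, encoder_dim, T // subsampling_factor], positional encodings of shape
        # [encoder_dim, T'] are broadcast-added, the transformer blocks keep that
        # shape, and the final transpose returns [B, T', encoder_dim].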
builder = self.builder
logger.info("Shape of Transcription-Network Input: {}".format(builder.getTensorShape(x_in)))
with self.builder.virtualGraph(0):
x = x_in
x = self.conv_subsampler(x)
# scale x_lens as well after subsampling
x_lens = self.builder.aiOnnx.div([x_lens,
self.builder.aiOnnx.constant(np.array([self.subsampling_factor]).astype('int32'))])
builder.recomputeOutputInBackwardPass(x)
# add positional encoding
seq_length = self.builder.getTensorShape(x)[2]
positional_encodings = self.get_constant(sinusoidal_position_encoding(seq_length,
self.encoder_dim))
x = self.builder.aiOnnx.add([x, positional_encodings])
builder.recomputeOutputInBackwardPass(x)
for layer_ind, transformer_block in enumerate(self.transformer_blocks):
x = transformer_block(x, force_recompute=True)
# transpose to shape [batch_size, seq_length, channel_dim]
x = builder.aiOnnx.transpose([x], perm=[0, 2, 1])
return x, x_lens
class PredictionNetwork(transducer_blocks.Block):
""" Prediction Network of the Transducer model.
:param popart builder: popart builder object
:param int num_symbols: number of symbols to embed
:param int pred_n_hid: hidden dimension for LSTM layers of prediction network
:param int pred_rnn_layers: number of LSTM layers of prediction network
:param float pred_dropout: dropout rate for prediction net
:param float forget_gate_bias: value to initialize the forget gate bias values to
:param float weights_init_scale: scaling factor for initial weights and biases of LSTM layers
"""
def __init__(
self,
builder,
num_symbols,
pred_n_hid,
pred_rnn_layers,
pred_dropout,
forget_gate_bias,
weights_init_scale,
dtype=np.float32):
super(PredictionNetwork, self).__init__(builder, dtype, block_name="prediction_network")
self.num_symbols = num_symbols
self.pred_n_hid = pred_n_hid
self.pred_rnn_layers = pred_rnn_layers
self.pred_dropout = pred_dropout
self.embedding_layer = transducer_blocks.EmbeddingBlock(builder,
num_symbols,
pred_n_hid,
dtype=dtype,
block_name="prediction_net_embedding")
self.prediction_rnn_layers = [transducer_blocks.LSTM(builder,
pred_n_hid,
pred_n_hid,
dtype=dtype,
block_name="prediction_net_rnn_{}".format(pred_rnn_ind),
forget_gate_bias=forget_gate_bias,
weights_init_scale=weights_init_scale)
for pred_rnn_ind in range(pred_rnn_layers)]
self.child_blocks = [self.embedding_layer] + self.prediction_rnn_layers
def __call__(self, x_in):
return self.__build_graph(x_in)
def __build_graph(self, x_in):
# input shape to this layer is assumed to be [batch_size, target_sequence_length]
builder = self.builder
logger.info("Shape of Prediction-Network Input: {}".format(builder.getTensorShape(x_in)))
with self.builder.virtualGraph(0):
x = x_in
x = self.embedding_layer(x)
# input shape to lstm layers must be [seq_length, batch_size, channel_dim]
x = builder.aiOnnx.transpose([x], perm=[1, 0, 2])
# prepend blank symbol (zero-vector) to beginning of sequence
blank_shape = builder.getTensorShape(x)
blank_shape[0] = 1
blank_prepend = self.get_constant(np.zeros(blank_shape))
x = builder.aiOnnx.concat([blank_prepend, x], axis=0)
for layer_ind, lstm_layer in enumerate(self.prediction_rnn_layers):
x = lstm_layer(x, force_recompute=True)
x = self.apply_dropout(x, self.pred_dropout)
logger.info("Shape after Pred-RNN layer {}: {}".format(layer_ind, builder.getTensorShape(x)))
# transposing back to shape [batch_size, seq_length, channel_dim]
x = builder.aiOnnx.transpose([x], perm=[1, 0, 2])
return x
class JointNetwork(transducer_blocks.Block):
""" Joint Network of the Transducer model.
:param popart builder: popart builder object
:param int enc_n_hid: encoder hidden dimension
:param int pred_n_hid: hidden dimension for LSTM layers of prediction network
:param int joint_n_hid: hidden dimension of Joint Network
:param int num_symbols: number of symbols to embed
:param float joint_dropout: dropout rate for joint net
"""
def __init__(
self,
builder,
transcription_out_len,
enc_n_hid,
pred_n_hid,
joint_n_hid,
num_symbols,
joint_dropout,
dtype=np.float32,
transcription_out_split_size=15,
shift_labels_by_one=True):
super(JointNetwork, self).__init__(builder, dtype, block_name="joint_network")
self.enc_n_hid = enc_n_hid
self.pred_n_hid = pred_n_hid
self.joint_n_hid = joint_n_hid
self.num_symbols = num_symbols
self.joint_dropout = joint_dropout
logger.warn("For best training performance it is recommended that "
"transcription output split size({}) be a divisor of "
"transcription output length({}).".format(transcription_out_split_size, transcription_out_len))
self.joint_transcription_fc = transducer_blocks.RHSLinear(builder,
enc_n_hid,
joint_n_hid,
dtype=dtype,
block_name="joint_net_transcription_fc")
self.joint_prediction_fc = transducer_blocks.RHSLinear(builder,
pred_n_hid,
joint_n_hid,
dtype=dtype,
block_name="joint_net_prediction_fc")
self.transcription_splitter = transducer_blocks.Split(builder,
total_size=transcription_out_len,
split_size=transcription_out_split_size,
split_axis=1,
dtype=dtype,
block_name="joint_net_transcription_splitter")
self.joint_out_fc = transducer_blocks.RHSLinear(builder,
joint_n_hid,
num_symbols,
dtype=dtype,
block_name='joint_net_out_fc')
self.child_blocks = [self.joint_transcription_fc, self.joint_prediction_fc,
self.transcription_splitter, self.joint_out_fc]
self.shift_labels_by_one = shift_labels_by_one
def __call__(self, transcription_out, transcription_lens, prediction_out, targets, target_lens):
return self.__build_graph(transcription_out, transcription_lens, prediction_out, targets, target_lens)
def get_log_probs(self, transcription_out_split, prediction_out, targets, target_lens):
builder = self.builder
joint_out_split = builder.aiOnnx.add([transcription_out_split, prediction_out])
builder.recomputeOutputInBackwardPass(joint_out_split)
joint_out_split = builder.aiOnnx.relu([joint_out_split])
builder.recomputeOutputInBackwardPass(joint_out_split)
joint_out_split = self.apply_dropout(joint_out_split, self.joint_dropout)
builder.recomputeOutputInBackwardPass(joint_out_split)
joint_out_split = self.joint_out_fc(joint_out_split, force_recompute=True)
# This flag means we need to offset labels by + 1 when passing to RNN-T Loss
# The reason for offset is that we treat logits "A" dimension as [<blank>, valid characters... A-1]
# Thus, blank-symbol has idx 0 and real symbols must have indices [1:A-1]
# RNN-T Loss uses labels as indices of logits (in A dimension)
# The opposite logic must be applied when logits are used for decoder - see transducer_decoder.py
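        # e.g. with shift_labels_by_one=True, label ids [0, 1, 2] (real symbols)
        # become [1, 2, 3], leaving index 0 free for the <blank> row of the logits.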
if self.shift_labels_by_one:
one = self.builder.aiOnnx.constant(np.array([1]).astype(np.int32))
targets = self.builder.aiOnnx.add([targets, one])
compact_log_probs = builder.customOp(
opName="SparseLogSoftmax",
opVersion=1,
domain="com.acme",
inputs=[joint_out_split, targets, target_lens],
attributes={},
numOutputs=1,
)
return compact_log_probs[0]
def __build_graph(self, transcription_out, transcription_lens, prediction_out, targets, target_lens):
builder = self.builder
logger.info("Shapes of Joint-Network Inputs: {}, {}".format(builder.getTensorShape(transcription_out),
builder.getTensorShape(prediction_out)))
with self.builder.virtualGraph(0):
transcription_out = self.joint_transcription_fc(transcription_out, force_recompute=True)
prediction_out = self.joint_prediction_fc(prediction_out, force_recompute=True)
transcription_out = self.builder.aiOnnx.unsqueeze([transcription_out], axes=[2])
prediction_out = self.builder.aiOnnx.unsqueeze([prediction_out], axes=[1])
transcription_out_splits = self.transcription_splitter(transcription_out)
log_probs_compact_splits = []
for split_ind, transcription_out_split in enumerate(transcription_out_splits):
logger.info("Building compact log probs for split {}".format(split_ind))
with self.builder.virtualGraph(0):
log_probs_compact = self.get_log_probs(transcription_out_split, prediction_out,
targets, target_lens)
log_probs_compact_splits.append(log_probs_compact)
with self.builder.virtualGraph(0):
# stack all compacted logprobs
log_probs_compact_out = builder.aiOnnx.concat(log_probs_compact_splits, axis=1)
# logger.info("Shape of Joint-Network output: {}".format(builder.getTensorShape(log_probs_compact_out)))
return log_probs_compact_out
class RNNTLoss(transducer_blocks.Block):
""" Returns RNN-T loss value for given inputs """
def __init__(self, builder, dtype):
super(RNNTLoss, self).__init__(builder, dtype, block_name="RNNTLoss")
def __call__(self, joint_out, input_length, target_length):
return self.__build_graph(joint_out, input_length, target_length)
def __build_graph(self, joint_out, input_length, target_length):
with self.namescope("RNNTLoss"):
builder = self.builder
with builder.virtualGraph(0):
rnnt_outputs = builder.customOp(
opName="RNNTLoss",
opVersion=1,
domain="com.acme",
inputs=[joint_out, input_length, target_length],
attributes={},
numOutputs=4,
)
neg_log_likelihood = rnnt_outputs[0]
return neg_log_likelihood
class JointNetwork_wRNNTLoss(transducer_blocks.Block):
""" Joint Network of the RNN-T model followed by RNN-Transducer loss.
:param popart builder: popart builder object
:param int transcription_out_len: sequence length of the transcription net output
:param int enc_n_hid: encoder hidden dimension
:param int pred_n_hid: hidden dimension for LSTM layers of prediction network
:param int joint_n_hid: hidden dimension of Joint Network
:param int num_symbols: number of symbols to embed
:param float joint_dropout: dropout rate for joint net
"""
def __init__(
self,
builder,
transcription_out_len,
enc_n_hid,
pred_n_hid,
joint_n_hid,
num_symbols,
joint_dropout,
dtype=np.float32,
transcription_out_split_size=15,
do_batch_serialization=False,
samples_per_device=2,
batch_split_size=1,
shift_labels_by_one=True):
super(JointNetwork_wRNNTLoss, self).__init__(builder, dtype, block_name="joint_network_w_rnnt_loss")
self.joint_network = JointNetwork(builder,
transcription_out_len,
enc_n_hid,
pred_n_hid,
joint_n_hid,
num_symbols,
joint_dropout,
dtype=dtype,
transcription_out_split_size=transcription_out_split_size,
shift_labels_by_one=shift_labels_by_one)
self.rnnt_loss = RNNTLoss(builder, dtype=dtype)
self.do_batch_serialization = do_batch_serialization
self.samples_per_device = samples_per_device
self.batch_split_size = batch_split_size
self.child_blocks = [self.joint_network, self.rnnt_loss]
if do_batch_serialization:
self.batch_splitter = transducer_blocks.Split(builder, total_size=samples_per_device,
split_size=batch_split_size, split_axis=0,
dtype=dtype, block_name="batch_splitter")
self.child_blocks.append(self.batch_splitter)
def __call__(self, transcription_out, transcription_lens, prediction_out, targets, target_lens):
return self.__build_graph(transcription_out, transcription_lens, prediction_out, targets, target_lens)
def __build_graph(self, transcription_out, transcription_lens, prediction_out, targets, target_lens):
builder = self.builder
if not self.do_batch_serialization:
joint_out = self.joint_network(transcription_out, transcription_lens, prediction_out,
targets, target_lens)
neg_log_likelihood = self.rnnt_loss(joint_out, transcription_lens, target_lens)
return neg_log_likelihood
else:
logger.info("Doing Batch-Serialization for JointNet")
tout_splits = self.batch_splitter(transcription_out)
flen_splits = self.batch_splitter(transcription_lens)
pout_splits = self.batch_splitter(prediction_out)
tin_splits = self.batch_splitter(targets)
tlen_splits = self.batch_splitter(target_lens)
losses = []
for tout, flen, pout, tin, tlen in zip(tout_splits, flen_splits, pout_splits, tin_splits, tlen_splits):
joint_out = self.joint_network(tout, flen, pout, tin, tlen)
losses.append(self.rnnt_loss(joint_out, flen, tlen))
reduced_neg_log_likelihood = losses[0]
for loss in losses[1:]:
reduced_neg_log_likelihood = builder.aiOnnx.add([reduced_neg_log_likelihood, loss])
reduced_neg_log_likelihood = builder.aiOnnx.div([reduced_neg_log_likelihood,
builder.aiOnnx.constant(
np.array([len(losses)]).astype(np.float32))])
return reduced_neg_log_likelihood
def sinusoidal_position_encoding(num_positions, num_channels, position_rate=1.0, position_weight=1.0):
""" Returns a sinusoidal position encoding table """
position_encoding = np.array([
[position_rate * pos / np.power(10000, 2 * (i // 2) / num_channels) for i in range(num_channels)]
if pos != 0 else np.zeros(num_channels) for pos in range(num_positions)])
position_encoding[:, 0::2] = np.sin(position_encoding[:, 0::2]) # even i
position_encoding[:, 1::2] = np.cos(position_encoding[:, 1::2]) # odd i
return position_weight * position_encoding.T
|
datasets/setup_cambridge.py
|
tannerliu347/dsacstar
| 104 |
91368
|
<gh_stars>100-1000
import os
import math
import numpy as np
import cv2 as cv
import torch
from skimage import io
# setup individual scene IDs and their download location
scenes = [
'https://www.repository.cam.ac.uk/bitstream/handle/1810/251342/KingsCollege.zip',
'https://www.repository.cam.ac.uk/bitstream/handle/1810/251340/OldHospital.zip',
'https://www.repository.cam.ac.uk/bitstream/handle/1810/251336/ShopFacade.zip',
'https://www.repository.cam.ac.uk/bitstream/handle/1810/251294/StMarysChurch.zip',
'https://www.repository.cam.ac.uk/bitstream/handle/1810/251291/GreatCourt.zip',
]
target_height = 480 # rescale images
nn_subsampling = 8  # sub-sampling factor of our CNN architecture, used to size the initialization targets
def mkdir(directory):
"""Checks whether the directory exists and creates it if necessacy."""
if not os.path.exists(directory):
os.makedirs(directory)
for scene in scenes:
scene_file = scene.split('/')[-1]
scene_name = scene_file[:-4]
print("===== Processing " + scene_name + " ===================")
print("Downloading and unzipping data...")
os.system('wget ' + scene)
os.system('unzip ' + scene_file)
os.system('rm ' + scene_file)
os.system('mv ' + scene_name + ' Cambridge_' + scene_name)
os.chdir('Cambridge_' + scene_name)
modes = ['train', 'test']
input_file = 'reconstruction.nvm'
print("Loading SfM reconstruction...")
f = open(input_file)
reconstruction = f.readlines()
f.close()
num_cams = int(reconstruction[2])
num_pts = int(reconstruction[num_cams + 4])
# read points
pts_dict = {}
for cam_idx in range(0, num_cams):
pts_dict[cam_idx] = []
pt = pts_start = num_cams + 5
pts_end = pts_start + num_pts
while pt < pts_end:
pt_list = reconstruction[pt].split()
pt_3D = [float(x) for x in pt_list[0:3]]
pt_3D.append(1.0)
for pt_view in range(0, int(pt_list[6])):
cam_view = int(pt_list[7 + pt_view * 4])
pts_dict[cam_view].append(pt_3D)
pt += 1
print("Reconstruction contains %d cameras and %d 3D points." % (num_cams, num_pts))
for mode in modes:
print("Converting " + mode + " data...")
img_output_folder = mode + '/rgb/'
cal_output_folder = mode + '/calibration/'
pose_output_folder = mode + '/poses/'
target_output_folder = mode + '/init/'
mkdir(img_output_folder)
mkdir(cal_output_folder)
mkdir(pose_output_folder)
mkdir(target_output_folder)
# get list of images for current mode (train vs. test)
image_list = 'dataset_'+mode+'.txt'
f = open(image_list)
camera_list = f.readlines()
f.close()
camera_list = camera_list[3:]
image_list = [camera.split()[0] for camera in camera_list]
for cam_idx in range(num_cams):
print("Processing camera %d of %d." % (cam_idx, num_cams))
image_file = reconstruction[3 + cam_idx].split()[0]
image_file = image_file[:-3] + 'png'
if image_file not in image_list:
print("Skipping image " + image_file + ". Not part of set: " + mode + ".")
continue
image_idx = image_list.index(image_file)
# read camera
camera = camera_list[image_idx].split()
cam_rot = [float(r) for r in camera[4:]]
#quaternion to axis-angle
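            # The pose file stores the rotation as a unit quaternion (w, x, y, z); the
            # conversion below assumes a non-identity rotation, i.e. |w| < 1, otherwise
            # the division by sqrt(1 - w^2) would be undefined.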
angle = 2 * math.acos(cam_rot[0])
x = cam_rot[1] / math.sqrt(1 - cam_rot[0]**2)
y = cam_rot[2] / math.sqrt(1 - cam_rot[0]**2)
z = cam_rot[3] / math.sqrt(1 - cam_rot[0]**2)
cam_rot = [x * angle, y * angle, z * angle]
cam_rot = np.asarray(cam_rot)
cam_rot, _ = cv.Rodrigues(cam_rot)
cam_trans = [float(r) for r in camera[1:4]]
cam_trans = np.asarray([cam_trans])
cam_trans = np.transpose(cam_trans)
cam_trans = - np.matmul(cam_rot, cam_trans)
if np.absolute(cam_trans).max() > 10000:
print("Skipping image " + image_file + ". Extremely large translation. Outlier?")
print(cam_trans)
continue
cam_pose = np.concatenate((cam_rot, cam_trans), axis = 1)
cam_pose = np.concatenate((cam_pose, [[0, 0, 0, 1]]), axis = 0)
cam_pose = torch.tensor(cam_pose).float()
focal_length = float(reconstruction[3 + cam_idx].split()[1])
#load image
image = io.imread(image_file)
image_file = image_file.replace('/', '_')
#load 3D points from reconstruction
pts_3D = torch.tensor(pts_dict[cam_idx])
img_aspect = image.shape[0] / image.shape[1]
if img_aspect > 1:
#portrait
img_w = target_height
img_h = int(math.ceil(target_height * img_aspect))
else:
#landscape
img_w = int(math.ceil(target_height / img_aspect))
img_h = target_height
out_w = int(math.ceil(img_w / nn_subsampling))
out_h = int(math.ceil(img_h / nn_subsampling))
out_scale = out_w / image.shape[1]
img_scale = img_w / image.shape[1]
out_tensor = torch.zeros((3, out_h, out_w))
out_zbuffer = torch.zeros((out_h, out_w))
image = cv.resize(image, (img_w, img_h))
io.imsave(img_output_folder + image_file, image)
with open(cal_output_folder + image_file[:-3] + 'txt', 'w') as f:
f.write(str(focal_length * img_scale))
inv_cam_pose = cam_pose.inverse()
with open(pose_output_folder + image_file[:-3] + 'txt', 'w') as f:
f.write(str(float(inv_cam_pose[0, 0])) + ' ' + str(float(inv_cam_pose[0, 1])) + ' ' + str(float(inv_cam_pose[0, 2])) + ' ' + str(float(inv_cam_pose[0, 3])) + '\n')
f.write(str(float(inv_cam_pose[1, 0])) + ' ' + str(float(inv_cam_pose[1, 1])) + ' ' + str(float(inv_cam_pose[1, 2])) + ' ' + str(float(inv_cam_pose[1, 3])) + '\n')
f.write(str(float(inv_cam_pose[2, 0])) + ' ' + str(float(inv_cam_pose[2, 1])) + ' ' + str(float(inv_cam_pose[2, 2])) + ' ' + str(float(inv_cam_pose[2, 3])) + '\n')
f.write(str(float(inv_cam_pose[3, 0])) + ' ' + str(float(inv_cam_pose[3, 1])) + ' ' + str(float(inv_cam_pose[3, 2])) + ' ' + str(float(inv_cam_pose[3, 3])) + '\n')
fine = 0
conflict = 0
for pt_idx in range(0, pts_3D.size(0)):
scene_pt = pts_3D[pt_idx]
scene_pt = scene_pt.unsqueeze(0)
scene_pt = scene_pt.transpose(0, 1)
# scene to camera coordinates
cam_pt = torch.mm(cam_pose, scene_pt)
# projection to image
img_pt = cam_pt[0:2, 0] * focal_length / cam_pt[2, 0] * out_scale
y = img_pt[1] + out_h / 2
x = img_pt[0] + out_w / 2
x = int(torch.clamp(x, min=0, max=out_tensor.size(2)-1))
y = int(torch.clamp(y, min=0, max=out_tensor.size(1)-1))
if cam_pt[2, 0] > 1000: #filter some outlier points (large depth)
continue
if out_zbuffer[y, x] == 0 or out_zbuffer[y, x] > cam_pt[2, 0]:
out_zbuffer[y, x] = cam_pt[2, 0]
out_tensor[:, y, x] = pts_3D[pt_idx, 0:3]
torch.save(out_tensor, target_output_folder + image_file[:-4] + '.dat')
os.chdir('..')
|
core/python/build.py
|
lf-shaw/kungfu
| 2,209 |
91381
|
<gh_stars>1000+
import os
import sys
import subprocess
import platform
import click
from kungfu.version import get_version
@click.group(invoke_without_command=True)
@click.option('-l', '--log_level', type=click.Choice(['trace', 'debug', 'info', 'warning', 'error', 'critical']),
default='warning', help='logging level')
@click.option('--build_type', type=click.Choice(['Release', 'Debug']), default='Release', help='build type')
@click.option('--arch', type=click.Choice(['x64', 'x86']), default='x64', help='arch')
@click.option('--runtime', type=click.Choice(['electron', 'node']), default='electron', help='Node.js runtime')
@click.option('--node_version', type=str, default='10.0.0', help='Node.js runtime version')
@click.option('--electron_version', type=str, default='5.0.0', help='Electron runtime version')
@click.pass_context
def build(ctx, log_level, build_type, arch, runtime, node_version, electron_version):
ctx.log_level = log_level
ctx.build_type = build_type
ctx.arch = arch
ctx.runtime = runtime
ctx.runtime_version = node_version if runtime == 'node' else electron_version
if ctx.invoked_subcommand is None:
click.echo(build.get_help(ctx))
pass
@build.command()
@click.pass_context
def configure(ctx):
cmake_configure = build_cmake_js_cmd(ctx, 'configure')
sys.exit(subprocess.Popen(cmake_configure).wait())
@build.command()
@click.pass_context
def make(ctx):
cmake_build = build_cmake_js_cmd(ctx, 'build')
sys.exit(subprocess.Popen(cmake_build).wait())
@build.command()
@click.pass_context
def freeze(ctx):
os.environ['CMAKE_BUILD_TYPE'] = ctx.parent.build_type
with open(os.path.join(os.getcwd(), "build", ctx.parent.build_type, "version.info"), 'w') as version_file:
version_file.write(f"{get_version()}")
osname = platform.system()
if osname == 'Linux':
sys.exit(subprocess.Popen(['pyinstaller', '--clean', '-y', '--distpath=build', 'python/kfc-unix.spec']).wait())
if osname == 'Darwin':
rc = subprocess.Popen(['pyinstaller', '--clean', '-y', '--distpath=build', 'python/kfc-unix.spec']).wait()
os.chdir('build/kfc')
if os.path.exists('.Python'):
os.rename('.Python', 'Python')
os.symlink('Python', '.Python')
sys.exit(rc)
if osname == 'Windows':
sys.exit(subprocess.Popen(['pyinstaller', '--clean', '-y', r'--distpath=build', r'python\kfc-win.spec']).wait())
def find(tool):
tool_path = tool
if platform.system() == 'Windows':
for line in subprocess.Popen(['where', tool], stdout=subprocess.PIPE).stdout.readlines():
path = line.decode('utf8').strip()
if path.endswith('.cmd'):
tool_path = path
return tool_path
def build_cmake_js_cmd(ctx, cmd):
python_path = subprocess.Popen(["pipenv", "--py"], stdout=subprocess.PIPE).stdout.read().decode().strip()
spdlog_levels = {
'trace': 'SPDLOG_LEVEL_TRACE',
'debug': 'SPDLOG_LEVEL_DEBUG',
'info': 'SPDLOG_LEVEL_INFO',
'warning': 'SPDLOG_LEVEL_WARN',
'error': 'SPDLOG_LEVEL_ERROR',
'critical': 'SPDLOG_LEVEL_CRITICAL'
}
loglevel = spdlog_levels[ctx.parent.log_level]
cmake_js_cmd = [find('yarn'), 'cmake-js', '--debug' if ctx.parent.build_type == 'Debug' else '',
'--arch', ctx.parent.arch,
'--runtime', ctx.parent.runtime,
'--runtime-version', ctx.parent.runtime_version,
'--CDPYTHON_EXECUTABLE=' + python_path,
'--CDSPDLOG_LOG_LEVEL_COMPILE=' + loglevel]
if platform.system() == 'Windows':
return cmake_js_cmd + ['--toolset', 'host=' + ctx.parent.arch,
'--CDCMAKE_GENERATOR_PLATFORM=' + ctx.parent.arch, cmd]
else:
return cmake_js_cmd + [cmd]
build(auto_envvar_prefix='KF_BUILD')
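# Illustrative CLI sketch (not part of the original script). Assuming the pipenv /
# yarn / cmake-js toolchain referenced above is installed, typical invocations are:
#
#   python build.py --build_type Debug --runtime electron --electron_version 5.0.0 configure
#   python build.py --build_type Debug make
#   KF_BUILD_LOG_LEVEL=info python build.py freeze
#
# The environment-variable form relies on auto_envvar_prefix='KF_BUILD' passed to build() above.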
|
tests/test_compress.py
|
elkinsd/couchapp
| 224 |
91389
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
import unittest2 as unittest
import mock
import os
class CompressTest(unittest.TestCase):
def test_compress_js(self):
from couchapp.config import Config
config = Config()
config.conf['compress'] = {'js': {'foo':['shows/example-show.js']}}
with mock.patch('couchapp.hooks.compress.default.compress', return_value='foo') as mock_compress:
from couchapp.hooks.compress import Compress
compress = Compress(os.path.join(os.path.dirname(__file__), 'testapp'))
compress.conf = config
with mock.patch('couchapp.util.write'):
compress.run()
self.assertTrue(mock_compress.called, 'Default compressor has been called')
def test_our_jsmin_loading(self):
orig_import = __import__
def import_mock(name, *args):
if name == 'jsmin':
raise ImportError()
return orig_import(name, *args)
with mock.patch('__builtin__.__import__', side_effect=import_mock):
with mock.patch('couchapp.hooks.compress.jsmin.jsmin', return_value='foo'):
from couchapp.hooks.compress import default
result = default.compress('bar')
self.assertEqual(result, 'foo', 'Our module is called when it is not installed in the system')
def test_system_jsmin_loading(self):
orig_import = __import__
def import_mock(name, *args):
if name == 'couchapp.hooks.compress.jsmin':
raise ImportError()
return orig_import(name, *args)
with mock.patch('__builtin__.__import__', side_effect=import_mock):
with mock.patch('jsmin.jsmin', return_value='foo'):
from couchapp.hooks.compress import default
result = default.compress('bar')
self.assertEqual(result, 'foo', 'The system module is called when it is installed')
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
custom_layers/pad_layer.py
|
bobenxia/Centripetal-SGD
| 767 |
91404
|
import torch.nn as nn
import torch.nn.functional as F
class PadLayer(nn.Module):
    # A scalar pad value: pad < 0 crops |pad| rows and columns from every border of the feature map, pad > 0 zero-pads every border by that amount.
def __init__(self, pad):
super(PadLayer, self).__init__()
self.pad = pad
def forward(self, input):
        return F.pad(input, [self.pad] * 4)
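# Illustrative usage sketch (not from the original repo): F.pad with a negative value
# crops, so a negative PadLayer removes rows/columns from every border.
if __name__ == "__main__":
    import torch
    x = torch.randn(1, 3, 8, 8)
    print(PadLayer(-1)(x).shape)  # expected: torch.Size([1, 3, 6, 6])
    print(PadLayer(1)(x).shape)   # expected: torch.Size([1, 3, 10, 10])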
|
mmocr/models/textrecog/layers/satrn_layers.py
|
hongxuenong/mmocr
| 2,261 |
91472
|
<filename>mmocr/models/textrecog/layers/satrn_layers.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from mmocr.models.common import MultiHeadAttention
class SatrnEncoderLayer(BaseModule):
""""""
def __init__(self,
d_model=512,
d_inner=512,
n_head=8,
d_k=64,
d_v=64,
dropout=0.1,
qkv_bias=False,
init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.norm1 = nn.LayerNorm(d_model)
self.attn = MultiHeadAttention(
n_head, d_model, d_k, d_v, qkv_bias=qkv_bias, dropout=dropout)
self.norm2 = nn.LayerNorm(d_model)
self.feed_forward = LocalityAwareFeedforward(
d_model, d_inner, dropout=dropout)
def forward(self, x, h, w, mask=None):
n, hw, c = x.size()
residual = x
x = self.norm1(x)
x = residual + self.attn(x, x, x, mask)
residual = x
x = self.norm2(x)
x = x.transpose(1, 2).contiguous().view(n, c, h, w)
x = self.feed_forward(x)
x = x.view(n, c, hw).transpose(1, 2)
x = residual + x
return x
class LocalityAwareFeedforward(BaseModule):
"""Locality-aware feedforward layer in SATRN, see `SATRN.
<https://arxiv.org/abs/1910.04396>`_
"""
def __init__(self,
d_in,
d_hid,
dropout=0.1,
init_cfg=[
dict(type='Xavier', layer='Conv2d'),
dict(type='Constant', layer='BatchNorm2d', val=1, bias=0)
]):
super().__init__(init_cfg=init_cfg)
self.conv1 = ConvModule(
d_in,
d_hid,
kernel_size=1,
padding=0,
bias=False,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'))
self.depthwise_conv = ConvModule(
d_hid,
d_hid,
kernel_size=3,
padding=1,
bias=False,
groups=d_hid,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'))
self.conv2 = ConvModule(
d_hid,
d_in,
kernel_size=1,
padding=0,
bias=False,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'))
def forward(self, x):
x = self.conv1(x)
x = self.depthwise_conv(x)
x = self.conv2(x)
return x
class Adaptive2DPositionalEncoding(BaseModule):
"""Implement Adaptive 2D positional encoder for SATRN, see
`SATRN <https://arxiv.org/abs/1910.04396>`_
Modified from https://github.com/Media-Smart/vedastr
Licensed under the Apache License, Version 2.0 (the "License");
Args:
d_hid (int): Dimensions of hidden layer.
n_height (int): Max height of the 2D feature output.
n_width (int): Max width of the 2D feature output.
        dropout (float): Dropout rate.
"""
def __init__(self,
d_hid=512,
n_height=100,
n_width=100,
dropout=0.1,
init_cfg=[dict(type='Xavier', layer='Conv2d')]):
super().__init__(init_cfg=init_cfg)
h_position_encoder = self._get_sinusoid_encoding_table(n_height, d_hid)
h_position_encoder = h_position_encoder.transpose(0, 1)
h_position_encoder = h_position_encoder.view(1, d_hid, n_height, 1)
w_position_encoder = self._get_sinusoid_encoding_table(n_width, d_hid)
w_position_encoder = w_position_encoder.transpose(0, 1)
w_position_encoder = w_position_encoder.view(1, d_hid, 1, n_width)
self.register_buffer('h_position_encoder', h_position_encoder)
self.register_buffer('w_position_encoder', w_position_encoder)
self.h_scale = self.scale_factor_generate(d_hid)
self.w_scale = self.scale_factor_generate(d_hid)
self.pool = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(p=dropout)
def _get_sinusoid_encoding_table(self, n_position, d_hid):
"""Sinusoid position encoding table."""
denominator = torch.Tensor([
1.0 / np.power(10000, 2 * (hid_j // 2) / d_hid)
for hid_j in range(d_hid)
])
denominator = denominator.view(1, -1)
pos_tensor = torch.arange(n_position).unsqueeze(-1).float()
sinusoid_table = pos_tensor * denominator
sinusoid_table[:, 0::2] = torch.sin(sinusoid_table[:, 0::2])
sinusoid_table[:, 1::2] = torch.cos(sinusoid_table[:, 1::2])
return sinusoid_table
def scale_factor_generate(self, d_hid):
scale_factor = nn.Sequential(
nn.Conv2d(d_hid, d_hid, kernel_size=1), nn.ReLU(inplace=True),
nn.Conv2d(d_hid, d_hid, kernel_size=1), nn.Sigmoid())
return scale_factor
def forward(self, x):
b, c, h, w = x.size()
avg_pool = self.pool(x)
h_pos_encoding = \
self.h_scale(avg_pool) * self.h_position_encoder[:, :, :h, :]
w_pos_encoding = \
self.w_scale(avg_pool) * self.w_position_encoder[:, :, :, :w]
out = x + h_pos_encoding + w_pos_encoding
out = self.dropout(out)
return out
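# Illustrative usage sketch (not part of the original module); it assumes the
# mmcv/mmocr imports at the top of this file resolve.
if __name__ == "__main__":
    pos_enc = Adaptive2DPositionalEncoding(d_hid=512, n_height=100, n_width=100)
    feat = torch.rand(2, 512, 8, 25)  # (batch, channels, height, width)
    out = pos_enc(feat)               # same shape, position information added
    assert out.shape == feat.shape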
|
addon/surface_heat_diffuse_skinning/__init__.py
|
meshonline/Surface-Heat-Diffuse-Skinning
| 108 |
91498
|
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "Surface Heat Diffuse Skinning",
"author": "mesh online",
"version": (3, 3, 1),
"blender": (2, 80, 0),
"location": "View3D > UI > Mesh Online",
"description": "Surface Heat Diffuse Skinning",
"warning": "",
"wiki_url": "http://www.mesh-online.net/vhd.html",
"category": "Object"
}
import bpy
import sys
import os
import time
import platform
from subprocess import PIPE, Popen
from threading import Thread
from bpy.props import *
from queue import Queue, Empty
class SFC_OT_ModalTimerOperator(bpy.types.Operator):
"""Operator which runs its self from a timer"""
bl_idname = "wm.surface_heat_diffuse"
bl_label = "Surface Heat Diffuse Skinning"
bl_options = {'REGISTER', 'UNDO'}
_timer = None
_pid = None
_queue = None
_objs = []
_permulation = []
_selected_indices = []
_selected_group_index_weights = []
_start_time = None
def write_bone_data(self, obj, filepath):
f = open(filepath, 'w', encoding='utf-8')
f.write("# surface heat diffuse bone export.\n")
amt = obj.data
bpy.ops.object.mode_set(mode='EDIT')
for bone in amt.edit_bones:
if bone.use_deform:
world_bone_head = obj.matrix_world @ bone.head
world_bone_tail = obj.matrix_world @ bone.tail
f.write("b,{},{:.6f},{:.6f},{:.6f},{:.6f},{:.6f},{:.6f}\n".format(
bone.name, world_bone_head[0], world_bone_head[1], world_bone_head[2],
world_bone_tail[0], world_bone_tail[1], world_bone_tail[2]))
bpy.ops.object.mode_set(mode='OBJECT')
f.close()
def write_mesh_data(self, objs, filepath):
f = open(filepath, 'w', encoding='utf-8')
f.write("# surface heat diffuse mesh export.\n")
vertex_offset = 0
for obj in objs:
for v in obj.data.vertices:
world_v_co = obj.matrix_world @ v.co
f.write("v,{:.6f},{:.6f},{:.6f}\n".format(world_v_co[0], world_v_co[1], world_v_co[2]))
for poly in obj.data.polygons:
f.write("f");
for loop_ind in poly.loop_indices:
vert_ind = obj.data.loops[loop_ind].vertex_index
f.write(",{}".format(vertex_offset + vert_ind))
f.write("\n")
vertex_offset += len(obj.data.vertices)
f.close()
def read_weight_data(self, objs, filepath):
        # build a permutation mapping for all vertices
        vertex_offset = 0
for obj in objs:
for index in range(len(obj.data.vertices)):
self._permulation.append((vertex_offset + index, index, obj))
vertex_offset += len(obj.data.vertices)
if bpy.context.scene.surface_protect:
for index in range(len(objs)):
obj = objs[index]
# get selected vertex indices
self._selected_indices.append([i.index for i in obj.data.vertices if i.select])
self._selected_group_index_weights.append([])
# push protected vertices weight
for vert_ind in self._selected_indices[index]:
for g in obj.data.vertices[vert_ind].groups:
self._selected_group_index_weights[index].append((obj.vertex_groups[g.group].name, vert_ind, g.weight))
f = open(filepath, 'r', encoding='utf-8')
bones = []
for line in f:
if len(line) == 0:
continue
tokens = line.strip("\r\n").split(",")
if tokens[0] == "b":
group_name = tokens[1]
bones.append(group_name)
for obj in objs:
#check for existing group with the same name
                    if obj.vertex_groups.get(group_name) is not None:
group = obj.vertex_groups[group_name]
obj.vertex_groups.remove(group)
obj.vertex_groups.new(name = group_name)
if tokens[0] == "w":
group_name = bones[int(tokens[2])]
index = int(tokens[1])
vert_ind = self._permulation[index][1]
weight = float(tokens[3])
obj = self._permulation[index][2]
# protect vertices weight
if bpy.context.scene.surface_protect and vert_ind in self._selected_indices[objs.index(obj)]:
continue
obj.vertex_groups[group_name].add([vert_ind], weight, 'REPLACE')
f.close()
if bpy.context.scene.surface_protect:
for index in range(len(objs)):
obj = objs[index]
# pop protected vertices weight
for (group_name, vert_ind, weight) in self._selected_group_index_weights[index]:
obj.vertex_groups[group_name].add([vert_ind], weight, 'REPLACE')
def modal(self, context, event):
if event.type == 'ESC':
self._pid.terminate()
return self.cancel(context)
if event.type == 'TIMER':
# background task is still running
            if self._pid.poll() is None:
# read line without blocking
try: rawline = self._queue.get_nowait()
except Empty:
pass
else:
line = rawline.decode().strip("\r\n")
self.report({'INFO'}, line)
else:
# background task finished running
self.read_weight_data(self._objs, os.path.join(os.path.dirname(__file__), "data", "untitled-weight.txt"))
running_time = time.time() - self._start_time
self.report({'INFO'}, "".join(("Complete, ", "running time: ", \
str(int(running_time / 60))," minutes ", str(int(running_time % 60)), " seconds")))
# bind meshes to the armature
bpy.ops.object.parent_set(type='ARMATURE')
return self.cancel(context)
return {'RUNNING_MODAL'}
def execute(self, context):
self._objs = []
self._permulation = []
self._selected_indices = []
self._selected_group_index_weights = []
arm = None
objs = []
# get armature and mesh
for ob in bpy.context.selected_objects:
if 'ARMATURE' == ob.type:
arm = ob
if 'MESH' == ob.type:
objs.append(ob)
# sort meshes by name
        objs.sort(key=lambda obj: obj.name)
# save the reference for later use
self._objs = objs
for obj in objs:
# focus on the mesh
bpy.context.view_layer.objects.active = obj
# synchronize data
bpy.ops.object.mode_set(mode='OBJECT')
# write mesh data
self.write_mesh_data(objs, os.path.join(os.path.dirname(__file__), "data", "untitled-mesh.txt"))
# we must focus on the armature before we can write bone data
bpy.context.view_layer.objects.active = arm
# synchronize data
bpy.ops.object.mode_set(mode='OBJECT')
# write bone data
self.write_bone_data(arm, os.path.join(os.path.dirname(__file__), "data", "untitled-bone.txt"))
# do voxel skinning in background
ON_POSIX = 'posix' in sys.builtin_module_names
# chmod
if ON_POSIX:
os.chmod(os.path.join(os.path.dirname(__file__), "bin", platform.system(), "shd"), 0o755)
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
executable_path = None
if platform.system() == 'Windows':
if platform.machine().endswith('64'):
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "x64", "shd")
else:
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "x86", "shd")
else:
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "shd")
self._pid = Popen([executable_path,
"untitled-mesh.txt",
"untitled-bone.txt",
"untitled-weight.txt",
str(context.scene.surface_resolution),
str(context.scene.surface_loops),
str(context.scene.surface_samples),
str(context.scene.surface_influence),
str(context.scene.surface_falloff),
context.scene.surface_sharpness,
"y" if context.scene.detect_surface_solidify else "n"],
cwd = os.path.join(os.path.dirname(__file__), "data"),
stdout = PIPE,
bufsize = 1,
close_fds = ON_POSIX)
self._queue = Queue()
t = Thread(target=enqueue_output, args=(self._pid.stdout, self._queue))
t.daemon = True
t.start()
self._start_time = time.time()
# start timer to poll data
self._timer = context.window_manager.event_timer_add(0.1, window=context.window)
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
def cancel(self, context):
# remove timer
context.window_manager.event_timer_remove(self._timer)
self._objs = []
self._permulation = []
self._selected_indices = []
self._selected_group_index_weights = []
return {'CANCELLED'}
def init_properties():
bpy.types.Scene.surface_resolution = IntProperty(
name = "Voxel Resolution",
description = "Maximum voxel grid size",
default = 128,
min = 32,
max = 1024)
bpy.types.Scene.surface_loops = IntProperty(
name = "Diffuse Loops",
description = "Heat diffuse pass = Voxel Resolution * Diffuse Loops",
default = 5,
min = 1,
max = 9)
bpy.types.Scene.surface_samples = IntProperty(
name = "Sample Rays",
description = "Ray samples count",
default = 64,
min = 32,
max = 128)
bpy.types.Scene.surface_influence = IntProperty(
name = "Influence Bones",
description = "Max influence bones",
default = 4,
min = 1,
max = 8)
bpy.types.Scene.surface_falloff = FloatProperty(
name = "Diffuse Falloff",
description = "Heat diffuse falloff",
default = 0.2,
min = 0.01,
max = 0.99)
bpy.types.Scene.surface_protect = BoolProperty(
name = "Protect Selected Vertex Weight",
description = "Protect selected vertex weight",
default = False)
bpy.types.Scene.surface_sharpness = EnumProperty(
name = "Edges",
description = "Edges",
items = [
('1','Soft','Soft Curvature'),
('2','Normal','Normal Curvature'),
('3','Sharp','Sharp Curvature'),
('4','Sharpest','Sharpest Curvature')],
default = '3')
bpy.types.Scene.detect_surface_solidify = BoolProperty(
name = "Detect Solidify",
description = "Detect solidified clothes, if you enable this option, make sure that all bones are in the charecter's volume, otherwise, the result may be wrong",
default = False)
def clear_properties():
props = ["surface_resolution",
"surface_samples",
"surface_falloff",
"surface_loops",
"surface_influence",
"surface_protect"]
for p in props:
if p in bpy.types.Scene.bl_rna.properties:
exec("del bpy.types.Scene." + p)
class SFC_PT_SurfaceHeatDiffuseSkinningPanel(bpy.types.Panel):
"""Creates a Panel in the Object properties window"""
bl_label = "Surface Heat Diffuse Skinning"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = 'Mesh Online'
@classmethod
def poll(self, context):
arm_count = 0
obj_count = 0
for ob in bpy.context.selected_objects:
if 'ARMATURE' == ob.type:
arm_count += 1
if 'MESH' == ob.type:
obj_count += 1
return (context.mode == 'OBJECT' and arm_count == 1 and obj_count >= 1)
def draw(self, context):
layout = self.layout
layout.prop(context.scene, 'surface_resolution', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_loops', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_samples', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_influence', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_falloff', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_sharpness')
layout.prop(context.scene, 'surface_protect')
layout.prop(context.scene, 'detect_surface_solidify')
row = layout.row()
row.operator("wm.surface_heat_diffuse")
def register():
bpy.utils.register_class(SFC_PT_SurfaceHeatDiffuseSkinningPanel)
bpy.utils.register_class(SFC_OT_ModalTimerOperator)
init_properties()
def unregister():
bpy.utils.unregister_class(SFC_PT_SurfaceHeatDiffuseSkinningPanel)
bpy.utils.unregister_class(SFC_OT_ModalTimerOperator)
clear_properties()
if __name__ == "__main__":
register()
|
tests/test_connector.py
|
romis2012/aiohttp-socks
| 158 |
91516
|
<reponame>romis2012/aiohttp-socks
import asyncio
import ssl
import aiohttp
import pytest # noqa
from aiohttp import ClientResponse, TCPConnector
from yarl import URL # noqa
from aiohttp_socks import (
ProxyType,
ProxyConnector,
ChainProxyConnector,
ProxyInfo,
ProxyError,
ProxyConnectionError,
ProxyTimeoutError,
open_connection,
create_connection
)
from tests.config import (
TEST_URL_IPV4,
SOCKS5_IPV4_URL, PROXY_HOST_IPV4, SOCKS5_PROXY_PORT, LOGIN,
PASSWORD, TEST_URL_IPV4_DELAY, SKIP_IPV6_TESTS, SOCKS5_IPV6_URL,
SOCKS4_URL, HTTP_PROXY_URL, SOCKS4_PROXY_PORT, HTTP_PROXY_PORT,
TEST_HOST_PEM_FILE, TEST_URL_IPV4_HTTPS,
)
def create_ssl_context():
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
ssl_context.verify_mode = ssl.CERT_REQUIRED
ssl_context.load_verify_locations(TEST_HOST_PEM_FILE)
return ssl_context
async def fetch(connector: TCPConnector,
url: str, timeout=None) -> ClientResponse:
url = URL(url)
ssl_context = None
if url.scheme == 'https':
ssl_context = create_ssl_context()
async with aiohttp.ClientSession(connector=connector) as session:
async with session.get(url, ssl=ssl_context, timeout=timeout) as resp:
return resp
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.parametrize('rdns', (True, False))
@pytest.mark.asyncio
async def test_socks5_proxy_ipv4(url, rdns):
connector = ProxyConnector.from_url(SOCKS5_IPV4_URL, rdns=rdns)
res = await fetch(connector=connector, url=url)
assert res.status == 200
@pytest.mark.asyncio
async def test_socks5_proxy_with_invalid_credentials():
connector = ProxyConnector(
proxy_type=ProxyType.SOCKS5,
host=PROXY_HOST_IPV4,
port=SOCKS5_PROXY_PORT,
username=LOGIN,
password=PASSWORD + '<PASSWORD>',
)
with pytest.raises(ProxyError):
await fetch(connector=connector, url=TEST_URL_IPV4)
@pytest.mark.asyncio
async def test_socks5_proxy_with_timeout():
connector = ProxyConnector(
proxy_type=ProxyType.SOCKS5,
host=PROXY_HOST_IPV4,
port=SOCKS5_PROXY_PORT,
username=LOGIN,
password=PASSWORD,
)
with pytest.raises(asyncio.TimeoutError):
await fetch(connector=connector, url=TEST_URL_IPV4_DELAY, timeout=1)
@pytest.mark.asyncio
async def test_socks5_proxy_with_proxy_connect_timeout():
connector = ProxyConnector.from_url(SOCKS5_IPV4_URL)
timeout = aiohttp.ClientTimeout(total=32, sock_connect=0.001)
with pytest.raises(ProxyTimeoutError):
await fetch(connector=connector, url=TEST_URL_IPV4, timeout=timeout)
@pytest.mark.asyncio
async def test_socks5_proxy_with_invalid_proxy_port(unused_tcp_port):
connector = ProxyConnector(
proxy_type=ProxyType.SOCKS5,
host=PROXY_HOST_IPV4,
port=unused_tcp_port,
username=LOGIN,
password=PASSWORD,
)
with pytest.raises(ProxyConnectionError):
await fetch(connector=connector, url=TEST_URL_IPV4)
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.skipif(SKIP_IPV6_TESTS, reason="TravisCI doesn't support ipv6")
@pytest.mark.asyncio
async def test_socks5_proxy_ipv6(url):
connector = ProxyConnector.from_url(SOCKS5_IPV6_URL)
res = await fetch(connector=connector, url=url)
assert res.status == 200
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.parametrize('rdns', (True, False))
@pytest.mark.asyncio
async def test_socks4_proxy(url, rdns):
connector = ProxyConnector.from_url(SOCKS4_URL, rdns=rdns, )
res = await fetch(connector=connector, url=url)
assert res.status == 200
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.asyncio
async def test_http_proxy(url):
connector = ProxyConnector.from_url(HTTP_PROXY_URL)
res = await fetch(connector=connector, url=url)
assert res.status == 200
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.asyncio
async def test_chain_proxy_from_url(url):
connector = ChainProxyConnector.from_urls([
SOCKS5_IPV4_URL,
SOCKS4_URL,
HTTP_PROXY_URL
])
res = await fetch(connector=connector, url=url)
assert res.status == 200
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.parametrize('rdns', (True, False))
@pytest.mark.asyncio
async def test_chain_proxy_ctor(url, rdns):
connector = ChainProxyConnector([
ProxyInfo(
proxy_type=ProxyType.SOCKS5,
host=PROXY_HOST_IPV4,
port=SOCKS5_PROXY_PORT,
username=LOGIN,
password=PASSWORD,
rdns=rdns
),
ProxyInfo(
proxy_type=ProxyType.SOCKS4,
host=PROXY_HOST_IPV4,
port=SOCKS4_PROXY_PORT,
username=LOGIN,
rdns=rdns
),
ProxyInfo(
proxy_type=ProxyType.HTTP,
host=PROXY_HOST_IPV4,
port=HTTP_PROXY_PORT,
username=LOGIN,
password=PASSWORD
),
])
res = await fetch(connector=connector, url=url)
assert res.status == 200
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.parametrize('rdns', (True, False))
@pytest.mark.asyncio
async def test_socks5_open_connection(url, rdns):
url = URL(url)
ssl_context = None
if url.scheme == 'https':
ssl_context = create_ssl_context()
reader, writer = await open_connection(
proxy_url=SOCKS5_IPV4_URL,
host=url.host,
port=url.port,
ssl=ssl_context,
server_hostname=url.host if ssl_context else None,
rdns=rdns,
)
request = ("GET %s HTTP/1.1\r\n"
"Host: %s\r\n"
"Connection: close\r\n\r\n" % (url.path_qs, url.host))
writer.write(request.encode())
response = await reader.read(-1)
assert b'200 OK' in response
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.parametrize('rdns', (True, False))
@pytest.mark.asyncio
async def test_socks5_http_create_connection(url, rdns, event_loop):
url = URL(url)
ssl_context = None
if url.scheme == 'https':
ssl_context = create_ssl_context()
reader = asyncio.StreamReader(loop=event_loop)
protocol = asyncio.StreamReaderProtocol(reader, loop=event_loop)
transport, _ = await create_connection(
proxy_url=SOCKS5_IPV4_URL,
protocol_factory=lambda: protocol,
host=url.host,
port=url.port,
ssl=ssl_context,
server_hostname=url.host if ssl_context else None,
rdns=rdns,
)
writer = asyncio.StreamWriter(transport, protocol, reader, event_loop)
request = ("GET %s HTTP/1.1\r\n"
"Host: %s\r\n"
"Connection: close\r\n\r\n" % (url.path_qs, url.host))
writer.write(request.encode())
response = await reader.read(-1)
assert b'200 OK' in response
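# Illustrative client sketch (not part of the test suite); the proxy URL below is a
# made-up placeholder, not one of the test fixtures.
async def _example_client():
    connector = ProxyConnector.from_url('socks5://user:password@127.0.0.1:1080')
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get('https://example.org') as resp:
            print(resp.status)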
|
gslib/tests/test_hash.py
|
maxshine/gsutil
| 1,894 |
91548
|
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for hash command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
from gslib.exception import CommandException
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
_TEST_FILE_CONTENTS = b'123456\n'
_TEST_FILE_B64_CRC = 'nYmSiA=='
_TEST_FILE_B64_MD5 = '9EeyCn/L9TpdW+AT6gsVrw=='
_TEST_FILE_HEX_CRC = '9D899288'
_TEST_FILE_HEX_MD5 = 'f447b20a7fcbf53a5d5be013ea0b15af'
_TEST_COMPOSITE_ADDED_CONTENTS = b'tmp'
_TEST_COMPOSITE_B64_CRC = 'M3DYBg=='
_TEST_COMPOSITE_HEX_CRC = '3370D806'
class TestHashUnit(testcase.GsUtilUnitTestCase):
"""Unit tests for hash command."""
def testHashContents(self):
tmp_file = self.CreateTempFile(contents=_TEST_FILE_CONTENTS)
stdout = self.RunCommand('hash', args=[tmp_file], return_stdout=True)
self.assertIn(b'Hashes [base64]', stdout)
self.assertIn(
('\tHash (crc32c):\t\t%s' % _TEST_FILE_B64_CRC).encode('ascii'), stdout)
self.assertIn(('\tHash (md5):\t\t%s' % _TEST_FILE_B64_MD5).encode('ascii'),
stdout)
def testHashNoMatch(self):
try:
self.RunCommand('hash', args=['non-existent-file'])
self.fail('Did not get expected CommandException')
except CommandException as e:
self.assertIn('No files matched', e.reason)
def testHashHexFormat(self):
tmp_file = self.CreateTempFile(contents=_TEST_FILE_CONTENTS)
stdout = self.RunCommand('hash', args=['-h', tmp_file], return_stdout=True)
self.assertIn(b'Hashes [hex]', stdout)
self.assertIn(
('\tHash (crc32c):\t\t%s' % _TEST_FILE_HEX_CRC).encode('ascii'), stdout)
self.assertIn(('\tHash (md5):\t\t%s' % _TEST_FILE_HEX_MD5).encode('ascii'),
stdout)
def testHashWildcard(self):
num_test_files = 2
tmp_dir = self.CreateTempDir(test_files=num_test_files)
stdout = self.RunCommand('hash',
args=[os.path.join(tmp_dir, '*')],
return_stdout=True)
# One summary line and two hash lines per file.
num_expected_lines = num_test_files * (1 + 2)
    self.assertEqual(len(stdout.splitlines()), num_expected_lines)
def testHashSelectAlg(self):
tmp_file = self.CreateTempFile(contents=_TEST_FILE_CONTENTS)
stdout_crc = self.RunCommand('hash',
args=['-c', tmp_file],
return_stdout=True)
stdout_md5 = self.RunCommand('hash',
args=['-m', tmp_file],
return_stdout=True)
stdout_both = self.RunCommand('hash',
args=['-c', '-m', tmp_file],
return_stdout=True)
for stdout in (stdout_crc, stdout_both):
self.assertIn(
('\tHash (crc32c):\t\t%s' % _TEST_FILE_B64_CRC).encode('ascii'),
stdout)
for stdout in (stdout_md5, stdout_both):
self.assertIn(
('\tHash (md5):\t\t%s' % _TEST_FILE_B64_MD5).encode('ascii'), stdout)
self.assertNotIn(b'md5', stdout_crc)
self.assertNotIn(b'crc32c', stdout_md5)
class TestHash(testcase.GsUtilIntegrationTestCase):
"""Integration tests for hash command."""
def testHashCloudObject(self):
"""Test hash command on a cloud object."""
obj1 = self.CreateObject(object_name='obj1', contents=_TEST_FILE_CONTENTS)
# Tests cloud object with -h.
stdout = self.RunGsUtil(['hash', '-h', suri(obj1)], return_stdout=True)
self.assertIn('Hashes [hex]', stdout)
if self.default_provider == 'gs':
# Hex hashes for cloud objects get converted to lowercase but their
# meaning is the same.
self.assertIn(('\tHash (crc32c):\t\t%s' % _TEST_FILE_HEX_CRC.lower()),
stdout)
self.assertIn(('\tHash (md5):\t\t%s' % _TEST_FILE_HEX_MD5), stdout)
# Tests cloud object as base64.
stdout = self.RunGsUtil(['hash', suri(obj1)], return_stdout=True)
self.assertIn('Hashes [base64]', stdout)
if self.default_provider == 'gs':
self.assertIn(('\tHash (crc32c):\t\t%s' % _TEST_FILE_B64_CRC), stdout)
self.assertIn(('\tHash (md5):\t\t%s' % _TEST_FILE_B64_MD5), stdout)
@SkipForS3('No composite object or crc32c support for S3.')
def testHashCompositeObject(self):
"""Test hash command on a composite object (which only has crc32c)."""
bucket = self.CreateBucket()
obj1 = self.CreateObject(bucket_uri=bucket,
object_name='obj1',
contents=_TEST_FILE_CONTENTS)
obj2 = self.CreateObject(bucket_uri=bucket,
object_name='tmp',
contents=_TEST_COMPOSITE_ADDED_CONTENTS)
self.RunGsUtil(['compose', suri(obj1), suri(obj2), suri(obj1)])
stdout = self.RunGsUtil(['hash', '-h', suri(obj1)], return_stdout=True)
self.assertIn('Hashes [hex]', stdout)
# Hex hashes for cloud objects get converted to lowercase but their
# meaning is the same.
self.assertIn(('\tHash (crc32c):\t\t%s' % _TEST_COMPOSITE_HEX_CRC.lower()),
stdout)
stdout = self.RunGsUtil(['hash', suri(obj1)], return_stdout=True)
self.assertIn('Hashes [base64]', stdout)
self.assertIn(('\tHash (crc32c):\t\t%s' % _TEST_COMPOSITE_B64_CRC), stdout)
|
src/sage_setup/util.py
|
UCD4IDS/sage
| 1,742 |
91556
|
r"""
Utility functions for building Sage
"""
# ****************************************************************************
# Copyright (C) 2017 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
def stable_uniq(L):
"""
Given an iterable L, remove duplicate items from L by keeping only
the last occurrence of any item.
The items must be hashable.
EXAMPLES::
sage: from sage_setup.util import stable_uniq
sage: stable_uniq( (1, 2, 3, 4, 5, 6, 3, 7, 5, 1, 5, 9) )
[2, 4, 6, 3, 7, 1, 5, 9]
"""
D = {}
for pos, item in enumerate(L):
D[item] = pos # Store the last position where an item appears
return sorted(D, key=lambda item: D[item])
def have_module(name):
"""
Check whether a Python module named ``name`` can be imported.
This is done by trying to import that module and returning ``True``
if that import succeeded. So, as a side effect, the module is
actually imported if possible.
EXAMPLES::
sage: from sage_setup.util import have_module
sage: have_module("itertools")
True
sage: have_module("sage.rings.integer")
True
sage: have_module("no.such.module")
False
"""
try:
__import__(name, {}, {}, [], 0)
return True
except ImportError:
return False
|
tests/test_environment_variables.py
|
ben-31/kas
| 187 |
91577
|
<reponame>ben-31/kas<gh_stars>100-1000
import os
import shutil
from kas import kas
def test_build_dir_is_placed_inside_work_dir_by_default(changedir, tmpdir):
conf_dir = str(tmpdir.mkdir('test_env_variables'))
shutil.rmtree(conf_dir, ignore_errors=True)
shutil.copytree('tests/test_environment_variables', conf_dir)
os.chdir(conf_dir)
kas.kas(['checkout', 'test.yml'])
assert(os.path.exists(os.path.join(os.getcwd(), 'build', 'conf')))
def test_build_dir_can_be_specified_by_environment_variable(changedir, tmpdir):
conf_dir = str(tmpdir.mkdir('test_env_variables'))
build_dir = str(tmpdir.mkdir('test_build_dir'))
shutil.rmtree(conf_dir, ignore_errors=True)
shutil.copytree('tests/test_environment_variables', conf_dir)
shutil.rmtree(build_dir, ignore_errors=True)
os.chdir(conf_dir)
os.environ['KAS_BUILD_DIR'] = build_dir
kas.kas(['checkout', 'test.yml'])
del os.environ['KAS_BUILD_DIR']
assert(os.path.exists(os.path.join(build_dir, 'conf')))
|
tests/apollo/test_skvbc_consensus_batching.py
|
definitelyNotFBI/utt
| 340 |
91578
|
<gh_stars>100-1000
# Concord
#
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
import os.path
import unittest
from util.bft import with_trio, with_bft_network, KEY_FILE_PREFIX
from util.skvbc_history_tracker import verify_linearizability
from util import skvbc as kvbc
NUM_OF_WRITES = 100
MAX_CONCURRENCY = 30
CONCURRENCY_LEVEL = "1"
MAX_REQS_SIZE_IN_BATCH = "300"
MAX_REQ_NUM_IN_BATCH = "3"
BATCH_FLUSH_PERIOD = "250"
BATCH_SELF_ADJUSTED = "0"
BATCH_BY_REQ_SIZE = "1"
BATCH_BY_REQ_NUM = "2"
BATCHING_POLICY = BATCH_SELF_ADJUSTED
def start_replica_cmd(builddir, replica_id):
"""
Return a command that starts an skvbc replica when passed to
    subprocess.Popen.
    Note that each argument is an element in a list.
"""
statusTimerMilli = "500"
path = os.path.join(builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica")
return [path,
"-k", KEY_FILE_PREFIX,
"-i", str(replica_id),
"-s", statusTimerMilli,
"-y", CONCURRENCY_LEVEL,
"-b", BATCHING_POLICY,
"-m", MAX_REQS_SIZE_IN_BATCH,
"-q", MAX_REQ_NUM_IN_BATCH,
"-z", BATCH_FLUSH_PERIOD
]
class SkvbcConsensusBatchingPoliciesTest(unittest.TestCase):
__test__ = False # so that PyTest ignores this test scenario
async def launch_concurrent_requests(self, bft_network, tracker):
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)
rw = await skvbc.send_concurrent_ops(NUM_OF_WRITES, max_concurrency=MAX_CONCURRENCY, max_size=10, write_weight=0.9)
self.assertTrue(rw[0] + rw[1] >= NUM_OF_WRITES)
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
@verify_linearizability(pre_exec_enabled=True, no_conflicts=True)
    async def test_batching_self_adjusted(self, bft_network, tracker):
"""
This test verifies that BATCH_SELF_ADJUSTED consensus policy works
"""
global BATCHING_POLICY
BATCHING_POLICY = BATCH_SELF_ADJUSTED
await self.launch_concurrent_requests(bft_network, tracker)
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
@verify_linearizability(pre_exec_enabled=True, no_conflicts=True)
async def test_batching_by_req_num(self, bft_network, tracker):
"""
This test verifies that BATCH_BY_REQ_NUM consensus policy works
"""
global BATCHING_POLICY
BATCHING_POLICY = BATCH_BY_REQ_NUM
await self.launch_concurrent_requests(bft_network, tracker)
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
@verify_linearizability(pre_exec_enabled=True, no_conflicts=True)
async def test_batching_by_reqs_size(self, bft_network, tracker):
"""
This test verifies that BATCH_BY_REQ_SIZE consensus policy works
"""
global BATCHING_POLICY
BATCHING_POLICY = BATCH_BY_REQ_SIZE
await self.launch_concurrent_requests(bft_network, tracker)
|
saleor/core/utils/date_time.py
|
eanknd/saleor
| 1,392 |
91581
|
from datetime import datetime
import pytz
def convert_to_utc_date_time(date):
"""Convert date into utc date time."""
if date is None:
return
return datetime.combine(date, datetime.min.time(), tzinfo=pytz.UTC)
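# Illustrative usage sketch (not part of the original module).
if __name__ == "__main__":
    from datetime import date
    print(convert_to_utc_date_time(date(2020, 1, 31)))  # 2020-01-31 00:00:00+00:00
    print(convert_to_utc_date_time(None))               # None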
|
docs/_ext/djangodocs.py
|
Kunpors/dr.pors-
| 341 |
91592
|
<filename>docs/_ext/djangodocs.py
# Sphinx helper for Django-specific references
def setup(app):
app.add_crossref_type(
directivename = "label",
rolename = "djterm",
indextemplate = "pair: %s; label",
)
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "templatetag",
rolename = "ttag",
indextemplate = "pair: %s; template tag",
)
app.add_crossref_type(
directivename = "templatefilter",
rolename = "tfilter",
indextemplate = "pair: %s; template filter",
)
app.add_crossref_type(
directivename = "fieldlookup",
rolename = "lookup",
indextemplate = "pair: %s; field lookup type",
)
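# Illustrative usage sketch (not part of the original extension). Once registered,
# the crossref types above are used in the .rst sources roughly like this:
#
#   .. setting:: DEBUG
#
#   Set this to False in production.
#
# and referenced elsewhere with :setting:`DEBUG`; the other directives pair up the
# same way, e.g. ``.. templatetag::`` with ``:ttag:`` and ``.. fieldlookup::`` with ``:lookup:``.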
|
configs/mmedit/super-resolution/super-resolution_ncnn_dynamic.py
|
aegis-rider/mmdeploy
| 746 |
91629
|
_base_ = ['./super-resolution_dynamic.py', '../../_base_/backends/ncnn.py']
|
sklearn/gaussian_process/tests/_mini_sequence_kernel.py
|
MaiRajborirug/scikit-learn
| 50,961 |
91634
|
from sklearn.gaussian_process.kernels import Kernel, Hyperparameter
from sklearn.gaussian_process.kernels import GenericKernelMixin
from sklearn.gaussian_process.kernels import StationaryKernelMixin
import numpy as np
from sklearn.base import clone
class MiniSeqKernel(GenericKernelMixin, StationaryKernelMixin, Kernel):
"""
A minimal (but valid) convolutional kernel for sequences of variable
length.
"""
def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)):
self.baseline_similarity = baseline_similarity
self.baseline_similarity_bounds = baseline_similarity_bounds
@property
def hyperparameter_baseline_similarity(self):
return Hyperparameter(
"baseline_similarity", "numeric", self.baseline_similarity_bounds
)
def _f(self, s1, s2):
return sum(
[1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2]
)
def _g(self, s1, s2):
return sum([0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2])
def __call__(self, X, Y=None, eval_gradient=False):
if Y is None:
Y = X
if eval_gradient:
return (
np.array([[self._f(x, y) for y in Y] for x in X]),
np.array([[[self._g(x, y)] for y in Y] for x in X]),
)
else:
return np.array([[self._f(x, y) for y in Y] for x in X])
def diag(self, X):
return np.array([self._f(x, x) for x in X])
def clone_with_theta(self, theta):
cloned = clone(self)
cloned.theta = theta
return cloned
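# Illustrative usage sketch (not part of the original test helper): evaluating the
# kernel directly on a few variable-length sequences.
if __name__ == "__main__":
    X = ["ACGT", "AC", "TTT"]
    kernel = MiniSeqKernel(baseline_similarity=0.5)
    print(kernel(X))       # 3 x 3 Gram matrix of pairwise sequence similarities
    print(kernel.diag(X))  # per-sequence self-similarities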
|
NLP/Conversational-Recommendation-BASELINE/conversational_recommendation/goal_planning/model/paddle_astar_goal.py
|
zhangyimi/Research
| 1,319 |
91665
|
<gh_stars>1000+
######################################################################
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
"""
File:
"""
import paddle
import os
import paddle.fluid as fluid
import numpy as np
import sys
import math
import random
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
import paddle.fluid.dygraph.base as base
class Config(object):
"""Config"""
def __init__(self):
self.CLASS_SIZE = 2
self.EMBED_SIZE = 128
self.HIDDEN_SIZE = 128
self.STACKED_NUM = 3
self.INPUT_SIZE = 21
self.TRAIN_RATE = 0.7
self.VAL_RATE = 0.15
self.SHUFFLE = False
self.GOAL_FILE = True
self.DOWN_SAMPLING = False
self.BATCH_SIZE = 128
self.LEARNING_RATE = 0.001
self.NUM_EPOCH = 50
self.DEVICE = fluid.CPUPlace()
self.SAVE_PATH = "../model_state/paddle_astar_goal_v1.mdl"
def file_reader(file_path):
"""file_reader"""
data = None
    with open(file_path, "r") as f:
        data = eval(f.read())
return data
def reader_generater(datas):
"""reader_generater"""
def reader():
"""reader"""
for data in datas:
yield data
return reader
def get_data(file_path, data_tag, test=False):
x = file_reader(file_path + data_tag + "_next_goal_type.txt")
if test is False:
y = file_reader(file_path + data_tag + "_next_goal_type_label.txt")
g = file_reader(file_path + data_tag + "_final_goal_type.txt")
idx = file_reader(file_path + data_tag + "_next_goal_type_idx.txt")
c = [item[-1] for item in x]
data = None
if test is False:
data = [[x[i], y[i], c[i], g[i], idx[i]] for i in range(len(x))]
else:
data = [[x[i], c[i], g[i][0], idx[i]] for i in range(len(x))]
return data
def get_point_data(train_rate, val_rate, batch_size, train_down_sampling=False, goal_file=True):
"""get_point_data"""
file_path = "../train_data/"
train_data = get_data(file_path, "train")
val_data = get_data(file_path, "val")
test_data = get_data(file_path, "test", test=True)
train_len = len(train_data)
val_len = len(val_data)
data_len = train_len + val_len + len(test_data)
print(train_len, val_len, len(test_data))
train_reader = paddle.batch(reader_generater(train_data), batch_size=batch_size)
val_reader = paddle.batch(reader_generater(val_data), batch_size=batch_size)
test_reader = paddle.batch(reader_generater(test_data), batch_size=batch_size)
return train_reader, val_reader, test_reader
def a_star(sequence, current, goal, config):
"""a_star"""
input_dim = config.INPUT_SIZE
class_dim = config.CLASS_SIZE
embed_dim = config.EMBED_SIZE
hidden_dim = config.HIDDEN_SIZE
stacked_num = config.STACKED_NUM
weight_data = np.random.random(size=(input_dim, embed_dim))
my_param_attrs = fluid.ParamAttr(
name="embedding",
learning_rate=0.5,
initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
trainable=True
)
seq_embed = fluid.embedding(
input=sequence, size=[input_dim, embed_dim], param_attr=my_param_attrs)
curr_embed = fluid.embedding(
input=current, size=[input_dim, embed_dim], param_attr=my_param_attrs)
goal_embed = fluid.embedding(
input=goal, size=[input_dim, embed_dim], param_attr=my_param_attrs)
fc1 = fluid.layers.fc(input=seq_embed, size=hidden_dim)
lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hidden_dim)
inputs = [fc1, lstm1]
for i in range(2, stacked_num + 1):
fc = fluid.layers.fc(input=inputs, size=hidden_dim)
lstm, cell = fluid.layers.dynamic_lstm(
input=fc, size=hidden_dim, is_reverse=(i % 2) == 0)
inputs = [fc, lstm]
fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max')
lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max')
current_cost_embed = [fc_last, lstm_last, curr_embed]
remain_cost_embed = [fc_last, lstm_last, goal_embed]
pred_curr_fc1 = fluid.layers.fc(input=current_cost_embed, size=64, act="relu")
current_cost = fluid.layers.fc(input=pred_curr_fc1, size=1, act="sigmoid")
pred_goal_fc1 = fluid.layers.fc(input=remain_cost_embed, size=64, act="relu")
remain_cost = fluid.layers.fc(input=pred_goal_fc1, size=1, act="sigmoid")
prediction = 0.5 * current_cost + 0.5 * remain_cost
return prediction
def inference_program(config):
"""inference_program"""
sequence = fluid.data(name="sequence", shape=[None, 1], dtype="int64", lod_level=1)
current = fluid.data(name="current", shape=[None], dtype="int64")
goal = fluid.data(name="goal", shape=[None], dtype="int64")
net = a_star(sequence, current, goal, config)
return net
def train_program(prediction):
"""train_program"""
label = fluid.data(name="label", shape=[None, 1], dtype="float32")
idx = fluid.data(name="idx", shape=[None], dtype="int64")
cost = fluid.layers.log_loss(input=prediction, label=label)
avg_cost = fluid.layers.mean(cost)
return avg_cost
def train_accuracy(prediction, data):
"""train_accuracy"""
y_pred = list()
y_true = list()
for item in prediction:
if item[0] > 0.5:
y_pred.append(1)
else:
y_pred.append(0)
for item in data:
y_true.append(item[1])
accuracy = accuracy_score(y_true, y_pred)
return accuracy
def argmax_metrics(y_pred, y_true, y_type, y_idx):
"""argmax_metrics"""
group_data = list()
idx = 0
while idx < len(y_idx):
tmp_pred = list()
tmp_true = list()
tmp_type = list()
tmp_idx = list()
previous = y_idx[idx]
while idx < len(y_idx) and y_idx[idx] == previous:
tmp_pred.append(y_pred[idx])
tmp_true.append(y_true[idx])
tmp_type.append(y_type[idx])
tmp_idx.append(y_idx[idx])
idx += 1
group_data.append([tmp_pred, tmp_true, tmp_type, tmp_idx])
data_pred = list()
data_true = list()
for item in group_data:
true_type = None
pred_type = None
max_pred = -1
for a, b, c in zip(item[0], item[1], item[2]):
if a > max_pred:
max_pred = a
pred_type = c
if b == 1:
true_type = c
        if true_type is not None and pred_type is not None:
data_pred.append(pred_type)
data_true.append(true_type)
print(len(data_true), len(data_pred))
return evaluation(data_pred, data_true)
def evaluation(y_pred, y_true, flag="macro"):
"""evaluation"""
return accuracy_score(y_true, y_pred), recall_score(y_true, y_pred, average=flag), \
precision_score(y_true, y_pred, average=flag), f1_score(y_true, y_pred, average=flag)
def optimizer_func(learning_rate):
"""optimizer_func"""
return fluid.optimizer.Adagrad(learning_rate=learning_rate)
def train(config):
"""train"""
print("Reading data...")
train_reader, val_reader, test_reader = get_point_data(
config.TRAIN_RATE, config.VAL_RATE, config.BATCH_SIZE, config.DOWN_SAMPLING, config.GOAL_FILE)
feed_order = ["sequence", "label", "current", "goal", "idx"]
main_program = fluid.default_main_program()
star_program = fluid.default_startup_program()
main_program.random_seed = 90
star_program.random_seed = 90
prediction = inference_program(config)
avg_cost = train_program(prediction)
val_program = main_program.clone(for_test=True)
sgd_optimizer = optimizer_func(config.LEARNING_RATE)
sgd_optimizer.minimize(avg_cost)
exe = fluid.Executor(config.DEVICE)
def val_loop(program, reader):
"""val_loop"""
feed_var_list = [program.global_block().var(var_name) for var_name in feed_order]
feeder_var = fluid.DataFeeder(feed_list=feed_var_list, place=config.DEVICE)
val_exe = fluid.Executor(config.DEVICE)
val_pred, val_prob, val_true, val_type, val_idx = list(), list(), list(), list(), list()
for val_data in reader():
preds = val_exe.run(
program=program,
feed=feeder_var.feed(val_data),
fetch_list=prediction
)
preds = np.squeeze(np.array(preds))
for pred in preds:
if pred <= 0.5:
val_pred.append(0)
else:
val_pred.append(1)
val_prob.append(pred)
for vd in val_data:
val_type.append(vd[0][-1])
val_true.append(vd[1])
val_idx.append(vd[-1])
bi_acc, bi_rec, bi_pre, bi_f1 = evaluation(val_pred, val_true)
ag_acc, ag_rec, ag_pre, ag_f1 = argmax_metrics(val_prob, val_true, val_type, val_idx)
return bi_acc, bi_rec, bi_pre, bi_f1, ag_acc, ag_rec, ag_pre, ag_f1
def train_loop():
"""train_loop"""
feed_var_list_loop = [main_program.global_block().var(var_name) for var_name in feed_order]
feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=config.DEVICE)
exe.run(star_program)
max_epoch, max_acc = 0, 0
for epoch_id in range(config.NUM_EPOCH):
train_loss = list()
train_acc = list()
for batch_id, data in enumerate(train_reader()):
metrics = exe.run(
main_program,
feed=feeder.feed(data),
fetch_list=[avg_cost, prediction]
)
train_loss.append(np.array(metrics[0]))
train_acc.append(train_accuracy(np.array(metrics[1]), data))
bi_acc, bi_rec, bi_pre, bi_f1, ag_acc, ag_rec, ag_pre, ag_f1 = val_loop(val_program, val_reader)
print("Epoch-%d, Train-Loss:%.4f, Train-Acc:%.4f, BI-Acc:%.4f, \
BI-Rec:%.4f, BI-Pre:%.4f, BI-F1:%.4f, AG-Acc:%.4f, AG-Rec:%.4f, AG-Pre:%.4f, AG-F1:%.4f" % (
epoch_id, np.mean(train_loss), np.mean(train_acc),
bi_acc, bi_rec, bi_pre, bi_f1,
ag_acc, ag_rec, ag_pre, ag_f1))
if bi_acc > max_acc:
max_acc = bi_acc
max_epoch = epoch_id
fluid.io.save_inference_model(config.SAVE_PATH, ["sequence", "current", "goal"], prediction, exe)
print("max_epoch: %d, max_acc: %.4f" % (max_epoch, max_acc))
train_loop()
inference(config, test_reader)
def inference(config, reader):
"""inference"""
exe = fluid.Executor(config.DEVICE)
inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope):
[inferencer, feed_target_names, fetch_targets] = fluid.io.load_inference_model(config.SAVE_PATH, exe)
test_pred, test_prob, test_true, test_type, test_idx = list(), list(), list(), list(), list()
for data in reader():
sequence = list()
current = list()
goal = list()
for d in data:
sequence.append(d[0])
current.append(d[1])
goal.append(d[2])
test_type.append(d[0][-1])
test_true.append(d[1])
test_idx.append(d[-1])
base_shape = [[len(seq) for seq in sequence]]
lod_seq = fluid.create_lod_tensor(sequence, base_shape, config.DEVICE)
new_data = {
feed_target_names[0]: lod_seq,
feed_target_names[1]: np.array(current),
feed_target_names[2]: np.array(goal)
}
preds = exe.run(
inferencer,
feed=new_data,
fetch_list=fetch_targets,
return_numpy=False
)[0]
preds = np.squeeze(np.array(preds))
for pred in preds:
if pred <= 0.5:
test_pred.append(0)
else:
test_pred.append(1)
test_prob.append(pred)
return test_pred, test_prob
if __name__ == "__main__":
config = Config()
train(config)
train_reader, val_reader, test_reader = get_point_data(
config.TRAIN_RATE, config.VAL_RATE, config.BATCH_SIZE, config.DOWN_SAMPLING, config.GOAL_FILE)
inference(config, test_reader)
|
tests/samples/threads.py
|
spamegg1/snoop
| 751 |
91693
|
<filename>tests/samples/threads.py
from threading import Thread
from snoop.configuration import Config
snoop = Config(columns='thread').snoop
@snoop
def foo():
return 1
def run(name):
thread = Thread(target=foo, name=name)
thread.start()
thread.join()
def main():
run('short')
run('longername')
run('short')
|
src/main/python/pybuilder/plugins/python/pep8_plugin.py
|
klr8/pybuilder
| 1,419 |
91696
|
<filename>src/main/python/pybuilder/plugins/python/pep8_plugin.py
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pybuilder.core import use_plugin, task, after, init
from pybuilder.plugins.python.python_plugin_helper import execute_tool_on_source_files
from pybuilder.utils import read_file
use_plugin("python.core")
@init
def init_pep8_properties(project):
project.plugin_depends_on("pep8")
@after("prepare")
def check_pep8_available(project, logger, reactor):
logger.debug("Checking availability of pep8")
reactor.python_env_registry["pybuilder"].verify_can_execute(["pep8"], "pep8", "plugin python.pep8")
@task
def analyze(project, logger):
logger.info("Executing pep8 on project sources")
_, report_file = execute_tool_on_source_files(project, "pep8", ["pep8"])
reports = read_file(report_file)
if len(reports) > 0:
logger.warn("Found %d warning%s produced by pep8",
len(reports), "" if len(reports) == 1 else "s")
|
falkon/kernels/distance_kernel.py
|
vishalbelsare/falkon
| 130 |
91717
|
from typing import Union, Optional, Dict
import torch
from falkon import sparse
from falkon.kernels.diff_kernel import DiffKernel
from falkon.la_helpers.square_norm_fn import square_norm_diff
from falkon.options import FalkonOptions
from falkon.sparse import SparseTensor
SQRT3 = 1.7320508075688772
SQRT5 = 2.23606797749979
def validate_sigma(sigma: Union[float, torch.Tensor]) -> torch.Tensor:
if isinstance(sigma, torch.Tensor):
# Sigma is a 1-item tensor ('single')
try:
sigma.item()
return sigma
except ValueError:
pass
# Sigma is a vector ('diag')
if sigma.dim() == 1 or sigma.shape[1] == 1:
return sigma.reshape(-1)
else:
# TODO: Better error
raise ValueError("sigma must be a scalar or a vector.")
else:
try:
return torch.tensor([float(sigma)], dtype=torch.float64)
except TypeError:
raise TypeError("Sigma must be a scalar or a tensor.")
def _sq_dist(mat1, mat2, norm_mat1, norm_mat2, out: Optional[torch.Tensor]) -> torch.Tensor:
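    # Sketch of the identity used below: ||x - y||^2 = ||x||^2 + ||y||^2 - 2<x, y>.
    # baddbmm/addmm compute norm_mat1 - 2 * mat1 @ mat2^T (beta=1, alpha=-2); norm_mat2^T is
    # added afterwards, and clamp_min guards against tiny negative values from cancellation.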
if mat1.dim() == 3:
if out is None:
out = torch.baddbmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1) # b*n*m
else:
out = torch.baddbmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1,
out=out) # b*n*m
else:
if out is None:
out = torch.addmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1) # n*m
else:
out = torch.addmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1,
out=out) # n*m
out.add_(norm_mat2.transpose(-2, -1))
out.clamp_min_(1e-20)
return out
def _sparse_sq_dist(X1_csr: SparseTensor, X2_csr: SparseTensor,
X1: SparseTensor, X2: SparseTensor,
out: torch.Tensor) -> torch.Tensor:
sq1 = torch.empty(X1_csr.size(0), dtype=X1_csr.dtype, device=X1_csr.device)
sparse.sparse_square_norm(X1_csr, sq1) # TODO: This must be implemented for CUDA tensors
sq1 = sq1.reshape(-1, 1)
sq2 = torch.empty(X2_csr.size(0), dtype=X2_csr.dtype, device=X2_csr.device)
sparse.sparse_square_norm(X2_csr, sq2)
sq2 = sq2.reshape(-1, 1)
sparse.sparse_matmul(X1, X2, out)
out.mul_(-2.0)
out.add_(sq1.to(device=X1.device))
out.add_(sq2.to(device=X2.device).t())
out.clamp_min_(1e-20)
return out
def rbf_core(mat1, mat2, out: Optional[torch.Tensor], sigma):
"""
Note 1: if out is None, then this function will be differentiable wrt all three remaining inputs.
Note 2: this function can deal with batched inputs
Parameters
----------
    sigma
        Kernel length-scale (scalar or per-dimension vector); both inputs are divided by it.
    mat1
        Left input matrix, optionally batched (``n*d`` or ``b*n*d``).
    mat2
        Right input matrix, optionally batched (``m*d`` or ``b*m*d``).
    out
        Optional pre-allocated tensor in which the kernel is written in-place.
    Returns
    -------
    The RBF kernel matrix between ``mat1`` and ``mat2``.
"""
mat1_div_sig = mat1 / sigma
mat2_div_sig = mat2 / sigma
norm_sq_mat1 = square_norm_diff(mat1_div_sig, -1, True) # b*n*1 or n*1
norm_sq_mat2 = square_norm_diff(mat2_div_sig, -1, True) # b*m*1 or m*1
out = _sq_dist(mat1_div_sig, mat2_div_sig, norm_sq_mat1, norm_sq_mat2, out)
out.mul_(-0.5)
out.exp_()
return out
def rbf_core_sparse(mat1: SparseTensor, mat2: SparseTensor,
mat1_csr: SparseTensor, mat2_csr: SparseTensor,
out: torch.Tensor, sigma) -> torch.Tensor:
gamma = 0.5 / (sigma ** 2)
out = _sparse_sq_dist(X1_csr=mat1_csr, X2_csr=mat2_csr, X1=mat1, X2=mat2, out=out)
out.mul_(-gamma)
out.exp_()
return out
def laplacian_core(mat1, mat2, out: Optional[torch.Tensor], sigma):
mat1_div_sig = mat1 / sigma
mat2_div_sig = mat2 / sigma
norm_sq_mat1 = square_norm_diff(mat1_div_sig, -1, True) # b*n*1
norm_sq_mat2 = square_norm_diff(mat2_div_sig, -1, True) # b*m*1
orig_out = out
out = _sq_dist(mat1_div_sig, mat2_div_sig, norm_sq_mat1, norm_sq_mat2, out)
out.sqrt_() # Laplacian: sqrt of squared-difference
# The gradient calculation needs the output of sqrt_
if orig_out is None: # TODO: We could be more explicit in the parameters about whether the gradient is or isn't needed
out = out.neg()
else:
out.neg_()
out.exp_()
return out
def laplacian_core_sparse(mat1: SparseTensor, mat2: SparseTensor,
mat1_csr: SparseTensor, mat2_csr: SparseTensor,
out: torch.Tensor, sigma) -> torch.Tensor:
gamma = 1 / sigma
out = _sparse_sq_dist(X1_csr=mat1_csr, X2_csr=mat2_csr, X1=mat1, X2=mat2, out=out)
out.sqrt_()
out.mul_(-gamma)
out.exp_()
return out
def matern_core(mat1, mat2, out: Optional[torch.Tensor], sigma, nu):
if nu == 0.5:
return laplacian_core(mat1, mat2, out, sigma)
elif nu == float('inf'):
return rbf_core(mat1, mat2, out, sigma)
orig_out = out
mat1_div_sig = mat1 / sigma
mat2_div_sig = mat2 / sigma
norm_sq_mat1 = square_norm_diff(mat1_div_sig, -1, True) # b*n*1
norm_sq_mat2 = square_norm_diff(mat2_div_sig, -1, True) # b*m*1
out = _sq_dist(mat1_div_sig, mat2_div_sig, norm_sq_mat1, norm_sq_mat2, out)
if nu == 1.5:
# (1 + sqrt(3)*D) * exp(-sqrt(3)*D))
out.sqrt_()
if orig_out is None: # TODO: We could be more explicit in the parameters about whether the gradient is or isn't needed
out = out.mul(SQRT3)
else:
out.mul_(SQRT3)
out_neg = torch.neg(out) # extra n*m block
out_neg.exp_()
out.add_(1.0).mul_(out_neg)
elif nu == 2.5:
# (1 + sqrt(5)*D + (sqrt(5)*D)^2 / 3 ) * exp(-sqrt(5)*D)
out_sqrt = torch.sqrt(out)
if orig_out is None: # TODO: We could be more explicit in the parameters about whether the gradient is or isn't needed
out_sqrt = out_sqrt.mul(SQRT5)
else:
out_sqrt.mul_(SQRT5)
out.mul_(5.0 / 3.0).add_(out_sqrt).add_(1.0)
out_sqrt.neg_().exp_()
out.mul_(out_sqrt)
return out
def matern_core_sparse(mat1: SparseTensor, mat2: SparseTensor,
mat1_csr: SparseTensor, mat2_csr: SparseTensor,
out: torch.Tensor, sigma, nu) -> torch.Tensor:
if nu == 0.5:
return laplacian_core_sparse(mat1, mat2, mat1_csr, mat2_csr, out, sigma)
elif nu == float('inf'):
return rbf_core_sparse(mat1, mat2, mat1_csr, mat2_csr, out, sigma)
gamma = 1 / (sigma ** 2)
out = _sparse_sq_dist(X1_csr=mat1_csr, X2_csr=mat2_csr, X1=mat1, X2=mat2, out=out)
out.mul_(gamma)
# For certain nu = 1.5, 2.5 we will need an extra n*m block
if nu == 1.5:
# (1 + sqrt(3)*D) * exp(-sqrt(3)*D))
out.sqrt_()
out.mul_(SQRT3)
out_neg = torch.neg(out)
out_neg.exp_()
out.add_(1.0).mul_(out_neg)
elif nu == 2.5:
# (1 + sqrt(5)*D + (sqrt(5)*D)^2 / 3 ) * exp(-sqrt(5)*D)
out_sqrt = torch.sqrt(out)
out_sqrt.mul_(SQRT5)
out.mul_(5.0 / 3.0).add_(out_sqrt).add_(1.0)
out_sqrt.neg_().exp_()
out.mul_(out_sqrt)
return out
class GaussianKernel(DiffKernel):
r"""Class for computing the Gaussian kernel and related kernel-vector products
The Gaussian kernel is one of the most common and effective kernel embeddings
since it is infinite dimensional, and governed by a single parameter. The kernel length-scale
determines the width of the Gaussian distribution which is placed on top of each point.
A larger sigma corresponds to a wide Gaussian, so that the relative influence of far away
points will be high for computing the kernel at a given datum.
On the opposite side of the spectrum, a small sigma means that only nearby points will
influence the kernel.
Parameters
-----------
sigma
The length-scale of the kernel.
This can be a scalar, and then it corresponds to the standard deviation
of the Gaussian distribution from which the kernel is derived.
If `sigma` is a vector of size `d` (where `d` is the dimensionality of the data), it is
interpreted as the diagonal standard deviation of the Gaussian distribution.
        It can also be a matrix of size `d*d` (where `d` is the dimensionality of the data), in
        which case sigma will be interpreted as the precision matrix (inverse covariance).
opt
Additional options to be forwarded to the matrix-vector multiplication
routines.
Examples
--------
Creating a Gaussian kernel with a single length-scale. Operations on this kernel will not
use KeOps.
>>> K = GaussianKernel(sigma=3.0, opt=FalkonOptions(keops_active="no"))
Creating a Gaussian kernel with a different length-scale per dimension
>>> K = GaussianKernel(sigma=torch.tensor([1.0, 3.5, 7.0]))
Creating a Gaussian kernel object with full covariance matrix (randomly chosen)
>>> mat = torch.randn(3, 3, dtype=torch.float64)
>>> sym_mat = mat @ mat.T
>>> K = GaussianKernel(sigma=sym_mat)
>>> K
GaussianKernel(sigma=tensor([[ 2.0909, 0.0253, -0.2490],
[ 0.0253, 0.3399, -0.5158],
[-0.2490, -0.5158, 4.4922]], dtype=torch.float64)) #random
Notes
-----
The Gaussian kernel with a single length-scale follows
.. math::
k(x, x') = \exp{-\dfrac{\lVert x - x' \rVert^2}{2\sigma^2}}
When the length-scales are specified as a matrix, the RBF kernel is determined by
.. math::
        k(x, x') = \exp{-\dfrac{1}{2} (x - x')^\top \Sigma (x - x')}
In both cases, the actual computation follows a different path, working on the expanded
norm.
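    Concretely (a standard identity, noted here for reference), the expansion used is
    .. math::
        \lVert x - x' \rVert^2 = \lVert x \rVert^2 + \lVert x' \rVert^2 - 2 \langle x, x' \rangle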
"""
kernel_name = "gaussian"
core_fn = rbf_core
def __init__(self, sigma: Union[float, torch.Tensor], opt: Optional[FalkonOptions] = None):
self.sigma = validate_sigma(sigma)
super().__init__(self.kernel_name, opt, core_fn=GaussianKernel.core_fn, sigma=self.sigma)
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
formula = 'Exp(SqDist(x1 / g, x2 / g) * IntInv(-2)) * v'
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'g = Pm(%d)' % (self.sigma.shape[0])
]
other_vars = [self.sigma.to(device=X1.device, dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def extra_mem(self) -> Dict[str, float]:
return {
# Data-matrix / sigma in prepare + Data-matrix / sigma in apply
'nd': 2,
'md': 1,
# Norm results in prepare
'm': 1,
'n': 1,
}
def detach(self) -> 'GaussianKernel':
detached_params = self._detach_params()
return GaussianKernel(detached_params["sigma"], opt=self.params)
# noinspection PyMethodOverriding
def compute_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor,
X1_csr: SparseTensor, X2_csr: SparseTensor) -> torch.Tensor:
if len(self.sigma) > 1:
raise NotImplementedError("Sparse kernel is only implemented for scalar sigmas.")
dev_kernel_tensor_params = self._move_kernel_params(X1)
return rbf_core_sparse(X1, X2, X1_csr, X2_csr, out, dev_kernel_tensor_params["sigma"])
def __repr__(self):
return f"GaussianKernel(sigma={self.sigma})"
def __str__(self):
return f"Gaussian kernel<{self.sigma}>"
class LaplacianKernel(DiffKernel):
r"""Class for computing the Laplacian kernel, and related kernel-vector products.
The Laplacian kernel is similar to the Gaussian kernel, but less sensitive to changes
in the parameter `sigma`.
Parameters
----------
sigma
The length-scale of the Laplacian kernel
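    Examples
    --------
    A minimal, illustrative usage sketch with a single length-scale:
    >>> K = LaplacianKernel(sigma=2.0)
    Creating a Laplacian kernel with a different length-scale per dimension:
    >>> K = LaplacianKernel(sigma=torch.tensor([1.0, 3.5, 7.0]))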
Notes
-----
The Laplacian kernel is determined by the following formula
.. math::
k(x, x') = \exp{-\frac{\lVert x - x' \rVert}{\sigma}}
"""
kernel_name = "laplacian"
def __init__(self, sigma: Union[float, torch.Tensor], opt: Optional[FalkonOptions] = None):
self.sigma = validate_sigma(sigma)
super().__init__(self.kernel_name, opt, core_fn=laplacian_core, sigma=self.sigma)
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
formula = 'Exp(-Sqrt(SqDist(x1 / g, x2 / g))) * v'
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'g = Pm(%d)' % (self.sigma.shape[0])
]
other_vars = [self.sigma.to(device=X1.device, dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def extra_mem(self) -> Dict[str, float]:
return {
# Data-matrix / sigma in prepare + Data-matrix / sigma in apply
'nd': 2,
'md': 1,
# Norm results in prepare
'm': 1,
'n': 1,
}
def detach(self) -> 'LaplacianKernel':
detached_params = self._detach_params()
return LaplacianKernel(detached_params["sigma"], opt=self.params)
# noinspection PyMethodOverriding
def compute_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor,
X1_csr: SparseTensor, X2_csr: SparseTensor) -> torch.Tensor:
if len(self.sigma) > 1:
raise NotImplementedError("Sparse kernel is only implemented for scalar sigmas.")
dev_kernel_tensor_params = self._move_kernel_params(X1)
return laplacian_core_sparse(X1, X2, X1_csr, X2_csr, out, dev_kernel_tensor_params["sigma"])
def __repr__(self):
return f"LaplacianKernel(sigma={self.sigma})"
def __str__(self):
return f"Laplaciankernel<{self.sigma}>"
class MaternKernel(DiffKernel):
r"""Class for computing the Matern kernel, and related kernel-vector products.
The Matern kernels define a generic class of kernel functions which includes the
Laplacian and Gaussian kernels. The class is parametrized by 'nu'. When `nu = 0.5`
this kernel is equivalent to the Laplacian kernel, when `nu = float('inf')`, the
Matern kernel is equivalent to the Gaussian kernel.
This class implements the Matern kernel only for the values of nu which have a closed
form solution, which are 0.5, 1.5, 2.5, and infinity.
Parameters
----------
sigma
The length-scale of the Matern kernel. The length-scale can be either a scalar
or a vector. Matrix-valued length-scales are not allowed for the Matern kernel.
nu
The parameter of the Matern kernel. It should be one of `0.5`, `1.5`, `2.5` or
`inf`.
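    Examples
    --------
    An illustrative sketch creating a Matern kernel with ``nu=1.5`` and a single length-scale:
    >>> K = MaternKernel(sigma=1.0, nu=1.5)
    With ``nu=float('inf')`` the kernel coincides with the Gaussian kernel:
    >>> K = MaternKernel(sigma=1.0, nu=float('inf'))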
Notes
-----
While for `nu = float('inf')` this kernel is equivalent to the :class:`GaussianKernel`,
the implementation is more general and using the :class:`GaussianKernel` directly
may be computationally more efficient.
"""
_valid_nu_values = frozenset({0.5, 1.5, 2.5, float('inf')})
def __init__(self,
sigma: Union[float, torch.Tensor],
nu: Union[float, torch.Tensor],
opt: Optional[FalkonOptions] = None):
self.sigma = validate_sigma(sigma)
self.nu = self.validate_nu(nu)
self.kernel_name = f"{self.nu:.1f}-matern"
super().__init__(self.kernel_name, opt, core_fn=matern_core, sigma=self.sigma, nu=self.nu)
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
if self.nu == 0.5:
formula = 'Exp(-Norm2(x1 / s - x2 / s)) * v'
elif self.nu == 1.5:
formula = ('(IntCst(1) + Sqrt(IntCst(3)) * Norm2(x1 / s - x2 / s)) * '
'(Exp(-Sqrt(IntCst(3)) * Norm2(x1 / s - x2 / s)) * v)')
elif self.nu == 2.5:
formula = ('(IntCst(1) + Sqrt(IntCst(5)) * Norm2(x1 / s - x2 / s) + '
'(IntInv(3) * IntCst(5)) * SqNorm2(x1 / s - x2 / s)) * '
'(Exp(-Sqrt(IntCst(5)) * Norm2(x1 / s - x2 / s)) * v)')
elif self.nu == float('inf'):
formula = 'Exp(IntInv(-2) * SqDist(x1 / s, x2 / s)) * v'
else:
raise RuntimeError(f"Unrecognized value of nu ({self.nu}). "
f"The onnly allowed values are 0.5, 1.5, 2.5, inf.")
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
's = Pm(%d)' % (self.sigma.shape[0])
]
other_vars = [self.sigma.to(device=X1.device, dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def extra_mem(self) -> Dict[str, float]:
extra_mem = {
# Data-matrix / sigma
'nd': 1,
'md': 1,
# Norm results in prepare
'm': 1,
'n': 1,
}
if self.nu in {1.5, 2.5}:
# Extra kernel block in transform
extra_mem['nm'] = 1
return extra_mem
def detach(self) -> 'MaternKernel':
detached_params = self._detach_params()
return MaternKernel(detached_params["sigma"], detached_params["nu"], opt=self.params)
@staticmethod
def validate_nu(nu: Union[torch.Tensor, float]) -> float:
if isinstance(nu, torch.Tensor):
if nu.requires_grad:
raise ValueError("The nu parameter of the Matern kernel is not differentiable, "
"and must not require gradients.")
try:
out_nu = round(nu.item(), ndigits=2)
except ValueError:
raise ValueError("nu=%s is not convertible to a scalar." % (nu))
elif isinstance(nu, float):
out_nu = round(nu, ndigits=2)
else:
raise TypeError(f"nu must be a float or a tensor, not a {type(nu)}")
if out_nu not in MaternKernel._valid_nu_values:
raise ValueError(f"The given value of nu = {out_nu} can only take "
f"values {MaternKernel._valid_nu_values}.")
return out_nu
# noinspection PyMethodOverriding
def compute_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor,
X1_csr: SparseTensor, X2_csr: SparseTensor) -> torch.Tensor:
if len(self.sigma) > 1:
raise NotImplementedError("Sparse kernel is only implemented for scalar sigmas.")
dev_kernel_tensor_params = self._move_kernel_params(X1)
return matern_core_sparse(X1, X2, X1_csr, X2_csr, out, dev_kernel_tensor_params["sigma"],
self.nu)
def __repr__(self):
return f"MaternKernel(sigma={self.sigma}, nu={self.nu:.1f})"
def __str__(self):
return f"Matern kernel<{self.sigma}, {self.nu:.1f}>"
|
cctbx/examples/exp_i_alpha_derivatives.py
|
dperl-sol/cctbx_project
| 155 |
91759
|
from __future__ import absolute_import, division, print_function
import cmath
import math
from six.moves import zip
class least_squares:
def __init__(self, obs, calc):
self.obs = obs
self.calc = calc
a, b = self.calc.real, self.calc.imag
self.abs_calc = math.sqrt(a**2 + b**2)
self.delta = self.obs - self.abs_calc
def f(self):
"Mathematica: f=(obs-Sqrt[a^2+b^2])^2"
return self.delta**2
def da(self):
"Mathematica: D[f,a]"
if (self.abs_calc == 0): return 0
return -2 * self.delta * self.calc.real / self.abs_calc
def db(self):
"Mathematica: D[f,b]"
if (self.abs_calc == 0): return 0
return -2 * self.delta * self.calc.imag / self.abs_calc
def daa(self):
"Mathematica: FortranForm[FullSimplify[D[f,a,a]]]"
ac = self.abs_calc
if (ac == 0):
if (self.obs == 0): return 2
return -1.e160
return 2 - (2*self.calc.imag**2*self.obs)/ac/ac/ac
def dbb(self):
"Mathematica: FortranForm[FullSimplify[D[f,b,b]]]"
ac = self.abs_calc
if (ac == 0):
if (self.obs == 0): return 2
return -1.e160
return 2 - (2*self.calc.real**2*self.obs)/ac/ac/ac
def dab(self):
"Mathematica: FortranForm[FullSimplify[D[f,a,b]]]"
ac = self.abs_calc
if (ac == 0):
if (self.obs == 0): return 0
return 1.e160
return (2*self.calc.real*self.calc.imag*self.obs)/ac/ac/ac
class exp_i_alpha_sum:
def __init__(self, alphas):
self.alphas = alphas
def f(self):
"Mathematica: f=Exp[I alpha]"
result = 0
for alpha in self.alphas:
result += cmath.exp(1j*alpha)
return result
def d_alphas(self):
"Mathematica: D[f,alpha]"
return [1j*cmath.exp(1j*alpha) for alpha in self.alphas]
def d2_alphas(self):
"Mathematica: D[f,alpha,alpha]"
return [-cmath.exp(1j*alpha) for alpha in self.alphas]
def d_target_d_alphas(self, target):
"Rule for derivatives of sum of roots of unity."
da, db = target.da(), target.db()
return [da * d.real + db * d.imag for d in self.d_alphas()]
def d2_target_d_alphas(self, target):
"Product rule applied to da * d.real + db * d.imag."
result = []
da, db = target.da(), target.db()
daa, dbb, dab = target.daa(), target.dbb(), target.dab()
d = self.d_alphas()
d2 = self.d2_alphas()
for di,d2i in zip(d, d2):
row = []
for dj in d:
sum = daa * di.real * dj.real \
+ dbb * di.imag * dj.imag \
+ dab * (di.real * dj.imag + di.imag * dj.real)
if (di is dj):
sum += da * d2i.real + db * d2i.imag
row.append(sum)
result.append(row)
return result
|
python/tinyflow/_util.py
|
irvingzhang0512/tinyflow
| 2,035 |
91760
|
<reponame>irvingzhang0512/tinyflow
from __future__ import absolute_import as _abs
import json
from nnvm import symbol, graph
def infer_variable_shapes(net, feed_dict):
"""Inference shape of all variables in the net.
Parameters
----------
net : tf.Symbol
The symbolic network containing all the variables.
feed_dict : dict
dict of placeholder to known shape
Returns
-------
Generator of (var, vname, vshape)
Enables enumeration of variables in the net with corresponding name and shape.
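    Examples
    --------
    Illustrative sketch (assuming ``net`` is a symbol and ``x`` one of its placeholders):
    >>> for var, vname, vshape in infer_variable_shapes(net, {x: (4, 3)}):
    ...     print(vname, vshape)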
"""
g = graph.create(net)
jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
jnode_row_ptr = jgraph["node_row_ptr"]
jnodes = jgraph["nodes"]
shape = [[]] * jnode_row_ptr[-1]
nindex = {n['name']: i for i, n in enumerate(jnodes)}
for k, v in feed_dict.items():
node_name = k.attr("name")
shape[jnode_row_ptr[nindex[node_name]]] = v
g._set_json_attr("shape", shape, "list_shape")
g = g.apply("InferShape")
shape = g.json_attr("shape")
ret = {}
for v in net.list_input_variables():
vname = v.attr("name")
vshape = shape[jnode_row_ptr[nindex[vname]]]
if len(vshape) == 0:
raise ValueError("not sufficient information in feed_dict")
yield (v, vname, vshape)
|
DQM/EcalPreshowerMonitorModule/python/EcalPreshowerMonitorTasks_withFEDIntegrity_cfi.py
|
ckamtsikis/cmssw
| 852 |
91770
|
<filename>DQM/EcalPreshowerMonitorModule/python/EcalPreshowerMonitorTasks_withFEDIntegrity_cfi.py<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from DQM.EcalPreshowerMonitorModule.ESRawDataTask_cfi import *
from DQM.EcalPreshowerMonitorModule.ESIntegrityTask_cfi import *
from DQM.EcalPreshowerMonitorModule.ESFEDIntegrityTask_cfi import *
#from DQM.EcalPreshowerMonitorModule.ESPedestalTask_cfi import *
from DQM.EcalPreshowerMonitorModule.ESOccupancyTask_cfi import *
from DQM.EcalPreshowerMonitorModule.ESTimingTask_cfi import *
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
dqmInfoES = DQMEDAnalyzer('DQMEventInfo',
subSystemFolder = cms.untracked.string('EcalPreshower')
)
#ecalPreshowerDefaultTasksSequence = cms.Sequence(ecalPreshowerOccupancyTask*ecalPreshowerPedestalTask)
ecalPreshowerDefaultTasksSequence = cms.Sequence(ecalPreshowerRawDataTask*ecalPreshowerFEDIntegrityTask*ecalPreshowerIntegrityTask*ecalPreshowerOccupancyTask*ecalPreshowerTimingTask)
|
tests/components/ecobee/test_util.py
|
tbarbette/core
| 30,023 |
91785
|
"""Tests for the ecobee.util module."""
import pytest
import voluptuous as vol
from homeassistant.components.ecobee.util import ecobee_date, ecobee_time
async def test_ecobee_date_with_valid_input():
"""Test that the date function returns the expected result."""
test_input = "2019-09-27"
assert ecobee_date(test_input) == test_input
async def test_ecobee_date_with_invalid_input():
"""Test that the date function raises the expected exception."""
test_input = "20190927"
with pytest.raises(vol.Invalid):
ecobee_date(test_input)
async def test_ecobee_time_with_valid_input():
"""Test that the time function returns the expected result."""
test_input = "20:55:15"
assert ecobee_time(test_input) == test_input
async def test_ecobee_time_with_invalid_input():
"""Test that the time function raises the expected exception."""
test_input = "20:55"
with pytest.raises(vol.Invalid):
ecobee_time(test_input)
|
larq_compute_engine/mlir/python/converter_test.py
|
simonmaurer/compute-engine
| 193 |
91791
|
import sys
import unittest
from packaging import version
from unittest import mock
import tensorflow as tf
import larq_zoo as lqz
from tensorflow.python.eager import context
sys.modules["importlib.metadata"] = mock.MagicMock()
sys.modules["importlib_metadata"] = mock.MagicMock()
sys.modules["larq_compute_engine.mlir._tf_tfl_flatbuffer"] = mock.MagicMock()
sys.modules[
"larq_compute_engine.tflite.python.interpreter_wrapper_lite"
] = mock.MagicMock()
sys.modules["larq_compute_engine.mlir.python.tflite_schema"] = mock.MagicMock()
from larq_compute_engine.mlir.python.converter import convert_keras_model
from larq_compute_engine.mlir._tf_tfl_flatbuffer import (
convert_graphdef_to_tflite_flatbuffer as mocked_graphdef_converter,
convert_saved_model_to_tflite_flatbuffer as mocked_saved_model_converter,
)
class TestConverter(unittest.TestCase):
def test_larq_zoo_models(self):
with context.eager_mode():
model = lqz.sota.QuickNet(weights=None)
convert_keras_model(model)
if version.parse(tf.__version__) < version.parse("2.2"):
mocked_graphdef_converter.assert_called_once_with(
mock.ANY,
["input_1"],
["DT_FLOAT"],
[[1, 224, 224, 3]],
["Identity"],
False,
"arm",
None,
False,
)
else:
mocked_saved_model_converter.assert_called_once_with(
mock.ANY, ["serve"], ["serving_default"], 1, "arm", None, False
)
def test_wrong_arg(self):
with self.assertRaises(ValueError):
convert_keras_model("./model.h5")
def test_target_arg(self):
with context.eager_mode():
model = lqz.sota.QuickNet(weights=None)
# These should work
convert_keras_model(model, target="arm")
convert_keras_model(model, target="xcore")
# Anything else shouldn't
with self.assertRaises(
ValueError, msg='Expected `target` to be "arm" or "xcore"'
):
convert_keras_model(model, target="x86")
if __name__ == "__main__":
unittest.main()
|
pymagnitude/third_party/_apsw/setup.py
|
tpeng/magnitude
| 1,520 |
91813
|
<reponame>tpeng/magnitude
#!/usr/bin/env python
# See the accompanying LICENSE file.
import os
import sys
import shlex
import glob
import re
import time
import zipfile
import tarfile
try:
if not os.environ.get("APSW_FORCE_DISTUTILS"):
from setuptools import setup, Extension, Command
else:
raise ImportError()
except ImportError:
from distutils.core import setup, Extension, Command
from distutils.command import build_ext, build, sdist
##
## Do your customizations here or by creating a setup.cfg as documented at
## http://www.python.org/doc/2.5.2/dist/setup-config.html
##
include_dirs=['src']
library_dirs=[]
define_macros=[]
libraries=[]
# This includes the functionality marked as experimental in SQLite 3.
# Comment out the line to exclude them
define_macros.append( ('EXPERIMENTAL', '1') )
##
## End of customizations
##
# python 2 and 3 print equivalent
def write(*args):
# py2 won't allow optional keyword arg on end, so work around it
dest=sys.stdout
if args[-1]==sys.stderr:
dest=args[-1]
args=args[:-1]
dest.write(" ".join(args)+"\n")
dest.flush()
py3=sys.version_info>=(3,0)
# ensure files are closed
def read_whole_file(name, mode):
if sys.version_info<(2,4):
if "r" in mode and "U" in mode:
# python 2.3 returns file not found if "U" present!
mode="".join([m for m in mode if m!="U"])
f=open(name, mode)
try:
return f.read()
finally:
f.close()
def write_whole_file(name, mode, data):
f=open(name, mode)
try:
f.write(data)
finally:
f.close()
# They keep messing with where files are in URI
def fixup_download_url(url):
ver=re.search("3[0-9]{6}", url)
if ver:
ver=int(ver.group(0))
if ver>=3071600:
if ver>=3220000:
year="2018"
elif ver>=3160000:
year="2017"
elif ver>=3100000:
year="2016"
elif ver>=3080800:
year="2015"
elif ver>=3080300:
year="2014"
else:
year="2013"
if "/"+year+"/" not in url:
url=url.split("/")
url.insert(3, year)
return "/".join(url)
return url
# Run test suite
class run_tests(Command):
description="Run test suite"
# I did originally try using 'verbose' as the option but it turns
# out that is builtin and defaults to 1 (--quiet is also builtin
# and forces verbose to 0)
user_options=[
("show-tests", "s", "Show each test being run"),
]
# see if you can find boolean_options documented anywhere
boolean_options=['show-tests']
def initialize_options(self):
self.show_tests=0
def finalize_options(self):
pass
def run(self):
import unittest
import tests
tests.setup()
suite=unittest.TestLoader().loadTestsFromModule(tests)
# verbosity of zero doesn't print anything, one prints a dot
# per test and two prints each test name
result=unittest.TextTestRunner(verbosity=self.show_tests+1).run(suite)
if not result.wasSuccessful():
sys.exit(1)
# A hack we don't document
class build_test_extension(Command):
description="Compiles APSW test loadable extension"
user_options=[]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# On 64 bit windows we have to use MSVC
if sys.platform=='win32': # yes even on 64 bit
try:
import platform
if platform.architecture()[0]=='64bit':
res=os.system("cl /Gd src/testextension.c /I sqlite3 /I . /DDLL /LD /link /export:sqlite3_extension_init /export:alternate_sqlite3_extension_init /out:testextension.sqlext")
if res!=0:
raise RuntimeError("Building test extension failed")
return
except ImportError:
pass
shared="shared"
if sys.platform.startswith("darwin"):
shared="bundle"
res=os.system("gcc -fPIC -%s -o testextension.sqlext -Isqlite3 -I. src/testextension.c" % (shared,))
if res!=0:
raise RuntimeError("Building test extension failed")
# Another hack. Visual Studio 2008 & 2010 ship with 64 bit
# compilers, headers and the Windows SDK but claims it doesn't and
# distutils can't find it. The separate Windows SDK can't find this
# and gets very confused not to mention being one of the buggiest cmd
# scripts I have ever seen. This hack just sets some environment
# variables directly since all the "proper" ways are very broken.
class win64hackvars(Command):
description="Set env vars for Visual Studio 2008/2010 Express 64 bit"
user_options=[]
def initialize_options(self): pass
def finalize_options(self): pass
def run(self):
vcver=9
if sys.version_info>=(3,3):
vcver=10
sdkdir=r"C:\Program Files\Microsoft SDKs\Windows\v6.0A"
vsdir=r"C:\Program Files (x86)\Microsoft Visual Studio %d.0\VC" % vcver
assert os.path.isdir(sdkdir), "Expected sdk dir "+sdkdir
assert os.path.isdir(vsdir), "Expected visual studio dir "+vsdir
os.environ["PATH"]=r"%s\bin\amd64;%s\bin" % (vsdir, sdkdir)
os.environ["INCLUDE"]=r"%s\include;%s\include" % (vsdir, sdkdir)
os.environ["LIB"]=r"%s\lib\amd64;%s\lib\x64" % (vsdir, sdkdir)
os.environ["DISTUTILS_USE_SDK"]="1"
os.environ["MSSdk"]=sdkdir
# deal with various python version compatibility issues with how
# to treat returned web data as lines of text
def fixupcode(code):
if sys.version_info<(2,5):
if type(code)!=str:
code=code.read()
if sys.version_info>=(3,0):
if type(code)!=bytes:
code=code.read()
if type(code)==bytes:
code=code.decode("iso8859-1")
if type(code)==str:
return [l+"\n" for l in code.split("\n")]
return code
fetch_parts=[]
class fetch(Command):
description="Automatically downloads SQLite and components"
user_options=[
("version=", None, "Which version of SQLite/components to get (default current)"),
("missing-checksum-ok", None, "Continue on a missing checksum (default abort)"),
("sqlite", None, "Download SQLite amalgamation"),
("all", None, "Download all downloadable components"),
]
fetch_options=['sqlite']
boolean_options=fetch_options+['all', 'missing-checksum-ok']
def initialize_options(self):
self.version=None
self.sqlite=False
self.all=False
self.missing_checksum_ok=False
def finalize_options(self):
# If all is selected then turn on all components
global fetch_parts
if self.all:
for i in self.fetch_options:
setattr(self, i, True)
for i in self.fetch_options:
fetch_parts.append(i)
def run(self):
# work out the version
if self.version is None:
write(" Getting download page to work out current SQLite version")
page=self.download("https://sqlite.org/download.html", text=True, checksum=False)
match=re.search(r'sqlite-amalgamation-3([0-9][0-9])([0-9][0-9])([0-9][0-9])\.zip', page)
if match:
self.version="3.%d.%d.%d" % tuple([int(match.group(n)) for n in range(1,4)])
if self.version.endswith(".0"):
self.version=self.version[:-len(".0")]
else:
write("Unable to determine current SQLite version. Use --version=VERSION", sys.stderr)
write("to set version - eg setup.py fetch --version=3.6.18", sys.stderr)
sys.exit(17)
write(" Version is "+self.version)
# now get each selected component
downloaded=0
if not self.version.startswith("fossil"):
v=[int(x) for x in self.version.split(".")]
if len(v)<4:
v.append(0)
self.webversion="%d%02d%02d%02d" % tuple(v)
## The amalgamation
if self.sqlite:
if self.version.startswith("fossil"):
write(" Getting code from fossil")
else:
write(" Getting the SQLite amalgamation")
if self.version.startswith("fossil"):
if self.version=="fossil":
uuid="trunk"
else:
showmsg=False
if not self.version.startswith("fossil-"):
showmsg=True
else:
uuid=self.version.split("-", 1)[1]
if not uuid:
showmsg=True
if showmsg:
write("Use fossil-HASH to identify a particular commit", sys.stderr)
write("eg fossil-3a82c8e6", sys.stderr)
sys.exit(18)
AURL="https://sqlite.org/src/zip/sqlite3.zip?uuid="+uuid
checksum=False
else:
if sys.platform=="win32":
AURL="https://sqlite.org/sqlite-amalgamation-%s.zip" % (self.webversion,)
else:
AURL="https://sqlite.org/sqlite-autoconf-%s.tar.gz" % (self.webversion,)
checksum=True
AURL=fixup_download_url(AURL)
data=self.download(AURL, checksum=checksum)
if AURL.endswith(".zip"):
zip=zipfile.ZipFile(data, "r")
for name in "sqlite3.c", "sqlite3.h", "sqlite3ext.h":
write("Extracting", name)
f=[n for n in zip.namelist() if n.endswith(name)]
if len(f)!=1:
raise Exception("Can't find %s in zip. Candidates are %s" % (name, f))
# Work around SQLite 3.7.13 bug where a symbol was
# declared SQLITE_API and extern
data=zip.read(f[0])
if name=="sqlite3.c":
data=data.decode("utf8")
data=data.replace("SQLITE_API extern", "SQLITE_API")
data=data.encode("utf8")
open(name, "wb").write(data)
zip.close()
else:
# we need to run configure to get various -DHAVE_foo flags on non-windows platforms
# delete existing sqlite3 directory if it exists, but save sqlite3config.h if it exists
sqlite3config_h=None
if os.path.exists("sqlite3/sqlite3config.h"):
sqlite3config_h=read_whole_file("sqlite3/sqlite3config.h", "rb")
if os.path.exists('sqlite3'):
for dirpath, dirnames, filenames in os.walk('sqlite3', topdown=False):
for file in filenames:
os.remove(os.path.join(dirpath, file))
for dir in dirnames:
os.rmdir(os.path.join(dirpath, dir))
os.rmdir('sqlite3')
if self.version.startswith("fossil"):
zip=zipfile.ZipFile(data, "r")
for name in zip.namelist():
# extract
if name.endswith("/"):
os.mkdir(name)
else:
open(name, "wb").write(zip.read(name))
zip.close()
else:
# if you get an exception here it is likely that you don't have the python zlib module
import zlib
tar=tarfile.open("nonexistentname to keep old python happy", 'r', data)
configmember=None
for member in tar.getmembers():
tar.extract(member)
# find first file named configure
if not configmember and member.name.endswith("/configure"):
configmember=member
tar.close()
# the directory name has changed a bit with each release so try to work out what it is
if not configmember:
write("Unable to determine directory it extracted to.", dest=sys.stderr)
sys.exit(19)
dirname=configmember.name.split('/')[0]
os.rename(dirname, 'sqlite3')
os.chdir('sqlite3')
if self.version.startswith("fossil"):
write(" Building amalgamation from fossil")
res=os.system("make TOP=. -f Makefile.linux-gcc sqlite3.c && cp src/sqlite3ext.h .")
defs=[]
if sqlite3config_h:
open("sqlite3config.h", "wb").write(sqlite3config_h)
else:
write(" Running configure to work out SQLite compilation flags")
res=os.system("./configure >/dev/null")
defline=None
for line in read_whole_file("Makefile", "rtU").split("\n"):
if line.startswith("DEFS = "):
defline=line
break
if not defline:
write("Unable to determine compile flags. Create sqlite3/sqlite3config.h to manually set.", sys.stderr)
sys.exit(18)
defs=[]
for part in shlex.split(defline):
if part.startswith("-DHAVE"):
part=part[2:]
if '=' in part:
part=part.split('=', 1)
else:
part=(part, )
defs.append(part)
if res!=0:
raise ValueError("Command execution failed")
if defs:
op=open("sqlite3config.h", "wt")
op.write("""
/* This file was generated by parsing how configure altered the Makefile
which isn't used when building python extensions. It is specific to the
machine and developer components on which it was run. */
\n""")
for define in defs:
op.write('#define %s %s\n' % tuple(define))
op.close()
os.chdir("..")
downloaded+=1
if not downloaded:
write("You didn't specify any components to fetch. Use")
write(" setup.py fetch --help")
write("for a list and details")
raise ValueError("No components downloaded")
# A function for verifying downloads
def verifyurl(self, url, data):
d=["%s" % (len(data),)]
try:
import hashlib
d.append(hashlib.sha1(data).hexdigest())
d.append(hashlib.md5(data).hexdigest())
except ImportError:
import sha
d.append(sha.new(data).hexdigest())
import md5
d.append(md5.new(data).hexdigest())
write(" Length:", d[0], " SHA1:", d[1], " MD5:", d[2])
sums=os.path.join(os.path.dirname(__file__), "checksums")
for line in read_whole_file(sums, "rt").split("\n"):
line=line.strip()
if len(line)==0 or line[0]=="#":
continue
l=[l.strip() for l in line.split()]
if len(l)!=4:
write("Invalid line in checksums file:", line, sys.stderr)
raise ValueError("Bad checksums file")
if l[0]==url:
if l[1:]==d:
write(" Checksums verified")
return
if l[1]!=d[0]:
write("Length does not match. Expected", l[1], "download was", d[0])
if l[2]!=d[1]:
write("SHA does not match. Expected", l[2], "download was", d[1])
if l[3]!=d[2]:
write("MD5 does not match. Expected", l[3], "download was", d[2])
write("The download does not match the checksums distributed with APSW.\n"
"The download should not have changed since the checksums were\n"
"generated. The cause could be anything from network corruption\n"
"to a malicious attack.")
raise ValueError("Checksums do not match")
# no matching line
write(" (Not verified. No match in checksums file)")
if not self.missing_checksum_ok:
raise ValueError("No checksum available. Use --missing-checksum-ok option to continue")
# download a url
def download(self, url, text=False, checksum=True):
if py3:
import urllib.request
urlopen=urllib.request.urlopen
import io
bytesio=io.BytesIO
else:
import urllib2
urlopen=urllib2.urlopen
import cStringIO
bytesio=cStringIO.StringIO
write(" Fetching", url)
count=0
while True:
try:
if count:
write(" Try #",str(count+1))
try:
page=urlopen(url).read()
except:
# Degrade to http if https is not supported
e=sys.exc_info()[1]
if "eof occurred in violation of protocol" in str(e).lower() or getattr(e, "reason")=="unknown url type: https":
write(" [Python has https issues - using http instead]")
page=urlopen(url.replace("https://", "http://")).read()
else:
raise
break
except:
write(" Error ", str(sys.exc_info()[1]))
time.sleep(1.3)
count+=1
if count>=5:
raise
if text:
if py3:
page=page.decode("iso8859_1")
if checksum:
self.verifyurl(url, page)
if not text:
page=bytesio(page)
return page
# We allow enable/omit to be specified to build and then pass them to build_ext
build_enable=None
build_omit=False # PLASTICITY
build_enable_all_extensions=True # PLASTICITY
bparent=build.build
class apsw_build(bparent):
user_options=bparent.user_options+\
[ ("enable=", None, "Enable SQLite options (comma separated list)"),
("omit=", None, "Omit SQLite functionality (comma separated list)"),
("enable-all-extensions", None, "Enable all SQLite extensions"),
]
boolean_options=bparent.boolean_options+["enable-all-extensions"]
def initialize_options(self):
v=bparent.initialize_options(self)
self.enable=None
self.omit=None
self.enable_all_extensions=build_enable_all_extensions
return v
def finalize_options(self):
global build_enable, build_omit, build_enable_all_extensions
build_enable=self.enable
build_omit=self.omit
build_enable_all_extensions=self.enable_all_extensions
return bparent.finalize_options(self)
def findamalgamation():
amalgamation=(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "sqlite3.c"),
os.path.join(os.path.dirname(os.path.abspath(__file__)), "sqlite3", "sqlite3.c")
)
for path in amalgamation:
if os.path.exists(path):
return path
return None
def find_in_path(name):
for loc in os.getenv("PATH").split(os.pathsep):
f=os.path.abspath(os.path.join(loc, name))
if os.path.exists(f) or os.path.exists(f.lower()) or os.path.exists(f.lower()+".exe"):
return f
return None
beparent=build_ext.build_ext
class apsw_build_ext(beparent):
user_options=beparent.user_options+\
[ ("enable=", None, "Enable SQLite options (comma separated list)"),
("omit=", None, "Omit SQLite functionality (comma separated list)"),
("enable-all-extensions", None, "Enable all SQLite extensions"),
]
boolean_options=beparent.boolean_options+["enable-all-extensions"]
def initialize_options(self):
v=beparent.initialize_options(self)
self.enable=build_enable
self.omit=build_omit
self.enable_all_extensions=build_enable_all_extensions
return v
def finalize_options(self):
v=beparent.finalize_options(self)
if self.enable_all_extensions:
exts=["fts4", "fts3", "fts3_parenthesis", "rtree", "stat4", "json1", "fts5", "rbu"]
if find_in_path("icu-config"):
exts.append("icu")
if not self.enable:
self.enable=",".join(exts)
else:
self.enable=self.enable+","+",".join(exts)
ext=self.extensions[0]
if not ext.define_macros: ext.define_macros=[]
if not ext.depends: ext.depends=[]
if not ext.include_dirs: ext.include_dirs=[]
if not ext.library_dirs: ext.library_dirs=[]
if not ext.libraries: ext.libraries=[]
# Fixup debug setting
if self.debug:
# distutils forces NDEBUG even with --debug so overcome that
ext.define_macros.append( ('APSW_NO_NDEBUG', '1') ) # double negatives are bad
ext.define_macros.append( ('APSW_TESTFIXTURES', '1') ) # extra test harness code
ext.define_macros.append( ('SQLITE_DEBUG', '1') ) # also does NDEBUG mangling
else:
ext.define_macros.append( ('NDEBUG', '1') )
# fork checker?
if hasattr(os, "fork"):
ext.define_macros.append( ('APSW_FORK_CHECKER', '1') )
ext.define_macros.append( ("SQLITE_MAX_COLUMN", "32767") ) # PLASTICITY
ext.define_macros.append( ("SQLITE_MAX_VARIABLE_NUMBER", "99999") ) # PLASTICITY
# SQLite 3
# Look for amalgamation in our directory or in sqlite3 subdirectory
path=findamalgamation()
if path:
if sys.platform=="win32":
# double quotes get consumed by windows arg processing
ext.define_macros.append( ('APSW_USE_SQLITE_AMALGAMATION', '\\"'+path+'\\"') )
else:
ext.define_macros.append( ('APSW_USE_SQLITE_AMALGAMATION', '"'+path+'"') )
ext.depends.append(path)
# we also add the directory to include path since icu tries to use it
ext.include_dirs.append(os.path.dirname(path))
write("SQLite: Using amalgamation", path)
load_extension=True
else:
load_extension=False
d=os.path.join(os.path.dirname(os.path.abspath(__file__)), "sqlite3")
if os.path.isdir(d):
write("SQLite: Using include/libraries in sqlite3 subdirectory")
ext.include_dirs.append(d)
ext.library_dirs.append(d)
else:
write("SQLite: Using system sqlite include/libraries")
# ext.libraries.append('sqlite3') PLASTICITY
s3config=os.path.join(os.path.dirname(os.path.abspath(__file__)), "sqlite3", "sqlite3config.h")
if os.path.exists(s3config):
if sys.platform=="win32":
ext.define_macros.append( ('APSW_USE_SQLITE_CONFIG', '\\"'+s3config+'\\"') )
else:
ext.define_macros.append( ('APSW_USE_SQLITE_CONFIG', '"'+s3config+'"') )
# enables
addicuinclib=False
if self.enable:
for e in self.enable.split(","):
e=e.strip()
if e.lower()=="load_extension":
load_extension=True
continue
ext.define_macros.append( ("SQLITE_ENABLE_"+e.upper(), 1) )
if e.upper()=="ICU":
addicuinclib=True
os.putenv("APSW_TEST_"+e.upper(), "1")
# See issue #55 where I had left off the 3 in fts3. This code
# tries to catch misspelling the name of an extension.
# However the SQLITE_ENABLE prefix is also used by other
# options - see https://sqlite.org/compile.html but almost
# all of those have _ in them, so our abbreviated and
# hopefully future proof test
if "_" not in e.lower() and \
"memsys" not in e.lower() and \
e.lower() not in ("fts4", "fts3", "rtree", "icu", "iotrace",
"stat2", "stat3", "stat4", "dbstat_vtab",
"fts5", "json1", "rbu"):
write("Unknown enable "+e, sys.stderr)
raise ValueError("Bad enable "+e)
# omits
if self.omit:
for e in self.omit.split(","):
e=e.strip()
if e.lower()=="load_extension":
load_extension=False
ext.define_macros.append( ("SQLITE_OMIT_"+e.upper(), 1) )
if not load_extension:
ext.define_macros.append( ("SQLITE_OMIT_LOAD_EXTENSION", 1) )
# icu
if addicuinclib:
foundicu=False
kwargs={}
if sys.version_info>=(2, 6):
# if posix is true then quotes get stripped such as from -Dfoo="bar"
kwargs["posix"]=False
for part in shlex.split(os.popen("icu-config --cppflags", "r").read(), **kwargs):
if part.startswith("-I"):
ext.include_dirs.append(part[2:])
foundicu=True
elif part.startswith("-D"):
part=part[2:]
if '=' in part:
part=tuple(part.split('=', 1))
else:
part=(part, '1')
ext.define_macros.append(part)
foundicu=True
for part in shlex.split(os.popen("icu-config --ldflags", "r").read(), **kwargs):
if part.startswith("-L"):
ext.library_dirs.append(part[2:])
foundicu=True
elif part.startswith("-l"):
ext.libraries.append(part[2:])
foundicu=True
if foundicu:
write("ICU: Added includes, flags and libraries from icu-config")
else:
write("ICU: Unable to determine includes/libraries for ICU using icu-config")
write("ICU: You will need to manually edit setup.py or setup.cfg to set them")
# shell
if not os.path.exists("src/shell.c") or \
os.path.getmtime("src/shell.c")<os.path.getmtime("tools/shell.py") or \
os.path.getmtime(__file__)>os.path.getmtime("src/shell.c"):
create_c_file("tools/shell.py", "src/shell.c")
# done ...
return v
def run(self):
v=beparent.run(self)
return v
sparent=sdist.sdist
class apsw_sdist(sparent):
user_options=sparent.user_options+[
("add-doc", None, "Includes built documentation from doc/build/html into source"),
]
boolean_options=sparent.boolean_options+["add-doc"]
def initialize_options(self):
sparent.initialize_options(self)
self.add_doc=False
# Were we made from a source archive? If so include the help again
if os.path.isfile("doc/index.html") and os.path.isfile("doc/_sources/pysqlite.txt"):
self.add_doc=True
self.use_defaults=False # they are useless
# Make sure the manifest is regenerated
self.force_manifest=True
# Now do some chicanery. If a source distribution is requested and
# fetch --sqlite was requested then make sure the sqlite amalgamation
# ends up as part of the source distribution.
if fetch_parts:
# Use a temporary file for the manifest
tmpmanifest="MANIFEST.in.tmp"
self.template=tmpmanifest
try:
os.remove(tmpmanifest)
except:
pass
min=open("MANIFEST.in", "rU")
mout=open(tmpmanifest, "wt")
for line in min:
mout.write(line)
min.close()
# os.path.relpath emulation
if "sqlite" in fetch_parts:
amalgamationpath=findamalgamation()
amalrelpath=amalgamationpath[len(os.path.dirname(os.path.abspath(__file__)))+1:]
mout.write("include "+amalrelpath+"\n")
# also include headers and extension headers
mout.write("include "+amalrelpath.replace("sqlite3.c", "sqlite3.h")+"\n")
mout.write("include "+amalrelpath.replace("sqlite3.c", "sqlite3ext.h")+"\n")
if os.path.exists("sqlite3/sqlite3config.h"):
mout.write("include sqlite3/sqlite3config.h\n")
mout.close()
def run(self):
v=sparent.run(self)
if self.add_doc:
if len(list(help_walker('')))==0:
raise Exception("The help is not built")
for archive in self.get_archive_files():
add_doc(archive, self.distribution.get_fullname())
return v
def help_walker(arcdir):
# Provides a list of (archive name, disk name) for all the help files
if os.path.isfile("doc/index.html") and os.path.isfile("doc/_sources/pysqlite.txt"):
topdir="doc/"
else:
topdir="doc/build/html/"
for dirpath, _, filenames in os.walk(topdir):
prefix=dirpath[len(topdir):]
for f in filenames:
yield os.path.join(arcdir, "doc", prefix, f), os.path.join(dirpath, f)
def add_doc(archive, topdir):
write("Add help files to",archive)
if archive.endswith(".tar") or ".tar." in archive:
if archive.endswith(".Z"):
raise Exception("tarfile module doesn't support old school compress so we can't add doc "+archive)
fmt=""
if archive.endswith(".gz") or archive.endswith(".tgz"):
fmt=":gz"
elif archive.endswith(".bz2") or archive.endswith(".tbz2"):
fmt=":bz2"
oldarchive=tarfile.open(archive)
newarchive=tarfile.open(archive+"-", mode="w"+fmt)
for mem in oldarchive.getmembers():
newarchive.addfile(mem, oldarchive.extractfile(mem))
oldarchive.close()
for arcname, fname in help_walker(topdir):
newarchive.add(fname, arcname)
newarchive.close()
os.rename(archive+"-", archive)
elif archive.endswith(".zip"):
ofile=zipfile.ZipFile(archive, "a", zipfile.ZIP_DEFLATED)
for arcname, fname in help_walker(topdir):
ofile.write(fname, arcname)
ofile.close()
else:
raise Exception("Don't know what to do with "+archive)
def create_c_file(src, dest):
# Transforms Python src into C dest as a sequence of strings.
# Because of the pathetic microsoft compiler we have to break it
# up into small chunks
out=["/* Automatically generated by setup.py from "+src+" */", ""]
percents=1
size=0
for line in read_whole_file(src, "rt").split("\n"):
if "if__name__=='__main__':" in line.replace(" ",""):
break
if line.strip().startswith('#'): # full line comment
continue
if line.strip()=="import apsw":
continue
size=size+len(line)
comma=size>32000
if comma:
size=0
percents+=1
line=line.replace("\\", "\\\\").\
replace('"', '\\"')
out.append(' "'+line.rstrip()+'\\n"')
if comma:
out[-1]=out[-1]+","
if out[-1].endswith(","):
out[-1]=out[-1][:-1]
out[1]='"%s",' % ("%s" * percents,)
write_whole_file(dest, "wt", "\n".join(out))
# We depend on every .[ch] file in src
depends=[f for f in glob.glob("src/*.[ch]") if f!="src/apsw.c"]
for f in (findamalgamation(), ):
if f:
depends.append(f)
# we produce a .c file from this
depends.append("tools/shell.py")
# work out version number
version=read_whole_file(os.path.join("src", "apswversion.h"), "rt").split()[2].strip('"')
# msi can't use normal version numbers because distutils is retarded,
# so mangle ours to suit it
if "bdist_msi" in sys.argv:
if version.endswith("-r1"):
version=version[:-len("-r1")]
else:
assert False, "MSI version needs help"
version=[int(v) for v in re.split(r"[^\d]+", version)]
# easy pad to 3 items long
while len(version)<3:
version.append(0)
# 4 is our normal length (eg 3.7.3-r1) but sometimes it is more eg
# 3.7.16.1-r1 so combine last elements if longer than 4
while len(version)>4:
    version[-2]=10*version[-2]+version[-1]
del version[-1]
# combine first two elements
if len(version)>3:
version[0]=100*version[0]+version[1]
del version[1]
version=".".join([str(v) for v in version])
setup(name="apsw",
version=version,
description="Another Python SQLite Wrapper",
long_description=\
"""A Python wrapper for the SQLite embedded relational database engine.
In contrast to other wrappers such as pysqlite it focuses on being
a minimal layer over SQLite attempting just to translate the
complete SQLite API into Python.""",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/rogerbinns/apsw/",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved",
"Operating System :: OS Independent",
"Programming Language :: C",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Database :: Front-Ends",
],
keywords=["database", "sqlite"],
license="OSI Approved ::",
ext_modules=[Extension("apsw",
["src/apsw.c", "src/sqlite3.c"], # PLASTICITY
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries,
define_macros=define_macros,
depends=depends)],
cmdclass={'test': run_tests,
'build_test_extension': build_test_extension,
'fetch': fetch,
'build_ext': apsw_build_ext,
'build': apsw_build,
'sdist': apsw_sdist,
'win64hackvars': win64hackvars}
)
|
tests/errors/semantic/non_blocking/var_is_none.py
|
dina-fouad/pyccel
| 206 |
91838
|
<filename>tests/errors/semantic/non_blocking/var_is_none.py<gh_stars>100-1000
# pylint: disable=missing-function-docstring, missing-module-docstring/
from pyccel.decorators import types
@types('int')
def f(a):
b = 0
if a is not None:
b = b + a
return b
|
osf/migrations/0122_auto_20180801_2105.py
|
gaybro8777/osf.io
| 628 |
91855
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-01 21:05
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('osf', '0121_merge_20180801_1458'),
]
operations = [
migrations.AlterIndexTogether(
name='basefilenode',
index_together=set([('target_content_type', 'target_object_id')]),
),
]
|
test/fixture/mycompile.py
|
Valkatraz/scons
| 1,403 |
91868
|
<filename>test/fixture/mycompile.py
r"""
Phony "compiler" for testing SCons.
Copies its source files to the target file, dropping lines
that match a pattern, so we can recognize the tool
has made a modification.
"""
import sys
if __name__ == '__main__':
line = ('/*' + sys.argv[1] + '*/\n').encode()
with open(sys.argv[2], 'wb') as ofp:
for f in sys.argv[3:]:
with open(f, 'rb') as ifp:
lines = [ln for ln in ifp if ln != line]
for ln in lines:
ofp.write(ln)
sys.exit(0)
|
databricks/koalas/tests/test_window.py
|
varunsh-coder/koalas
| 3,211 |
91871
|
<reponame>varunsh-coder/koalas<filename>databricks/koalas/tests/test_window.py<gh_stars>1000+
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
from databricks import koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.window import (
MissingPandasLikeExpanding,
MissingPandasLikeRolling,
MissingPandasLikeExpandingGroupby,
MissingPandasLikeRollingGroupby,
)
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class ExpandingRollingTest(ReusedSQLTestCase, TestUtils):
def test_missing(self):
kdf = ks.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# Expanding functions
missing_functions = inspect.getmembers(MissingPandasLikeExpanding, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.expanding(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(kdf.expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(kdf.a.expanding(1), name)() # Series
# Rolling functions
missing_functions = inspect.getmembers(MissingPandasLikeRolling, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.rolling(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(kdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(kdf.a.rolling(1), name)() # Series
# Expanding properties
missing_properties = inspect.getmembers(
MissingPandasLikeExpanding, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.expanding(1), name) # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.expanding(1), name) # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(kdf.expanding(1), name) # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(kdf.a.expanding(1), name) # Series
# Rolling properties
missing_properties = inspect.getmembers(
MissingPandasLikeRolling, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.rolling(1), name)() # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(kdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(kdf.a.rolling(1), name)() # Series
def test_missing_groupby(self):
kdf = ks.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# Expanding functions
missing_functions = inspect.getmembers(
MissingPandasLikeExpandingGroupby, inspect.isfunction
)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.groupby("a").expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.groupby(kdf.a).expanding(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(kdf.groupby("a").expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(kdf.a.groupby(kdf.a).expanding(1), name)() # Series
# Rolling functions
missing_functions = inspect.getmembers(MissingPandasLikeRollingGroupby, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.groupby("a").rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.groupby(kdf.a).rolling(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
                getattr(kdf.groupby("a").rolling(1), name)()  # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
                getattr(kdf.a.groupby(kdf.a).rolling(1), name)()  # Series
# Expanding properties
missing_properties = inspect.getmembers(
MissingPandasLikeExpandingGroupby, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.groupby("a").expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.groupby(kdf.a).expanding(1), name)() # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
                getattr(kdf.groupby("a").expanding(1), name)  # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
                getattr(kdf.a.groupby(kdf.a).expanding(1), name)  # Series
# Rolling properties
missing_properties = inspect.getmembers(
MissingPandasLikeRollingGroupby, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.groupby("a").rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.groupby(kdf.a).rolling(1), name)() # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
                getattr(kdf.groupby("a").rolling(1), name)()  # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
                getattr(kdf.a.groupby(kdf.a).rolling(1), name)()  # Series
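# Illustrative sketch (added for context, not part of the original test suite): the
# windowing entry points exercised above are used like their pandas counterparts for
# the aggregations koalas does implement. Assumes `mean` is supported and that a
# working Spark environment is available when this is run directly.
if __name__ == "__main__":
    demo_kdf = ks.DataFrame({"a": [1, 2, 3, 4, 5]})
    print(demo_kdf.a.rolling(2).mean())
    print(demo_kdf.a.expanding(1).mean())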
|
rl_baselines/rl_algorithm/acer.py
|
anonymous-authors-2018/robotics-repo
| 524 |
91879
|
<filename>rl_baselines/rl_algorithm/acer.py
from stable_baselines import ACER
from rl_baselines.base_classes import StableBaselinesRLObject
class ACERModel(StableBaselinesRLObject):
"""
object containing the interface between baselines.acer and this code base
ACER: Sample Efficient Actor-Critic with Experience Replay
"""
    LOG_INTERVAL = 1  # log RL model performance every step
SAVE_INTERVAL = 20 # Save RL model every 20 steps
def __init__(self):
super(ACERModel, self).__init__(name="acer", model_class=ACER)
def customArguments(self, parser):
super().customArguments(parser)
parser.add_argument('--num-cpu', help='Number of processes', type=int, default=1)
parser.add_argument('--lr-schedule', help='Learning rate schedule', default='constant',
choices=['linear', 'constant', 'double_linear_con', 'middle_drop', 'double_middle_drop'])
return parser
@classmethod
def getOptParam(cls):
return {
"n_steps": (int, (1, 100)),
"q_coef": (float, (0, 1)),
"ent_coef": (float, (0, 1)),
"max_grad_norm": (float, (0.1, 5)),
"learning_rate": (float, (0, 0.1)),
"rprop_epsilon": (float, (0, 0.01)),
"rprop_alpha": (float, (0.5, 1)),
"gamma": (float, (0.5, 1)),
"alpha": (float, (0.5, 1)),
"replay_ratio": (int, (0, 10)),
"correction_term": (float, (1, 10)),
"delta": (float, (0.1, 10)),
"lr_schedule": ((list, str),
['linear', 'constant', 'double_linear_con', 'middle_drop', 'double_middle_drop'])
}
def train(self, args, callback, env_kwargs=None, train_kwargs=None):
if train_kwargs is None:
train_kwargs = {}
param_kwargs = {
"verbose": 1,
"n_steps": 20,
"q_coef": 0.5,
"ent_coef": 0.01,
"max_grad_norm": 10,
"learning_rate": 7e-4,
"rprop_epsilon": 1e-5,
"rprop_alpha": 0.99,
"gamma": 0.99,
"buffer_size": 5000,
"replay_ratio": 4,
"replay_start": 1000,
"correction_term": 10.0,
"trust_region": True,
"alpha": 0.99,
"delta": 1,
"lr_schedule": args.lr_schedule
}
super().train(args, callback, env_kwargs, {**param_kwargs, **train_kwargs})
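# Illustrative sketch (not part of the original module): how the (type, range)
# specification returned by getOptParam() could drive a random hyperparameter draw.
# Only the class defined above and the standard library are used.
if __name__ == "__main__":
    import random
    drawn = {}
    for param, (param_type, bounds) in ACERModel.getOptParam().items():
        if param_type is int:
            drawn[param] = random.randint(bounds[0], bounds[1])
        elif param_type is float:
            drawn[param] = random.uniform(bounds[0], bounds[1])
        else:
            # categorical entries such as lr_schedule use ((list, str), [choices])
            drawn[param] = random.choice(bounds)
    print(drawn)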
|
lona/shell/shell.py
|
korantu/lona
| 230 |
91917
|
<gh_stars>100-1000
from rlpython import embed
def load_commands(server):
import_strings = server.settings.CORE_COMMANDS + server.settings.COMMANDS
commands = []
for import_string in import_strings:
commands.append(server.acquire(import_string))
return commands
def embed_shell(server, **embed_kwargs):
embed_kwargs['commands'] = load_commands(server)
embed(**embed_kwargs)
def generate_shell_server(server, **embed_kwargs):
embed_kwargs['commands'] = load_commands(server)
embed_kwargs['multi_session'] = True
return embed(**embed_kwargs)
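# Usage sketch (an assumption, not part of this module): from code that already holds
# a running Lona server instance, an interactive shell can be opened with
#     embed_shell(server)
# while generate_shell_server(server) returns a multi-session rlpython shell server.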
|
tests/basic/for_range2.py
|
MoonStarCZW/py2rb
| 124 |
91932
|
for i in range(5):
for j in range(i+1, 5):
print("i:%s, j:%s" % (i,j))
|
Machine Learning/Online Search Relevance Metrics/metrics/resources.py
|
endorama/examples
| 2,561 |
91983
|
<gh_stars>1000+
import json
import os
import time
from timeit import default_timer as timer
IGNORES = [400, 404]
INDEX = 'ecs-search-metrics'
TRANSFORM_NAMES = [f'{INDEX}_transform_queryid', f'{INDEX}_transform_completion']
INDEX_NAMES = [INDEX] + TRANSFORM_NAMES
PIPELINE_NAMES = INDEX_NAMES
class Timer:
def __enter__(self):
self.start = timer()
return self
def __exit__(self, *args):
self.end = timer()
self.interval = self.end - self.start
def list_filenames(directory):
"""Lists all files in a directory without traversal."""
listings = [os.path.join(directory, x) for x in os.listdir(directory)]
return [x for x in listings if os.path.isfile(x)]
def file_length(filename):
    """
    Count the number of lines in a file.
    See: https://gist.github.com/zed/0ac760859e614cd03652#file-gistfile1-py-L48-L49
    """
    with open(filename, 'r') as f:
        return sum(1 for _ in f)
def load_json(filename):
"""Loads a JSON file."""
with open(filename, 'r') as f:
return json.load(f)
def load_config(kind, name):
f = os.path.join('config', kind, f'{name}.json')
return load_json(f)
def delete_index(es, name):
"""Deletes an index, if it exists."""
print(f"Deleting index: {name}")
es.indices.delete(name, ignore=IGNORES)
def recreate_index(es, name):
"""Creates a new index, deleting any existing index."""
delete_index(es, name)
print(f"Creating new index: {name}")
es.indices.create(name, body=load_config('indices', name))
def recreate_indices(es, names):
for name in names:
recreate_index(es, name)
def delete_transform(es, name):
print(f"Deleting transform: {name}")
es.transform.delete_transform(name, ignore=IGNORES)
def create_transform(es, name):
print(f"Creating new transform: {name}")
with open(os.path.join('config', 'transforms', f'{name}.json'), 'r') as f:
transform = json.load(f)
es.transform.put_transform(name, transform)
def recreate_transform(es, name):
"""Creates transform, deleting any existing transform."""
delete_transform(es, name)
create_transform(es, name)
def recreate_transforms(es, names):
for name in names:
recreate_transform(es, name)
def get_transform_state(es, name):
response = es.transform.get_transform_stats(name)
assert response['count'] == 1
return response['transforms'][0]['state']
def start_transform(es, name):
print(f"Starting batch transform: {name}")
es.transform.start_transform(name)
time.sleep(1)
state = get_transform_state(es, name)
if state != 'stopped':
print(f"Waiting for batch transform: {name} ", end='')
while state != 'stopped':
print(".", end='')
time.sleep(5)
state = get_transform_state(es, name)
print()
es.indices.refresh(name)
def start_transforms(es, names):
for name in names:
start_transform(es, name)
def create_pipeline(es, name):
print(f"Creating pipeline: {name}")
es.ingest.put_pipeline(name, body=load_config('pipelines', name))
def recreate_pipelines(es, names):
for name in names:
create_pipeline(es, name)
def prepare(es, index_names=INDEX_NAMES, pipeline_names=PIPELINE_NAMES, transform_names=TRANSFORM_NAMES):
"""Prepares resources: indices, pipelines, transforms"""
recreate_indices(es, index_names)
recreate_pipelines(es, pipeline_names)
recreate_transforms(es, transform_names)
for x in transform_names:
delete_index(es, f'{x}_failed')
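# Illustrative driver (not part of the original module): how the helpers above might be
# wired together. The Elasticsearch host is hypothetical and the `elasticsearch` client
# is assumed to be installed alongside this project.
if __name__ == '__main__':
    from elasticsearch import Elasticsearch
    es = Elasticsearch('http://localhost:9200')
    with Timer() as t:
        prepare(es)
        start_transforms(es, TRANSFORM_NAMES)
    print(f"resources prepared and transforms run in {t.interval:.1f}s")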
|
examples/cs_maml/cs_maml_edge.py
|
cuiboyuan/plato
| 135 |
92003
|
"""
A federated learning client at the edge server in a cross-silo training workload.
"""
from dataclasses import dataclass
import logging
import os
import pickle
import sys
from plato.clients import edge
@dataclass
class Report(edge.Report):
"""Report from an Axiothea edge server, to be sent to the central server."""
class Client(edge.Client):
"""A federated learning client at the edge server in a cross-silo training workload."""
def __init__(self, server, algorithm=None, trainer=None):
super().__init__(server, algorithm=algorithm, trainer=trainer)
self.do_personalization_test = False
def process_server_response(self, server_response):
"""Additional client-specific processing on the server response."""
if 'personalization_test' in server_response:
self.do_personalization_test = True
else:
super().process_server_response(server_response)
async def test_personalization(self):
"""Test personalization by passing the global meta model to its clients,
and let them train their personlized models and test accuracy."""
logging.info(
"[Edge Server #%d] Passing the global meta model to its clients.",
self.client_id)
# Edge server select clients to conduct personalization test
await self.server.select_testing_clients()
# Wait for clients conducting personalization test
await self.server.per_accuracy_aggregated.wait()
self.server.per_accuracy_aggregated.clear()
report = self.server.personalization_accuracy
payload = 'personalization_accuracy'
return report, payload
async def payload_done(self, client_id, object_key) -> None:
"""Upon receiving all the new payload from the server."""
payload_size = 0
if object_key is None:
if isinstance(self.server_payload, list):
for _data in self.server_payload:
payload_size += sys.getsizeof(pickle.dumps(_data))
elif isinstance(self.server_payload, dict):
for key, value in self.server_payload.items():
payload_size += sys.getsizeof(pickle.dumps({key: value}))
else:
payload_size = sys.getsizeof(pickle.dumps(self.server_payload))
else:
self.server_payload = self.s3_client.receive_from_s3(object_key)
payload_size = sys.getsizeof(pickle.dumps(self.server_payload))
assert client_id == self.client_id
logging.info(
"[Client #%d] Received %s MB of payload data from the server.",
client_id, round(payload_size / 1024**2, 2))
self.load_payload(self.server_payload)
self.server_payload = None
if self.do_personalization_test:
report, payload = await self.test_personalization()
self.do_personalization_test = False
else:
report, payload = await self.train()
logging.info(
"[Server #%d] Model aggregated on edge server (client #%d).",
os.getpid(), client_id)
# Sending the client report as metadata to the server (payload to follow)
await self.sio.emit('client_report', {'report': pickle.dumps(report)})
# Sending the client training payload to the server
await self.send(payload)
|
algorithm/sliding_window_examples.py
|
ganeshskudva/Algorithm_Templates
| 190 |
92011
|
from collections import Counter
from collections import defaultdict
# [3] https://leetcode.com/problems/longest-substring-without-repeating-characters/
# Given a string, find the length of the longest substring without repeating characters.
#
# variation with no pattern
def lengthOfLongestSubstring(s):
# create a default dict to maintain state
counter = defaultdict(int)
count, start, end, res = 0, 0, 0, 0
while end < len(s):
counter[s[end]] += 1
if counter[s[end]] > 1:
count += 1
end += 1
while count > 0:
counter[s[start]] -= 1
if counter[s[start]] > 0:
count -= 1
start += 1
res = max(res, end - start)
return res
# [76] https://leetcode.com/problems/minimum-window-substring/
# Given a string S and a string T, find the minimum window in S which will contain all the characters in T
#
# variation with finding minimum
def minWindow(s: str, t: str) -> str:
counter = Counter(t)
count, start, end, res = len(t), 0, 0, [float('inf'), 0]
while end < len(s):
counter[s[end]] -= 1
# consider duplicate char in t
if counter[s[end]] >= 0:
count -= 1
end += 1
        # the window is valid while count == 0; update the result and shrink from the left
while count == 0:
# update minimum here, inner while loop
if end - start < res[0]:
res = (end - start, start)
counter[s[start]] += 1
if counter[s[start]] > 0:
count += 1
start += 1
return s[res[1]:res[0] + res[1]] if res[0] != float('inf') else ''
# [904] https://leetcode.com/problems/fruit-into-baskets/
# You have two baskets, and each basket can carry any quantity of fruit, but you want each basket to only carry one type of fruit each.
# What is the total amount of fruit you can collect with this procedure?
#
# variation with list
def totalFruit(tree: 'List[int]') -> int:
counter = defaultdict(int)
count, start, end, res = 0, 0, 0, 0
while end < len(tree):
counter[tree[end]] += 1
if counter[tree[end]] == 1:
count += 1
end += 1
while count > 2:
counter[tree[start]] -= 1
if counter[tree[start]] == 0:
count -= 1
start += 1
res = max(res, end - start)
return res
# [438] https://leetcode.com/problems/find-all-anagrams-in-a-string/
# Given a string s and a non-empty string p, find all the start indices of p's anagrams in s.
#
# variation with restrict between start and end
def findAnagrams(s: str, p: str) -> 'List[int]':
len_p, len_s = len(p), len(s)
if len_p > len_s:
return []
counter = Counter(p)
count, start, end, res = len_p, 0, 0, []
while end < len_s:
        # count only decreases when s[end] is a character still needed from p
counter[s[end]] -= 1
if counter[s[end]] >= 0:
count -= 1
end += 1
if count == 0:
res.append(start)
        # no inner while loop is needed because the window length is fixed at len_p
if end - start == len_p:
counter[s[start]] += 1
            # characters not in p stay negative, so they never increase count
if counter[s[start]] > 0:
count += 1
start += 1
return res
# [30] https://leetcode.com/problems/substring-with-concatenation-of-all-words/
# Find all starting indices of substring(s) in s that is a concatenation of each word in words exactly once and without any intervening characters.
#
# variation with complex match policy
def findSubstring(s: str, words: 'List[str]') -> 'List[int]':
if not words:
return []
word_len, res = len(words[0]), []
    # try every start offset in range(word_len) and slide in steps of word_len
for i in range(word_len):
# reset state every epoch
counter = Counter(words)
start, end, count = i, i, len(words)
while end < len(s):
cur_word = s[end:end + word_len]
# check is not necessary here, just for performance
if cur_word in counter:
counter[cur_word] -= 1
if counter[cur_word] >= 0:
count -= 1
end += word_len
if count == 0:
res.append(start)
# ensure consecutive words
if end - start == word_len * len(words):
cur_word = s[start:start + word_len]
if cur_word in counter:
counter[cur_word] += 1
if counter[cur_word] > 0:
count += 1
start += word_len
# the order is not necessary here
return res
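# Minimal sanity checks (added for illustration; not part of the original templates).
if __name__ == '__main__':
    assert lengthOfLongestSubstring("abcabcbb") == 3
    assert minWindow("ADOBECODEBANC", "ABC") == "BANC"
    assert totalFruit([1, 2, 1, 3]) == 3
    assert sorted(findAnagrams("cbaebabacd", "abc")) == [0, 6]
    assert sorted(findSubstring("barfoothefoobarman", ["foo", "bar"])) == [0, 9]
    print("all sliding window examples passed")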
|
examples/eph/00-simple_eph.py
|
QuESt-Calculator/pyscf
| 501 |
92030
|
<reponame>QuESt-Calculator/pyscf
#!/usr/bin/env python
'''
A simple example of running an EPH (electron-phonon coupling) calculation.
'''
from pyscf import gto, dft, eph
mol = gto.M(atom='N 0 0 0; N 0 0 2.100825', basis='def2-svp', verbose=4, unit="bohr")
# this is a pre-computed relaxed molecule
# for geometry relaxation, refer to pyscf/example/geomopt
mf = dft.RKS(mol, xc='pbe,pbe')
mf.run()
grad = mf.nuc_grad_method().kernel()
assert (abs(grad).sum()<1e-5) # making sure the geometry is relaxed
myeph = eph.EPH(mf)
mat, omega = myeph.kernel()
print(mat.shape, omega)
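# Note added for clarity (not in the original example): `mat` holds the electron-phonon
# coupling matrix elements and `omega` the phonon mode frequencies; see the pyscf eph
# documentation for the exact conventions and units.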
|
lldb/packages/Python/lldbsuite/test/lang/cpp/class-template-parameter-pack/TestClassTemplateParameterPack.py
|
medismailben/llvm-project
| 765 |
92036
|
<filename>lldb/packages/Python/lldbsuite/test/lang/cpp/class-template-parameter-pack/TestClassTemplateParameterPack.py
from lldbsuite.test import lldbinline
from lldbsuite.test import decorators
lldbinline.MakeInlineTest(
__file__, globals(), [
decorators.expectedFailureAll(
compiler="gcc")])
|
tests/utils_tests/test_glob.py
|
MikeAmy/django
| 5,079 |
92046
|
<reponame>MikeAmy/django
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils.glob import glob_escape
class TestUtilsGlob(SimpleTestCase):
def test_glob_escape(self):
filename = '/my/file?/name[with special chars*'
expected = '/my/file[?]/name[[]with special chars[*]'
filename_b = b'/my/file?/name[with special chars*'
expected_b = b'/my/file[?]/name[[]with special chars[*]'
self.assertEqual(glob_escape(filename), expected)
self.assertEqual(glob_escape(filename_b), expected_b)
|
match.py
|
pammirato/image_vatic
| 571 |
92050
|
<reponame>pammirato/image_vatic<filename>match.py
import munkres
def match(first, second, method):
"""
Attempts to match every path in 'first' with a path in 'second'. Returns
the association along with its score.
Note: if two paths have nothing to do with each other, but there is no
    other suitable candidate, the two seemingly unrelated paths will be
associated. It is up to the caller to handle this situation. The 'validate'
method may provide some help. Further, if len(first) != len(second), then
some elements will be associated with None.
"""
if len(first) == len(second) == 0:
return []
costs = buildmatrix(first, second, method)
response = []
for f, s in munkres.Munkres().compute(costs):
response.append((first[f] if f < len(first) else None,
second[s] if s < len(second) else None,
costs[f][s]))
return response
def buildmatrix(first, second, method):
"""
Builds the matrix for the Hungarian algorithm. Pads with the worst to make
the matrix square.
"""
costs = [[method(f,s) for s in second] for f in first]
if len(first) and len(second):
        horrible = [max(max(row) for row in costs) + 1]  # strictly worse than any real cost
else:
horrible = [1e10]
if len(first) > len(second):
for row in costs:
row.extend(horrible * (len(first) - len(second)))
elif len(first) < len(second):
costs.extend([horrible * len(second)] * (len(second) - len(first)))
return costs
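# Illustrative usage (not part of the original module): match two short lists of scalar
# "paths" with absolute difference as the scoring method. Requires the munkres package
# imported above.
if __name__ == "__main__":
    left = [1.0, 5.0, 9.0]
    right = [5.2, 0.9]
    for a, b, cost in match(left, right, lambda x, y: abs(x - y)):
        print(a, b, cost)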
|
tools/optimize/convert_model_batch.py
|
zhouzy-creator/Tengine
| 4,697 |
92067
|
# -*- coding: utf-8 -*-
# OPEN AI LAB is pleased to support the open source community by supporting Tengine available.
#
# Copyright (C) 2021 OPEN AI LAB. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
This tool is used for converting models in batches.
Usage:
file structure is as below:
Tengine
├── build
├──convert_model_batch.py
├──onnx_node
├──tools/convert_tool/convert_tool
$ python convert_model_batch.py -f "./tools/convert_tool/convert_tool" -m "onnx_node" -s -sp "./convert_result"
Author:
<EMAIL>
"""
import argparse
import os
import subprocess
support_onnx_op_list = ["Abs", "Acos", "And", "ArgMax", "ArgMin", "Asin", "Atan", "AveragePool",
"Add", "BatchNormalization", "Conv", "ConvTranspose", "Concat", "Clip",
"Ceil", "Cos", "Cast", "Dropout", "DepthToSpace", "Div", "Elu", "Exp",
"Expand", "Equal", "Flatten", "Floor", "Gemm", "Gather", "Greater", "GlobalAveragePool",
"HardSwish", "HardSigmoid", "InstanceNormalization", "Log", "LRN", "Less", "LSTM",
"LeakyRelu", "LogSoftmax", "Mul", "Max", "Min", "Mean", "MatMul", "MaxPool",
"Neg", "Or", "Pad", "Pow", "PRelu", "Relu", "Resize", "Reshape", "ReduceL2",
"ReduceMean", "ReduceLogSumExp", "ReduceLogSum", "ReduceMax", "ReduceMin",
"ReduceProd", "ReduceSumSquare", "ReduceSum", "Reciprocal", "Sub",
"Selu", "Sqrt", "Slice", "Split", "Shape", "Squeeze", "Scatter", "Sigmoid",
"Softmax", "Softplus", "Tanh", "Tile", "Transpose", "Upsample", "Unsqueeze",
"Where"]
support_onnx_op_list = [x.lower() for x in support_onnx_op_list]
def usage_info():
"""
usage info
"""
print("Input params is illegal...╮(╯3╰)╭")
def parse_args():
parser = argparse.ArgumentParser(description='convert tools in batch')
parser.add_argument('-f', help='convert tool path', type=str)
parser.add_argument('-m', help='model folder path', type=str)
parser.add_argument('-c', help='convert type', default='onnx', type=str)
parser.add_argument('-s', help='save convert result', action='store_true')
parser.add_argument('-sp', help='save result path', default='./convert_results', type=str)
args = parser.parse_args()
return args
def convert_model_onnx(convert_tool_path, onnx_model_path):
"""
convert single model
:param convert_tool_path:
:param onnx_model_path:
:return:
"""
folder_dir, file_name = os.path.split(onnx_model_path)
    shell_command = './' if './' not in convert_tool_path else ''
    shell_command += f"{convert_tool_path} -f onnx -m {onnx_model_path} -o {folder_dir}/onnx.tmfile"
    # print(shell_command)
    (status, output) = subprocess.getstatusoutput(shell_command)
    if status != 0:
        if os.path.exists(f"{folder_dir}/onnx.tmfile"):
            shell_command = f"rm {folder_dir}/onnx.tmfile"
            os.system(shell_command)
return False, output
else:
return True, output
def main():
"""
main function
"""
print("---- batch convert tools ----\n")
args = parse_args()
    if args.m is None or args.f is None:
usage_info()
return None
print("convert tool path: ", args.f)
print("model folder path: ", args.m)
print("convert type : ", args.c)
print("save result : ", args.s)
print("save folder : ", args.sp)
shell_log_dir = args.sp
if args.s:
if os.path.exists(shell_log_dir) is not True:
os.mkdir(shell_log_dir)
with open(f"{shell_log_dir}/convert_batch_models.txt", 'w') as f:
f.write(f"{'Model Path':<80} Convert Result\n")
if args.c.lower() == 'onnx':
folder_lists = os.listdir(args.m)
folder_lists.sort()
for sub_folder in folder_lists:
sub_folder_path = f"{args.m}/{sub_folder}"
try:
op_type = sub_folder.split('_')[1].lower()
except:
continue
if os.path.isdir(sub_folder_path) and op_type in support_onnx_op_list:
for item in os.listdir(sub_folder_path):
if '.onnx' in item:
result, log = convert_model_onnx(args.f, f"{sub_folder_path}/{item}")
print(f"{sub_folder_path:<80} {result}")
if args.s:
with open(f"{shell_log_dir}/convert_batch_models.txt", 'a') as f:
f.write(f"{sub_folder_path:<80} {result}\n")
with open(f"{shell_log_dir}/{sub_folder}.txt", 'w') as f:
f.write(log)
if __name__ == '__main__':
main()
|
vbdiar/utils/utils.py
|
VarunSrivastava19/VBDiarization
| 101 |
92069
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Brno University of Technology FIT
# Author: <NAME> <<EMAIL>>
# All Rights Reserved
import os
import re
import random
from os import listdir
from os.path import isfile, join
import fnmatch
import math
import numpy as np
import yaml
class Utils(object):
""" Class tools handles basic operations with files and directories.
"""
def __init__(self):
""" tools class constructor.
"""
return
@staticmethod
def list_directory_by_suffix(directory, suffix):
""" Return listed directory of files based on their suffix.
:param directory: directory to be listed
:type directory: str
:param suffix: suffix of files in directory
:type suffix: str
:returns: list of files specified byt suffix in directory
:rtype: list
>>> Utils.list_directory_by_suffix('../../tests/tools', '.test')
['empty1.test', 'empty2.test']
>>> Utils.list_directory_by_suffix('../../tests/tools_no_ex', '.test')
Traceback (most recent call last):
...
        ValueError: No directory named ../../tests/tools_no_ex found!
>>> Utils.list_directory_by_suffix('../../tests/tools', '.py')
[]
"""
abs_dir = os.path.abspath(directory)
try:
ofiles = [f for f in listdir(abs_dir) if isfile(join(abs_dir, f))]
except OSError:
raise ValueError('No directory named {} found!'.format(directory))
out = []
for file_in in ofiles:
if file_in.find(suffix) != -1:
out.append(file_in)
out.sort()
return out
@staticmethod
def list_directory(directory):
""" List directory.
:param directory: directory to be listed
:type directory: str
:returns: list with files in directory
:rtype: list
>>> Utils.list_directory('../../tests/tools')
['empty1.test', 'empty2.test', 'test', 'test.txt']
>>> Utils.list_directory('../../tests/tools_no_ex')
Traceback (most recent call last):
...
        ValueError: No directory found!
"""
directory = os.path.abspath(directory)
try:
out = [f for f in listdir(directory)]
except OSError:
raise ValueError('No directory found!')
out.sort()
return out
@staticmethod
def recursively_list_directory_by_suffix(directory, suffix):
""" Return recursively listed directory of files based on their suffix.
:param directory: directory to be listed
:type directory: str
:param suffix: suffix of files in directory
:type suffix: str
:returns: list of files specified by suffix in directory
:rtype: list
>>> Utils.recursively_list_directory_by_suffix( \
'../../tests/tools', '.test')
['empty1.test', 'empty2.test', 'test/empty.test']
>>> Utils.recursively_list_directory_by_suffix( \
'../../tests/tools_no_ex', '.test')
[]
"""
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, '*' + suffix):
app = os.path.join(root, filename).replace(directory + '/', '')
matches.append(app)
matches.sort()
return matches
@staticmethod
def sed_in_file(input_file, regex1, regex2):
""" Replace in input file by regex.
:param input_file: input file
:type input_file: str
:param regex1: regular expression 1
:type regex1: str
:param regex2: regular expression 2
:type regex2: str
"""
with open(input_file, 'r') as sources:
lines = sources.readlines()
with open(input_file, 'w') as sources:
for line in lines:
sources.write(re.sub(regex1, regex2, line))
@staticmethod
def remove_lines_in_file_by_indexes(input_file, lines_indexes):
""" Remove specified lines in file.
:param input_file: input file name
:type input_file: str
:param lines_indexes: list with lines
:type lines_indexes: list
"""
with open(input_file, 'r') as sources:
lines = sources.readlines()
with open(input_file, 'w') as sources:
for i in range(len(lines)):
if i not in lines_indexes:
sources.write(lines[i])
@staticmethod
def get_method(instance, method):
""" Get method pointer.
:param instance: input object
:type instance: object
:param method: name of method
:type method: str
:returns: pointer to method
:rtype: method
"""
try:
attr = getattr(instance, method)
except AttributeError:
raise ValueError('Unknown class method!')
return attr
@staticmethod
def configure_instance(instance, input_list):
""" Configures instance base on methods list.
:param instance: reference to class instance
:type instance: object
:param input_list: input list with name of class members
:type input_list: list
:returns: configured instance
:rtype: object
"""
for line in input_list:
variable = line[:line.rfind('=')]
value = line[line.rfind('=') + 1:]
method_callback = Utils.get_method(instance, 'Set' + variable)
method_callback(value)
return instance
@staticmethod
def sort(scores, col=None):
""" Sort scores list where score is in n-th-1 column.
:param scores: scores list to be sorted
:type scores: list
:param col: index of column
:type col: int
:returns: sorted scores list
:rtype: list
>>> Utils.sort([['f1', 'f2', 10.0], \
['f3', 'f4', -10.0], \
['f5', 'f6', 9.58]], col=2)
[['f3', 'f4', -10.0], ['f5', 'f6', 9.58], ['f1', 'f2', 10.0]]
>>> Utils.sort([4.59, 8.8, 6.9, -10001.478])
[-10001.478, 4.59, 6.9, 8.8]
"""
if col is None:
return sorted(scores, key=float)
else:
return sorted(scores, key=lambda x: x[col])
@staticmethod
def reverse_sort(scores, col=None):
""" Reversively sort scores list where score is in n-th column.
:param scores: scores list to be sorted
:type scores: list
:param col: number of columns
:type col: int
:returns: reversively sorted scores list
:rtype: list
>>> Utils.reverse_sort([['f1', 'f2', 10.0], \
['f3', 'f4', -10.0], \
['f5', 'f6', 9.58]], col=2)
[['f1', 'f2', 10.0], ['f5', 'f6', 9.58], ['f3', 'f4', -10.0]]
>>> Utils.reverse_sort([4.59, 8.8, 6.9, -10001.478])
[8.8, 6.9, 4.59, -10001.478]
"""
if col is None:
return sorted(scores, key=float, reverse=True)
else:
return sorted(scores, key=lambda x: x[col], reverse=True)
@staticmethod
def get_nth_col(in_list, col):
""" Extract n-th-1 columns from list.
:param in_list: input list
:type in_list: list
:param col: column
:type col: int
:returns: list only with one column
:rtype: list
>>> Utils.get_nth_col([['1', '2'], ['3', '4'], ['5', '6']], col=1)
['2', '4', '6']
>>> Utils.get_nth_col([['1', '2'], ['3', '4'], ['5', '6']], col=42)
Traceback (most recent call last):
...
        ValueError: Column out of range!
"""
try:
out = [row[col] for row in in_list]
except IndexError:
raise ValueError('Column out of range!')
return out
@staticmethod
def find_in_dictionary(in_dict, value):
""" Find value in directory whose items are lists and return key.
:param in_dict: dictionary to search in
:type in_dict: dict
:param value: value to find
:type value: any
:returns: dictionary key
:rtype: any
>>> Utils.find_in_dictionary({ 0 : [42], 1 : [88], 2 : [69]}, 69)
2
>>> Utils.find_in_dictionary(dict(), 69)
Traceback (most recent call last):
...
        ValueError: Value not found!
"""
for key in in_dict:
if value in in_dict[key]:
return key
raise ValueError('Value not found!')
@staticmethod
def get_scores(scores, key):
""" Get scores from scores list by key.
:param scores: input scores list
:type scores: list
:param key: key to find
:type key: list
:returns: score if key is present in score, None otherwise
:rtype: float
>>> Utils.get_scores([['f1', 'f2', 10.1], ['f3', 'f4', 20.1], \
['f5', 'f6', 30.1]], ['f6', 'f5'])
30.1
"""
if len(key) != 2:
raise ValueError('Unexpected key!')
if len(scores[0]) != 3:
raise ValueError('Invalid input list!')
for score in scores:
a = score[0]
b = score[1]
if (key[0] == a and key[1] == b) or (key[0] == b and key[1] == a):
return score[2]
return None
@staticmethod
def get_line_from_file(line_num, infile):
""" Get specified line from file.
:param line_num: number of line
:type line_num: int
:param infile: file name
:type infile: str
:returns: specified line, None otherwise
:rtype: str
>>> Utils.get_line_from_file(3, '../../tests/tools/test.txt')
'c\\n'
>>> Utils.get_line_from_file(10, '../../tests/tools/test.txt')
Traceback (most recent call last):
...
        ValueError: Line number 10 not found in file ../../tests/tools/test.txt.
"""
with open(infile) as fp:
for i, line in enumerate(fp):
if i == line_num - 1:
return line
        raise ValueError('Line number {} not found in file {}.'.format(line_num, infile))
@staticmethod
def list2dict(input_list):
""" Create dictionary from list in format [key1, key2, score].
:param input_list: list to process
:type input_list: list
:returns: preprocessed dictionary
:rtype: dict
>>> Utils.list2dict([['f1', 'f2', 10.1], ['f3', 'f4', 20.1], \
['f5', 'f6', 30.1], ['f1', 'f3', 40.1]])
{'f1 f2': 10.1, 'f5 f6': 30.1, 'f3 f4': 20.1, 'f1 f3': 40.1}
>>> Utils.list2dict([['f1', 'f2', 10.1], ['f3', 'f4']])
Traceback (most recent call last):
...
        ValueError: Invalid format of input list!
"""
dictionary = dict()
for item in input_list:
if len(item) != 3:
raise ValueError('Invalid format of input list!')
tmp_list = [item[0], item[1]]
tmp_list.sort()
dictionary[tmp_list[0] + ' ' + tmp_list[1]] = item[2]
return dictionary
@staticmethod
def merge_dicts(*dict_args):
""" Merge dictionaries into single one.
:param dict_args: input dictionaries
:type dict_args: dict array
:returns: merged dictionaries into single one
:rtype: dict
>>> Utils.merge_dicts( \
{'f1 f2': 10.1, 'f5 f6': 30.1, 'f1 f3': 40.1}, {'f6 f2': 50.1})
{'f1 f2': 10.1, 'f5 f6': 30.1, 'f6 f2': 50.1, 'f1 f3': 40.1}
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
@staticmethod
def save_object(obj, path):
""" Saves object to disk.
:param obj: reference to object
:type obj: any
:param path: path to file
:type path: str
"""
np.save(path, obj)
@staticmethod
def load_object(path):
""" Loads object from disk.
:param path: path to file
:type path: str
"""
        return np.load(path, allow_pickle=True)  # allow_pickle is required for pickled objects saved via np.save
@staticmethod
def common_prefix(m):
""" Given a list of pathnames, returns the longest prefix."
:param m: input list
:type m: list
:returns: longest prefix in list
:rtype: str
"""
if not m:
return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
@staticmethod
def root_name(d):
""" Return a root directory by name.
:param d: directory name
:type d: str
:returns: root directory name
:rtype d: str
"""
pass
@staticmethod
def read_config(config_path):
""" Read config in yaml format.
Args:
config_path (str): path to config file
Returns:
"""
with open(config_path, 'r') as ymlfile:
            return yaml.load(ymlfile, Loader=yaml.FullLoader)
@staticmethod
def l2_norm(ivecs):
""" Perform L2 normalization.
Args:
ivecs (np.array): input i-vector
Returns:
np.array: normalized i-vectors
"""
ret_ivecs = ivecs.copy()
ret_ivecs /= np.sqrt((ret_ivecs ** 2).sum(axis=1)[:, np.newaxis])
return ret_ivecs
@staticmethod
def cos_sim(v1, v2):
"""
Args:
v1 (np.array): first vector
v2 (np.array): second vector
Returns:
"""
sumxx, sumxy, sumyy = 0, 0, 0
for i in range(len(v1)):
x = v1[i]
y = v2[i]
sumxx += x * x
sumyy += y * y
sumxy += x * y
return sumxy / math.sqrt(sumxx * sumyy)
@staticmethod
def partition(large_list, n_sublists, shuffle=False):
"""Partition a list ``l`` into ``n`` sublists."""
return np.array_split(large_list, n_sublists)
if __name__ == "__main__":
import doctest
doctest.testmod()
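# Note added for clarity (not in the original module): for rows normalised with
# Utils.l2_norm, cosine similarity reduces to a plain dot product, e.g.
#
#     v = Utils.l2_norm(np.array([[3.0, 4.0]]))[0]
#     assert abs(Utils.cos_sim(v, v) - 1.0) < 1e-9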
|
funsor/compat/ops.py
|
fritzo/funsor
| 198 |
92076
|
<reponame>fritzo/funsor<gh_stars>100-1000
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from torch import ones, randn, tensor, zeros # noqa F401
from funsor.testing import allclose # noqa F401
from .ops import * # noqa F401
|
quaternion/quat_test.py
|
IhorNehrutsa/micropython-samples
| 268 |
92090
|
# quat_test.py Test for quat.py
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2020 <NAME>
from math import sin, cos, isclose, pi, sqrt
from quat import *
print('Properties')
q1 = Quaternion(1, 2, 3, 4)
q1.w = 5
q1.x = 6
q1.y = 7
q1.z = 8
assert q1 == Quaternion(5, 6, 7, 8)
assert (q1.w, q1.x, q1.y, q1.z) == (5, 6, 7, 8)
# Numpy demo at https://quaternion.readthedocs.io/en/latest/README.html
print('Hamilton product')
q1 = Quaternion(1, 2, 3, 4)
q2 = Quaternion(5, 6, 7, 8)
q3 = Quaternion(-60, 12, 30, 24)
assert q3 == q1 * q2
print('Iterator protocol')
assert Quaternion(*q1) == q1
assert list(q1[1:]) == [2,3,4]
foo = iter(q1)
assert next(foo) == 1
print('Assign from tuple')
q1[1:] = (9, 10, 11)
assert list(q1[1:]) == [9, 10, 11]
q1[:] = (8, 9, 10, 99)
assert list(q1[:]) == [8, 9, 10, 99]
print('Assign from scalar')
q1[0] = 88
assert list(q1[:]) == [88, 9, 10, 99]
print('Negation')
q1 = Quaternion(1, 2, 3, 4)
q2 = Quaternion(-1, -2, -3, -4)
assert -q1 == q2
print('Comparison operators and unary +')
assert (q1 is +q1) == False
assert q1 == +q1
assert (q1 is q1.copy()) == False
assert q1 == q1.copy()
assert q1 >= q1.copy()
assert q1 <= q1.copy()
assert (q1 < q1.copy()) == False
assert (q1 > q1.copy()) == False
q2 = Quaternion(1, 2.1, 3, 4)
assert q2 > q1
assert q1 < q2
assert q2 >= q1
assert q1 <= q2
assert (q1 == q2) == False
assert q1 != q2
print('Scalar add')
q2 = Quaternion(5, 2, 3, 4)
assert q2 == q1 + 4
print('Scalar subtract')
q2 = Quaternion(-3, 2, 3, 4)
assert q2 == q1 - 4
print('Scalar multiply')
q2 = Quaternion(2, 4, 6, 8)
assert q2 == q1 * 2
print('Scalar divide')
q2 = Quaternion(0.5, 1, 1.5, 2)
assert q2 == q1/2
print('Conjugate')
assert q1.conjugate() == Quaternion(1, -2, -3, -4)
print('Inverse')
assert q1.inverse() * q1 == Quaternion(1, 0, 0, 0)
print('Multiply by tuple')
assert q1*(2,3,4) == Quaternion(0, 4, 9, 16)
assert q1*(4,5,6,7) == Quaternion(4, 10, 18, 28)
print('Add tuple')
assert q1 + (2,3,4) == Quaternion(0, 4, 6, 8)
assert q1 + (4,5,6,7) == Quaternion(5, 7, 9, 11)
print('abs(), len(), str()')
assert abs(Quaternion(2,2,2,2)) == 4
assert len(q1) == 4
assert str(q1) == 'w = 1.00 x = 2.00 y = 3.00 z = 4.00'
print('Rotation')
p = Vector(0, 1, 0)
r = Rotator(pi/4, 0, 0, 1)
rv = p @ r # Anticlockwise about z axis
assert isclose(rv.w, 0, abs_tol=mdelta)
assert isclose(rv.x, -sin(pi/4), rel_tol=mdelta)
assert isclose(rv.y, sin(pi/4), rel_tol=mdelta)
assert isclose(rv.z, 0, abs_tol=mdelta)
p = Vector(1, 0, 0)
r = Rotator(-pi/4, 0, 0, 1)
rv = p @ r # Clockwise about z axis
assert isclose(rv.w, 0, abs_tol=mdelta)
assert isclose(rv.x, sin(pi/4), rel_tol=mdelta)
assert isclose(rv.y, -sin(pi/4), rel_tol=mdelta)
assert isclose(rv.z, 0, abs_tol=mdelta)
p = Vector(0, 1, 0)
r = Rotator(-pi/4, 1, 0, 0)
rv = p @ r # Clockwise about x axis
assert isclose(rv.w, 0, abs_tol=mdelta)
assert isclose(rv.x, 0, abs_tol=mdelta)
assert isclose(rv.y, sin(pi/4), rel_tol=mdelta)
assert isclose(rv.z, -sin(pi/4), rel_tol=mdelta)
print('Rotation using Euler angles')
# Tait-Bryan angles DIN9300: I thought z axis is down towards ground.
# However https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
# and this implementation implies z is towards sky.
# https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible
# Test heading
# Yaw/Heading: a +ve value is counter clockwise
p = Vector(1, 0, 0) # x is direction of motion
r = Euler(pi/4, 0, 0) # Heading 45°.
rv = p @ r
assert isclose(rv.w, 0, abs_tol=mdelta)
assert isclose(rv.x, sin(pi/4), rel_tol=mdelta)
assert isclose(rv.y, sin(pi/4), rel_tol=mdelta)
assert isclose(rv.z, 0, abs_tol=mdelta)
# Test pitch
# A +ve value is aircraft nose down i.e. z +ve
p = Vector(1, 0, 0) # x is direction of motion
r = Euler(0, pi/4, 0) # Pitch 45°.
rv = p @ r
assert isclose(rv.w, 0, abs_tol=mdelta)
assert isclose(rv.x, sin(pi/4), rel_tol=mdelta)
assert isclose(rv.y, 0, abs_tol=mdelta)
assert isclose(rv.z, -sin(pi/4), rel_tol=mdelta) # Implies z is towards sky
# Test roll
# A +ve value is y +ve
p = Vector(0, 1, 0) # x is direction of motion. Vector is aircraft wing
r = Euler(0, 0, pi/4) # Roll 45°.
rv = p @ r
assert isclose(rv.w, 0, abs_tol=mdelta)
assert isclose(rv.x, 0, abs_tol=mdelta)
assert isclose(rv.y, sin(pi/4), rel_tol=mdelta)
assert isclose(rv.z, sin(pi/4), rel_tol=mdelta) # Implies z is towards sky
print('euler() test')
r = Euler(pi/4, 0, 0)
assert isclose(euler(r)[0], pi/4, rel_tol=mdelta)
r = Euler(0, pi/4, 0)
assert isclose(euler(r)[1], pi/4, rel_tol=mdelta)
r = Euler(0, 0, pi/4)
assert isclose(euler(r)[2], pi/4, rel_tol=mdelta)
print('isrot() and isvec()')
assert Quaternion(0, 1, 2, 3).isvec()
assert not Quaternion(0, 1, 2, 3).isrot()
assert not Quaternion(1, 2, 3, 4).isvec()
q = Rotator(1, 1, 1, 1)
assert q.isrot()
print('to_angle_axis()')
t = Rotator(1, 1, 1, 1).to_angle_axis()
assert isclose(t[0], 1, rel_tol=mdelta)
for v in t[1:]:
assert isclose(v, sqrt(1/3), rel_tol=mdelta)
s = '''
*** Standard tests PASSED. ***
The following test of reflected arithmetic operators will fail unless the
firmware was compiled with MICROPY_PY_REVERSE_SPECIAL_METHODS.
Runs on the Unix build.'''
print(s)
q1 = Quaternion(1, 2, 3, 4)
assert 10 + Quaternion(1, 2, 3, 4) == Quaternion(11, 2, 3, 4)
assert 1/q1 == q1.inverse()
assert 2 * q1 == q1 + q1
assert 1 - q1 == -q1 + 1
s = '''
Reverse/reflected operators OK.
*** All tests PASSED. ***
'''
print(s)
|
tools/converters/torchvision_resnet_to_zcls_resnet.py
|
YinAoXiong/ZCls
| 110 |
92113
|
# -*- coding: utf-8 -*-
"""
@date: 2021/5/4 下午7:11
@file: torchvision_resnet_to_zcls_resnet.py
@author: zj
@description: Transform torchvision pretrained model into zcls format
"""
import os
from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152, resnext50_32x4d, \
resnext101_32x8d
from zcls.model.recognizers.resnet.resnet import ResNet
from zcls.config import cfg
from zcls.util.checkpoint import CheckPointer
def convert(torchvision_resnet, zcls_resnet):
torchvision_resnet_dict = torchvision_resnet.state_dict()
zcls_resnet_dict = zcls_resnet.state_dict()
for k, v in torchvision_resnet_dict.items():
if 'downsample' in k:
zcls_resnet_dict[f"backbone.{k.replace('downsample', 'down_sample')}"] = v
elif 'layer' in k:
zcls_resnet_dict[f'backbone.{k}'] = v
elif 'fc' in k:
zcls_resnet_dict[f'head.{k}'] = v
elif 'conv1.weight' == k:
zcls_resnet_dict['backbone.stem.0.weight'] = v
elif 'bn1' in k:
zcls_resnet_dict[k.replace('bn1', 'backbone.stem.1')] = v
else:
raise ValueError("{k} doesn't exist")
return zcls_resnet_dict
def process(item, cfg_file):
if item == 'resnet18':
torchvision_resnet = resnet18(pretrained=True)
elif item == 'resnet34':
torchvision_resnet = resnet34(pretrained=True)
elif item == 'resnet50':
torchvision_resnet = resnet50(pretrained=True)
elif item == 'resnet101':
torchvision_resnet = resnet101(pretrained=True)
elif item == 'resnet152':
torchvision_resnet = resnet152(pretrained=True)
elif item == 'resnext50_32x4d':
torchvision_resnet = resnext50_32x4d(pretrained=True)
elif item == 'resnext101_32x8d':
torchvision_resnet = resnext101_32x8d(pretrained=True)
else:
raise ValueError(f"{item} doesn't exists")
cfg.merge_from_file(cfg_file)
zcls_resnet = ResNet(cfg)
zcls_resnet_dict = convert(torchvision_resnet, zcls_resnet)
zcls_resnet.load_state_dict(zcls_resnet_dict)
res_dir = 'outputs/converters/'
if not os.path.exists(res_dir):
os.makedirs(res_dir)
checkpoint = CheckPointer(model=zcls_resnet, save_dir=res_dir, save_to_disk=True)
checkpoint.save(f'{item}_imagenet')
if __name__ == '__main__':
item_list = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d']
cfg_file_list = [
'r18_zcls_imagenet_224.yaml',
'r34_zcls_imagenet_224.yaml',
'r50_zcls_imagenet_224.yaml',
'r101_zcls_imagenet_224.yaml',
'r152_zcls_imagenet_224.yaml',
'rxt50_32x4d_zcls_imagenet_224.yaml',
'rxt101_32x8d_zcls_imagenet_224.yaml'
]
prefix_path = 'configs/benchmarks/resnet'
for item, cfg_file in zip(item_list, cfg_file_list):
config_path = os.path.join(prefix_path, cfg_file)
print(config_path)
process(item, config_path)
|
src/pretix/base/exporters/json.py
|
fabm3n/pretix
| 1,248 |
92126
|
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: <NAME>
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import json
from decimal import Decimal
from django.core.serializers.json import DjangoJSONEncoder
from django.dispatch import receiver
from ..exporter import BaseExporter
from ..signals import register_data_exporters
class JSONExporter(BaseExporter):
identifier = 'json'
verbose_name = 'Order data (JSON)'
def render(self, form_data):
jo = {
'event': {
'name': str(self.event.name),
'slug': self.event.slug,
'organizer': {
'name': str(self.event.organizer.name),
'slug': self.event.organizer.slug
},
'categories': [
{
'id': category.id,
'name': str(category.name),
'internal_name': category.internal_name
} for category in self.event.categories.all()
],
'items': [
{
'id': item.id,
'name': str(item.name),
'internal_name': str(item.internal_name),
'category': item.category_id,
'price': item.default_price,
'tax_rate': item.tax_rule.rate if item.tax_rule else Decimal('0.00'),
'tax_name': str(item.tax_rule.name) if item.tax_rule else None,
'admission': item.admission,
'active': item.active,
'variations': [
{
'id': variation.id,
'active': variation.active,
'price': variation.default_price if variation.default_price is not None else
item.default_price,
'name': str(variation)
} for variation in item.variations.all()
]
} for item in self.event.items.select_related('tax_rule').prefetch_related('variations')
],
'questions': [
{
'id': question.id,
'question': str(question.question),
'type': question.type
} for question in self.event.questions.all()
],
'orders': [
{
'code': order.code,
'status': order.status,
'user': order.email,
'datetime': order.datetime,
'fees': [
{
'type': fee.fee_type,
'description': fee.description,
'value': fee.value,
} for fee in order.fees.all()
],
'total': order.total,
'positions': [
{
'id': position.id,
'item': position.item_id,
'variation': position.variation_id,
'price': position.price,
'attendee_name': position.attendee_name,
'attendee_email': position.attendee_email,
'secret': position.secret,
'addon_to': position.addon_to_id,
'answers': [
{
'question': answer.question_id,
'answer': answer.answer
} for answer in position.answers.all()
]
} for position in order.positions.all()
]
} for order in
self.event.orders.all().prefetch_related('positions', 'positions__answers', 'fees')
],
'quotas': [
{
'id': quota.id,
'size': quota.size,
'items': [item.id for item in quota.items.all()],
'variations': [variation.id for variation in quota.variations.all()],
} for quota in self.event.quotas.all().prefetch_related('items', 'variations')
]
}
}
return '{}_pretixdata.json'.format(self.event.slug), 'application/json', json.dumps(jo, cls=DjangoJSONEncoder)
@receiver(register_data_exporters, dispatch_uid="exporter_json")
def register_json_export(sender, **kwargs):
return JSONExporter
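# Note added for clarity (not part of the original file): render() follows the
# BaseExporter contract of returning a (filename, mime type, payload) tuple;
# form_data is accepted for interface compatibility but is not used by this exporter.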
|
cctbx/regression/tst_pair_asu_table.py
|
dperl-sol/cctbx_project
| 155 |
92135
|
from __future__ import absolute_import, division, print_function
from iotbx.kriber import strudat
from cctbx import geometry_restraints
from cctbx import crystal
from cctbx.array_family import flex
import scitbx.math
from scitbx import matrix
from libtbx.test_utils import approx_equal, show_diff
from libtbx.utils import format_cpu_times
import libtbx.load_env
from libtbx import dict_with_default_0
from six.moves import cStringIO as StringIO
import math
import sys, os
from six.moves import range
from six.moves import zip
def exercise_icosahedron(max_level=2, verbose=0):
for level in range(0,max_level+1):
if (0 or verbose):
print("level:", level)
icosahedron = scitbx.math.icosahedron(level=level)
try:
distance_cutoff = icosahedron.next_neighbors_distance()*(1+1.e-3)
estimated_distance_cutoff = False
except RuntimeError as e:
assert str(e) == "next_neighbors_distance not known."
distance_cutoff = 0.4/(2**(level-1))
estimated_distance_cutoff = True
asu_mappings = crystal.direct_space_asu.non_crystallographic_asu_mappings(
sites_cart=icosahedron.sites)
pair_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
pair_asu_table.add_all_pairs(distance_cutoff=distance_cutoff)
if (0 or verbose):
ps = pair_asu_table.show_distances(sites_cart=icosahedron.sites) \
.distances_info
print("level", level, "min", flex.min(ps.distances))
print(" ", " ", "max", flex.max(ps.distances))
assert ps.pair_counts.all_eq(pair_asu_table.pair_counts())
if (level == 0):
for d in ps.distances:
assert approx_equal(d, 1.0514622242382672)
elif (level < 2):
s = StringIO()
ps = pair_asu_table.show_distances(sites_cart=icosahedron.sites, out=s) \
.distances_info
assert ps.pair_counts.all_eq(pair_asu_table.pair_counts())
assert len(s.getvalue().splitlines()) == [72,320][level]
del s
if (level == 0):
assert pair_asu_table.pair_counts().all_eq(5)
else:
assert pair_asu_table.pair_counts().all_eq(3)
del pair_asu_table
max_distance = crystal.neighbors_fast_pair_generator(
asu_mappings=asu_mappings,
distance_cutoff=distance_cutoff).max_distance_sq()**.5
if (0 or verbose):
print("max_distance:", max_distance)
if (not estimated_distance_cutoff):
assert approx_equal(max_distance, icosahedron.next_neighbors_distance())
assert approx_equal(max_distance/icosahedron.next_neighbors_distance(),1)
def is_sym_equiv_interaction_simple(unit_cell,
i_seq,
site_frac_i,
j_seq,
site_frac_j,
special_op_j,
rt_mx_ji_1,
rt_mx_ji_2):
f = unit_cell.shortest_vector_sq()**.5*.1
trial_shifts = [f*x for x in [math.sqrt(2),math.sqrt(3),math.sqrt(5)]]
frac = unit_cell.fractionalize
orth = unit_cell.orthogonalize
dist = unit_cell.distance
for shifts in [[0,0,0], trial_shifts]:
site_j_mod = special_op_j * frac([x+s
for x,s in zip(orth(site_frac_j),shifts)])
if (shifts == [0,0,0] or j_seq != i_seq):
site_i_mod = site_frac_i
else:
site_i_mod = site_j_mod
d1 = dist(rt_mx_ji_1 * site_j_mod, site_i_mod)
d2 = dist(rt_mx_ji_2 * site_j_mod, site_i_mod)
if (shifts == [0,0,0]):
if (abs(d1-d2) >= 1.e-3):
return False
return abs(d1-d2) < 1.e-3
def check_sym_equiv(structure, bond_asu_table, weak=False):
unit_cell = structure.unit_cell()
asu_mappings = bond_asu_table.asu_mappings()
sites_frac = structure.scatterers().extract_sites()
for i_seq,records in enumerate(bond_asu_table.table()):
rt_mx_i_inv = asu_mappings.get_rt_mx(i_seq, 0).inverse()
for j_seq,j_sym_groups in records.items():
i_group_rt_mx_jis = []
for i_group,j_sym_group in enumerate(j_sym_groups):
for j_sym in j_sym_group:
rt_mx_ji = rt_mx_i_inv.multiply(asu_mappings.get_rt_mx(j_seq, j_sym))
i_group_rt_mx_jis.append((i_group,rt_mx_ji))
for gi,ri in i_group_rt_mx_jis:
for gj,rj in i_group_rt_mx_jis:
is_sym_equiv = is_sym_equiv_interaction_simple(
unit_cell=unit_cell,
i_seq=i_seq,
site_frac_i=sites_frac[i_seq],
j_seq=j_seq,
site_frac_j=sites_frac[j_seq],
special_op_j=asu_mappings.special_op(j_seq),
rt_mx_ji_1=ri,
rt_mx_ji_2=rj)
if (is_sym_equiv):
if (not weak): assert gi == gj
else:
assert gi != gj
def check_connectivities(bond_asu_table, connectivities, verbose=0):
n_mismatches = 0
for records,connectivity in zip(bond_asu_table.table(), connectivities):
n = 0
for j_seq,j_sym_groups in records.items():
for j_sym_group in j_sym_groups:
n += len(j_sym_group)
if (0 or verbose):
print("n, connectivity:", n, connectivity)
assert n == connectivity
def exercise_incremental_pairs(
structure,
distance_cutoff,
reference_pair_asu_table):
ip = structure.incremental_pairs(distance_cutoff=distance_cutoff)
for site_frac in structure.sites_frac():
ip.process_site_frac(original_site=site_frac)
assert ip.pair_asu_table().pair_counts().all_eq(
reference_pair_asu_table.pair_counts())
assert ip.pair_asu_table() == reference_pair_asu_table
def exercise_site_cluster_analysis(
structure,
distance_cutoff,
reference_pair_asu_table):
pat_selection = flex.size_t()
pat_keep = []
for i_seq,pair_asu_dict in enumerate(reference_pair_asu_table.table()):
for j_seq,pair_asu_j_sym_groups in pair_asu_dict.items():
if (j_seq == i_seq):
for j_sym_group in pair_asu_j_sym_groups:
assert 0 not in j_sym_group
pat_keep.append(False)
break
if (j_seq < i_seq and pat_keep[j_seq]):
pat_keep.append(False)
break
else:
pat_keep.append(True)
pat_selection.append(i_seq)
assert reference_pair_asu_table.cluster_pivot_selection().all_eq(
pat_selection)
assert reference_pair_asu_table.cluster_pivot_selection(
max_clusters=3).all_eq(pat_selection[:3])
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = flex.size_t()
for i_seq,site_frac in enumerate(structure.sites_frac()):
if (sca.process_site_frac(original_site=site_frac)):
sca_selection.append(i_seq)
assert sca_selection.all_eq(pat_selection)
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_frac(
original_sites=structure.sites_frac(),
site_symmetry_table=structure.site_symmetry_table())
assert sca_selection.all_eq(pat_selection)
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_frac(
original_sites=structure.sites_frac(),
site_symmetry_table=structure.site_symmetry_table(),
max_clusters=3)
assert sca_selection.all_eq(pat_selection[:3])
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_frac(
original_sites=structure.sites_frac())
assert sca_selection.all_eq(pat_selection)
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_frac(
original_sites=structure.sites_frac(),
max_clusters=3)
assert sca_selection.all_eq(pat_selection[:3])
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_cart(
original_sites=structure.sites_cart(),
site_symmetry_table=structure.site_symmetry_table())
assert sca_selection.all_eq(pat_selection)
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_cart(
original_sites=structure.sites_cart(),
site_symmetry_table=structure.site_symmetry_table(),
max_clusters=3)
assert sca_selection.all_eq(pat_selection[:3])
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_cart(
original_sites=structure.sites_cart())
assert sca_selection.all_eq(pat_selection)
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_cart(
original_sites=structure.sites_cart(),
max_clusters=3)
assert sca_selection.all_eq(pat_selection[:3])
#
sca = structure.site_cluster_analysis(
min_distance=distance_cutoff,
general_positions_only=True)
sca_selection = sca.process_sites_frac(
original_sites=structure.sites_frac(),
site_symmetry_table=structure.site_symmetry_table())
pat_selection = reference_pair_asu_table.cluster_pivot_selection(
general_positions_only=True)
assert sca_selection.all_eq(pat_selection)
def exercise(
structure,
distance_cutoff,
connectivities=None,
weak_check_sym_equiv=False,
verbose=0):
if (0 or verbose):
print("distance_cutoff:", distance_cutoff)
asu_mappings = structure.asu_mappings(buffer_thickness=distance_cutoff)
for i_pass in range(2):
if (i_pass == 0):
bond_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
bond_asu_table.add_all_pairs(
distance_cutoff=distance_cutoff)
exercise_incremental_pairs(
structure=structure,
distance_cutoff=distance_cutoff,
reference_pair_asu_table=bond_asu_table)
exercise_site_cluster_analysis(
structure=structure,
distance_cutoff=distance_cutoff,
reference_pair_asu_table=bond_asu_table)
else:
bond_sym_table = bond_asu_table.extract_pair_sym_table()
bond_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
bond_asu_table.add_pair_sym_table(
sym_table=bond_sym_table)
def exercise_symmetry_equivalent_pair_interactions():
asu_mappings = bond_asu_table.asu_mappings()
for i_seq, j_seq_dict in enumerate(bond_asu_table.table()):
rt_mx_i = asu_mappings.get_rt_mx(i_seq, 0)
rt_mx_i_inv = rt_mx_i.inverse()
for j_seq,j_sym_group in j_seq_dict.items():
scs = structure.scatterers()
def get_coords(symops):
result = []
for s in symops:
result.append(numstr(s * scs[j_seq].site))
result.sort()
return result
prev_equiv_rt_mx_ji = None
for j_syms in j_sym_group:
equiv_rt_mx_ji = []
for j_sym in j_syms:
rt_mx_ji = rt_mx_i_inv.multiply(
asu_mappings.get_rt_mx(j_seq, j_sym))
equiv_rt_mx_ji.append(rt_mx_ji)
old_coords = get_coords(equiv_rt_mx_ji)
all_sepi = set()
for rt_mx_ji in equiv_rt_mx_ji:
_ = asu_mappings.site_symmetry_table()
sepi_obj = _.symmetry_equivalent_pair_interactions(
i_seq=i_seq, j_seq=j_seq, rt_mx_ji=rt_mx_ji)
sepi = sepi_obj.get()
new_coords = get_coords(sepi)
assert new_coords == old_coords
all_sepi.add(";".join([str(_) for _ in sepi]))
for _ in equiv_rt_mx_ji:
assert sepi_obj.is_equivalent(rt_mx_ji=_)
if (prev_equiv_rt_mx_ji is not None):
for _ in prev_equiv_rt_mx_ji:
assert not sepi_obj.is_equivalent(rt_mx_ji=_)
assert len(all_sepi) == 1
prev_equiv_rt_mx_ji = equiv_rt_mx_ji
exercise_symmetry_equivalent_pair_interactions()
def exercise_pair_sym_table_tidy_and_full_connectivity():
def check_one_way(pst):
for sym_pair in pst.iterator():
i_seq, j_seq = sym_pair.i_seqs()
assert i_seq <= j_seq
assert len(pst[i_seq][j_seq]) > 0
if (i_seq != j_seq):
assert i_seq not in pst[j_seq]
def check_two_way(pst):
for sym_pair in pst.iterator():
i_seq, j_seq = sym_pair.i_seqs()
assert len(pst[i_seq][j_seq]) > 0
assert len(pst[j_seq][i_seq]) > 0
pst_extracted = bond_sym_table.tidy(
site_symmetry_table=structure.site_symmetry_table())
check_one_way(pst_extracted)
sio_extracted = StringIO()
structure.pair_sym_table_show(pst_extracted, out=sio_extracted)
pst = pst_extracted.tidy(
site_symmetry_table=structure.site_symmetry_table())
check_one_way(pst)
sio = StringIO()
structure.pair_sym_table_show(pst, out=sio)
assert not show_diff(sio.getvalue(), sio_extracted.getvalue())
pst = pst_extracted.full_connectivity()
check_two_way(pst)
pst_full = pst_extracted.full_connectivity(
site_symmetry_table=structure.site_symmetry_table())
check_two_way(pst_full)
sio = StringIO()
structure.pair_sym_table_show(
pst_full, is_full_connectivity=True, out=sio)
assert sio.getvalue().find("sym. equiv.") < 0
pst = pst_full.tidy(
site_symmetry_table=structure.site_symmetry_table())
check_one_way(pst)
sio = StringIO()
structure.pair_sym_table_show(pst, out=sio)
assert not show_diff(sio.getvalue(), sio_extracted.getvalue())
pst_full2 = pst_full.full_connectivity(
site_symmetry_table=structure.site_symmetry_table())
check_two_way(pst_full2)
pst = pst_full2.tidy(
site_symmetry_table=structure.site_symmetry_table())
check_one_way(pst)
sio = StringIO()
structure.pair_sym_table_show(pst, out=sio)
assert not show_diff(sio.getvalue(), sio_extracted.getvalue())
exercise_pair_sym_table_tidy_and_full_connectivity()
if (connectivities is not None):
check_connectivities(bond_asu_table, connectivities, verbose)
check_sym_equiv(
structure=structure,
bond_asu_table=bond_asu_table,
weak=weak_check_sym_equiv)
def exercise_bond_sorted_asu_proxies(
structure,
distance_cutoff):
asu_mappings = structure.asu_mappings(buffer_thickness=distance_cutoff)
bond_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
bond_asu_table.add_all_pairs(distance_cutoff=distance_cutoff)
bond_sym_table = bond_asu_table.extract_pair_sym_table()
el = bond_sym_table.simple_edge_list()
es = bond_sym_table.full_simple_connectivity()
assert es.size() == bond_sym_table.size()
for i,j in el:
assert j in es[i]
assert i in es[j]
npis = bond_sym_table.number_of_pairs_involving_symmetry()
assert len(list(bond_sym_table.iterator())) == len(el) + npis
bond_params_table = geometry_restraints.bond_params_table(
structure.scatterers().size())
for i_seq,bond_sym_dict in enumerate(bond_sym_table):
for j_seq in bond_sym_dict.keys():
if (i_seq > j_seq):
j_seq,i_seq = i_seq,j_seq
bond_params_table[i_seq][j_seq] = geometry_restraints.bond_params(
distance_ideal=3.1, weight=1)
proxies_fast = geometry_restraints.bond_sorted_asu_proxies(
bond_params_table=bond_params_table,
bond_asu_table=bond_asu_table)
proxies_conservative = geometry_restraints.bond_sorted_asu_proxies(
pair_asu_table=bond_asu_table)
pair_generator = crystal.neighbors_simple_pair_generator(
asu_mappings=asu_mappings,
distance_cutoff=distance_cutoff,
minimal=False)
proxies_slow = geometry_restraints.bond_sorted_asu_proxies(
asu_mappings=asu_mappings)
for pair in pair_generator:
proxies_slow.process(geometry_restraints.bond_asu_proxy(
pair=pair,
distance_ideal=3.1,
weight=1))
def compare_proxies(proxies_1, proxies_2):
assert proxies_1.simple.size() == proxies_2.simple.size()
assert proxies_1.asu.size() == proxies_2.asu.size()
ctrl = {}
for proxy in proxies_1.simple:
assert proxy.i_seqs not in ctrl
ctrl[proxy.i_seqs] = 0
for proxy in proxies_2.simple:
assert proxy.i_seqs in ctrl
ctrl[proxy.i_seqs] += 1
assert list(ctrl.values()) == [1]*len(ctrl)
ctrl = {}
for proxy in proxies_1.asu:
key = proxy.i_seq,proxy.j_seq,proxy.j_sym
assert key not in ctrl
ctrl[key] = 0
for proxy in proxies_2.asu:
key = proxy.i_seq,proxy.j_seq,proxy.j_sym
assert key in ctrl
ctrl[key] += 1
assert list(ctrl.values()) == [1]*len(ctrl)
compare_proxies(proxies_1=proxies_fast, proxies_2=proxies_conservative)
compare_proxies(proxies_1=proxies_fast, proxies_2=proxies_slow)
sites_cart = structure.sites_cart()
for proxy in proxies_conservative.simple:
i,j = proxy.i_seqs
assert approx_equal(
abs(matrix.col(sites_cart[i]) - matrix.col(sites_cart[j])),
proxy.distance_ideal)
assert proxy.weight == 1
distance = proxies_conservative.asu_mappings().unit_cell().distance
get_rt_mx_ji = proxies_conservative.asu_mappings().get_rt_mx_ji
sites_frac = structure.sites_frac()
for proxy in proxies_conservative.asu:
assert approx_equal(
distance(
sites_frac[proxy.i_seq],
get_rt_mx_ji(pair=proxy) * sites_frac[proxy.j_seq]),
proxy.distance_ideal)
assert proxy.weight == 1
def py_pair_asu_table_angle_pair_asu_table(self):
asu_mappings = self.asu_mappings()
result = crystal.pair_asu_table(asu_mappings=asu_mappings)
for i_seq,asu_dict in enumerate(self.table()):
pair_list = []
for j_seq,j_sym_groups in asu_dict.items():
for i_group,j_sym_group in enumerate(j_sym_groups):
for j_sym in j_sym_group:
pair_list.append((j_seq,j_sym))
for i_jj1 in range(0,len(pair_list)-1):
jj1 = pair_list[i_jj1]
rt_mx_jj1_inv = asu_mappings.get_rt_mx(*jj1).inverse()
for i_jj2 in range(i_jj1+1,len(pair_list)):
jj2 = pair_list[i_jj2]
result.add_pair(
i_seq=jj1[0],
j_seq=jj2[0],
rt_mx_ji=rt_mx_jj1_inv.multiply(asu_mappings.get_rt_mx(*jj2)))
return result
def exercise_angle_pair_asu_table(
structure,
distance_cutoff,
connectivities,
reference_apatanl,
reference_cppc):
sg_asu_mappings = structure.asu_mappings(
buffer_thickness=2*distance_cutoff)
sg_pat = crystal.pair_asu_table(asu_mappings=sg_asu_mappings)
sg_pat.add_all_pairs(
distance_cutoff=distance_cutoff,
min_cubicle_edge=0)
# compare connectivities with reference
assert list(sg_pat.pair_counts()) == connectivities
#
p1_structure = structure.expand_to_p1()
p1_asu_mappings = p1_structure.asu_mappings(
buffer_thickness=2*distance_cutoff)
p1_pat = crystal.pair_asu_table(asu_mappings=p1_asu_mappings)
p1_pat.add_all_pairs(
distance_cutoff=distance_cutoff,
min_cubicle_edge=0)
sg_labels = structure.scatterers().extract_labels()
p1_labels = p1_structure.scatterers().extract_labels()
label_connect = dict(zip(sg_labels, sg_pat.pair_counts()))
for l,c in zip(p1_labels, p1_pat.pair_counts()):
# compare connectivities in original space group and in P1
assert label_connect[l] == c
#
sg_apat_py = py_pair_asu_table_angle_pair_asu_table(self=sg_pat)
sg_apat = sg_pat.angle_pair_asu_table()
assert sg_apat.as_nested_lists() == sg_apat_py.as_nested_lists()
sg_counts = {}
for i_seq,pair_asu_dict in enumerate(sg_apat.table()):
lbl_i = sg_labels[i_seq]
for j_seq,pair_asu_j_sym_groups in pair_asu_dict.items():
lbl_j = sg_labels[j_seq]
for j_sym_group in pair_asu_j_sym_groups:
sg_counts.setdefault(lbl_i, dict_with_default_0())[
lbl_j] += len(j_sym_group)
p1_apat = p1_pat.angle_pair_asu_table()
p1_counts = {}
for i_seq,pair_asu_dict in enumerate(p1_apat.table()):
lbl_i = p1_labels[i_seq]
for j_seq,pair_asu_j_sym_groups in pair_asu_dict.items():
lbl_j = p1_labels[j_seq]
for j_sym_group in pair_asu_j_sym_groups:
p1_counts.setdefault(lbl_i, dict_with_default_0())[
lbl_j] += len(j_sym_group)
# self-consistency check
multiplicities = {}
for sc in structure.scatterers():
multiplicities[sc.label] = sc.multiplicity()
assert sorted(p1_counts.keys()) == sorted(sg_counts.keys())
for lbl_i,sg_lc in sg_counts.items():
p1_lc = p1_counts[lbl_i]
assert sorted(p1_lc.keys()) == sorted(sg_lc.keys())
for lbl_j,sg_c in sg_lc.items():
p1_c = p1_lc[lbl_j]
assert p1_c == sg_c * multiplicities[lbl_i]
# compare with reference
apatanl = str(sg_apat.as_nested_lists()).replace(" ","")
if (reference_apatanl is not None):
assert apatanl == reference_apatanl
#
counts = []
for conserve_angles in [False, True]:
proxies = structure.conservative_pair_proxies(
bond_sym_table=sg_pat.extract_pair_sym_table(),
conserve_angles=conserve_angles)
counts.extend([proxies.bond.simple.size(), proxies.bond.asu.size()])
if (not conserve_angles):
assert proxies.angle is None
else:
counts.extend([proxies.angle.simple.size(), proxies.angle.asu.size()])
cppc = ",".join([str(c) for c in counts])
if (reference_cppc is not None):
assert cppc == reference_cppc
def exercise_all():
verbose = "--verbose" in sys.argv[1:]
exercise_icosahedron(verbose=verbose)
default_distance_cutoff = 3.5
regression_misc = libtbx.env.find_in_repositories("phenix_regression/misc")
if (regression_misc is None):
print("Skipping exercise_all(): phenix_regression/misc not available")
return
def get_reference_dict(file_name):
path = os.path.join(regression_misc, file_name)
if (not os.path.isfile(path)):
print("Skipping some tests: reference file not available:", path)
return None
result = {}
with open(path) as f:
lines = f.read().splitlines()
for line in lines:
tag, data = line.split()
assert not tag in result
result[tag] = data
return result
reference_apatanl_dict = get_reference_dict(
"angle_pair_asu_tables_as_nested_lists")
reference_cppc_dict = get_reference_dict(
"conservative_pair_proxies_counts")
file_names = []
for file_name in ["strudat_zeolite_atlas", "strudat_special_bonds"]:
path = os.path.join(regression_misc, file_name)
if (not os.path.isfile(path)):
print("Skipping %s test: input file not available" % file_name)
else:
file_names.append(path)
for file_name in file_names:
with open(file_name) as f:
strudat_entries = strudat.read_all_entries(f)
for i_entry,entry in enumerate(strudat_entries.entries):
if ( file_name.endswith("strudat_zeolite_atlas")
and not ("--full" in sys.argv[1:] or i_entry % 20 == 0)):
continue
if (0 or verbose):
print("strudat tag:", entry.tag)
structure = entry.as_xray_structure()
if (0 or verbose):
structure.show_summary().show_scatterers()
if (entry.title.startswith("cutoff")):
distance_cutoff = float(entry.title.split()[1])
else:
distance_cutoff = default_distance_cutoff
weak_check_sym_equiv = (
entry.reference.find("weak_check_sym_equiv") >= 0)
connectivities = entry.connectivities(all_or_nothing=True)
if (1):
exercise(
structure=structure,
distance_cutoff=distance_cutoff,
connectivities=connectivities,
weak_check_sym_equiv=weak_check_sym_equiv,
verbose=verbose)
if (0 or verbose):
print()
if (file_name.endswith("strudat_zeolite_atlas")):
exercise_bond_sorted_asu_proxies(
structure=structure,
distance_cutoff=distance_cutoff)
if (reference_apatanl_dict is None):
reference_apatanl = None
else:
assert entry.tag in reference_apatanl_dict
reference_apatanl = reference_apatanl_dict[entry.tag]
if (reference_cppc_dict is None):
reference_cppc = None
else:
assert entry.tag in reference_cppc_dict
reference_cppc = reference_cppc_dict[entry.tag]
exercise_angle_pair_asu_table(
structure=structure,
distance_cutoff=distance_cutoff,
connectivities=connectivities,
reference_apatanl=reference_apatanl,
reference_cppc=reference_cppc)
def run():
exercise_all()
print(format_cpu_times())
if (__name__ == "__main__"):
run()
|
platypush/backend/http/app/routes/plugins/camera/pi.py
|
BlackLight/platypush
| 228 |
92137
|
<filename>platypush/backend/http/app/routes/plugins/camera/pi.py
import os
import tempfile
from flask import Response, Blueprint, send_from_directory
from platypush.backend.http.app import template_folder
from platypush.backend.http.app.utils import authenticate, send_request
from platypush.config import Config
from platypush.plugins.camera.pi import CameraPiPlugin
camera_pi = Blueprint('camera-pi', __name__, template_folder=template_folder)
# Declare routes list
__routes__ = [
camera_pi,
]
def video_feed():
camera_conf = Config.get('camera.pi') or {}
camera = CameraPiPlugin(**camera_conf)
with camera:
while True:
output = camera.get_stream()
with output.ready:
output.ready.wait()
frame = output.frame
if frame and len(frame):
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@camera_pi.route('/camera/pi/frame', methods=['GET'])
@authenticate()
def get_frame_img():
filename = os.path.join(tempfile.gettempdir(), 'camera_pi.jpg')
response = send_request('camera.pi.take_picture', image_file=filename)
frame_file = (response.output or {}).get('image_file')
assert frame_file is not None
return send_from_directory(os.path.dirname(frame_file),
os.path.basename(frame_file))
@camera_pi.route('/camera/pi/stream', methods=['GET'])
@authenticate()
def get_stream_feed():
return Response(video_feed(),
mimetype='multipart/x-mixed-replace; boundary=frame')
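# Hedged usage note (not part of the original module): get_stream_feed() above
# returns an MJPEG stream (multipart/x-mixed-replace with a "frame" boundary),
# so it can be embedded directly in a web page, for example:
#
#     <img src="http://<host>:8008/camera/pi/stream">
#
# The host, the port (8008 is assumed here) and any authentication token
# required by @authenticate() depend on the deployment configuration.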
# vim:sw=4:ts=4:et:
|
Python3/721.py
|
rakhi2001/ecom7
| 854 |
92140
|
__________________________________________________________________________________________________
sample 196 ms submission
# List and collections are provided implicitly by the LeetCode judge
# environment; they are imported here so the submissions below are
# self-contained.
from typing import List
import collections

class Solution:
def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:
email_to_name = {}
graph = collections.defaultdict(set)
for acc in accounts:
# each account
name = acc[0] # e.g. John
for email in acc[1:]:
# each email for an account, e.g. <EMAIL>, <EMAIL>
graph[acc[1]].add(email) # draw an edge from the first email to itself and all other emails
                graph[email].add(acc[1]) # draw an edge from each email back to the first email
                email_to_name[email] = name # map each email to the account owner's name
# DFS
seen = set()
merged = []
for email in graph:
# each email
if email not in seen:
seen.add(email)
stack = [email]
component = []
while stack:
node = stack.pop()
component.append(node)
for nei in graph[node]:
if nei not in seen:
seen.add(nei)
stack.append(nei)
merged.append([email_to_name[email]] + sorted(component))
return merged
__________________________________________________________________________________________________
sample 17712 kb submission
class Solution:
def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:
visited = [False] * len(accounts)
email_acct_map = collections.defaultdict(list)
for idx, acct in enumerate(accounts):
emails = acct[1:]
for email in emails:
email_acct_map[email].append(idx)
stack = []
rslts = []
for idx, acct in enumerate(accounts):
if visited[idx]:
continue
stack.append(idx)
emails = set()
name = None
while stack:
acct_idx_next = stack.pop()
visited[acct_idx_next] = True
acct_next = accounts[acct_idx_next]
name = acct_next[0]
for email in acct_next[1:]:
emails.add(email)
nbrs = email_acct_map[email]
for nbr in nbrs:
if not visited[nbr]:
stack.append(nbr)
rslts.append([name] + sorted(list(emails)))
return rslts
__________________________________________________________________________________________________
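# Hedged usage sketch (not part of either submission above): running the last
# Solution defined in this file on a small made-up account list. The names and
# email addresses are illustrative placeholders.
if __name__ == "__main__":
    accounts = [
        ["John", "a@x.com", "b@x.com"],
        ["John", "b@x.com", "c@x.com"],
        ["Mary", "m@x.com"],
    ]
    # Expected: [["John", "a@x.com", "b@x.com", "c@x.com"], ["Mary", "m@x.com"]]
    print(Solution().accountsMerge(accounts))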
|
sdk/python/pulumi_azure/core/subscription.py
|
henriktao/pulumi-azure
| 109 |
92163
|
<reponame>henriktao/pulumi-azure
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SubscriptionArgs', 'Subscription']
@pulumi.input_type
class SubscriptionArgs:
def __init__(__self__, *,
subscription_name: pulumi.Input[str],
alias: Optional[pulumi.Input[str]] = None,
billing_scope_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workload: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Subscription resource.
:param pulumi.Input[str] subscription_name: The Name of the Subscription. This is the Display Name in the portal.
:param pulumi.Input[str] alias: The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
:param pulumi.Input[str] billing_scope_id: The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
:param pulumi.Input[str] subscription_id: The ID of the Subscription. Changing this forces a new Subscription to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the Subscription.
:param pulumi.Input[str] workload: The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
pulumi.set(__self__, "subscription_name", subscription_name)
if alias is not None:
pulumi.set(__self__, "alias", alias)
if billing_scope_id is not None:
pulumi.set(__self__, "billing_scope_id", billing_scope_id)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if workload is not None:
pulumi.set(__self__, "workload", workload)
@property
@pulumi.getter(name="subscriptionName")
def subscription_name(self) -> pulumi.Input[str]:
"""
The Name of the Subscription. This is the Display Name in the portal.
"""
return pulumi.get(self, "subscription_name")
@subscription_name.setter
def subscription_name(self, value: pulumi.Input[str]):
pulumi.set(self, "subscription_name", value)
@property
@pulumi.getter
def alias(self) -> Optional[pulumi.Input[str]]:
"""
The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "alias")
@alias.setter
def alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alias", value)
@property
@pulumi.getter(name="billingScopeId")
def billing_scope_id(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
"""
return pulumi.get(self, "billing_scope_id")
@billing_scope_id.setter
def billing_scope_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "billing_scope_id", value)
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Subscription. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "subscription_id")
@subscription_id.setter
def subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the Subscription.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def workload(self) -> Optional[pulumi.Input[str]]:
"""
The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "workload")
@workload.setter
def workload(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workload", value)
@pulumi.input_type
class _SubscriptionState:
def __init__(__self__, *,
alias: Optional[pulumi.Input[str]] = None,
billing_scope_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
subscription_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
workload: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Subscription resources.
:param pulumi.Input[str] alias: The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
:param pulumi.Input[str] billing_scope_id: The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
:param pulumi.Input[str] subscription_id: The ID of the Subscription. Changing this forces a new Subscription to be created.
:param pulumi.Input[str] subscription_name: The Name of the Subscription. This is the Display Name in the portal.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the Subscription.
:param pulumi.Input[str] tenant_id: The ID of the Tenant to which the subscription belongs.
:param pulumi.Input[str] workload: The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
if alias is not None:
pulumi.set(__self__, "alias", alias)
if billing_scope_id is not None:
pulumi.set(__self__, "billing_scope_id", billing_scope_id)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
if subscription_name is not None:
pulumi.set(__self__, "subscription_name", subscription_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if workload is not None:
pulumi.set(__self__, "workload", workload)
@property
@pulumi.getter
def alias(self) -> Optional[pulumi.Input[str]]:
"""
The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "alias")
@alias.setter
def alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alias", value)
@property
@pulumi.getter(name="billingScopeId")
def billing_scope_id(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
"""
return pulumi.get(self, "billing_scope_id")
@billing_scope_id.setter
def billing_scope_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "billing_scope_id", value)
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Subscription. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "subscription_id")
@subscription_id.setter
def subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_id", value)
@property
@pulumi.getter(name="subscriptionName")
def subscription_name(self) -> Optional[pulumi.Input[str]]:
"""
The Name of the Subscription. This is the Display Name in the portal.
"""
return pulumi.get(self, "subscription_name")
@subscription_name.setter
def subscription_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the Subscription.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Tenant to which the subscription belongs.
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter
def workload(self) -> Optional[pulumi.Input[str]]:
"""
The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "workload")
@workload.setter
def workload(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workload", value)
class Subscription(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alias: Optional[pulumi.Input[str]] = None,
billing_scope_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
subscription_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workload: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Manages an Alias for a Subscription - which adds an Alias to an existing Subscription, allowing it to be managed in the provider - or creates a new Subscription with a new Alias.
> **NOTE:** Destroying a Subscription controlled by this resource will place the Subscription into a cancelled state. It is possible to re-activate a subscription within 90-days of cancellation, after which time the Subscription is irrevocably deleted, and the Subscription ID cannot be re-used. For further information see [here](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/cancel-azure-subscription#what-happens-after-subscription-cancellation). Users can optionally delete a Subscription once 72 hours have passed, however, this functionality is not suitable for this provider. A `Deleted` subscription cannot be reactivated.
> **NOTE:** It is not possible to destroy (cancel) a subscription if it contains resources. If resources are present that are not managed by the provider then these will need to be removed before the Subscription can be destroyed.
> **NOTE:** Azure supports Multiple Aliases per Subscription, however, to reliably manage this resource in this provider only a single Alias is supported.
## Example Usage
### Creating A New Alias And Subscription For An Enrollment Account
```python
import pulumi
import pulumi_azure as azure
example_enrollment_account_scope = azure.billing.get_enrollment_account_scope(billing_account_name="1234567890",
enrollment_account_name="0123456")
example_subscription = azure.core.Subscription("exampleSubscription",
subscription_name="My Example EA Subscription",
billing_scope_id=example_enrollment_account_scope.id)
```
### Creating A New Alias And Subscription For A Microsoft Customer Account
```python
import pulumi
import pulumi_azure as azure
example_mca_account_scope = azure.billing.get_mca_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cabf4-7321-4cf9-be59-df0c77ca51de_2019-05-31",
billing_profile_name="PE2Q-NOIT-BG7-TGB",
invoice_section_name="MTT4-OBS7-PJA-TGB")
example_subscription = azure.core.Subscription("exampleSubscription",
subscription_name="My Example MCA Subscription",
billing_scope_id=example_mca_account_scope.id)
```
### Creating A New Alias And Subscription For A Microsoft Partner Account
```python
import pulumi
import pulumi_azure as azure
example_mpa_account_scope = azure.billing.get_mpa_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cabf4-7321-4cf9-be59-df0c77ca51de_2019-05-31",
customer_name="2281f543-7321-4cf9-1e23-edb4Oc31a31c")
example_subscription = azure.core.Subscription("exampleSubscription",
subscription_name="My Example MPA Subscription",
billing_scope_id=example_mpa_account_scope.id)
```
### Adding An Alias To An Existing Subscription
```python
import pulumi
import pulumi_azure as azure
example = azure.core.Subscription("example",
alias="examplesub",
subscription_id="12345678-12234-5678-9012-123456789012",
subscription_name="My Example Subscription")
```
## Import
Subscriptions can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:core/subscription:Subscription example "/providers/Microsoft.Subscription/aliases/subscription1"
```
        In this scenario, the `subscription_id` property can be completed and the provider will assume control of the existing subscription by creating an Alias. See the `Adding An Alias To An Existing Subscription` example above. This provider requires an alias to correctly manage Subscription resources due to Azure Subscription API design.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] alias: The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
:param pulumi.Input[str] billing_scope_id: The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
:param pulumi.Input[str] subscription_id: The ID of the Subscription. Changing this forces a new Subscription to be created.
:param pulumi.Input[str] subscription_name: The Name of the Subscription. This is the Display Name in the portal.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the Subscription.
:param pulumi.Input[str] workload: The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SubscriptionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Manages an Alias for a Subscription - which adds an Alias to an existing Subscription, allowing it to be managed in the provider - or creates a new Subscription with a new Alias.
> **NOTE:** Destroying a Subscription controlled by this resource will place the Subscription into a cancelled state. It is possible to re-activate a subscription within 90-days of cancellation, after which time the Subscription is irrevocably deleted, and the Subscription ID cannot be re-used. For further information see [here](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/cancel-azure-subscription#what-happens-after-subscription-cancellation). Users can optionally delete a Subscription once 72 hours have passed, however, this functionality is not suitable for this provider. A `Deleted` subscription cannot be reactivated.
> **NOTE:** It is not possible to destroy (cancel) a subscription if it contains resources. If resources are present that are not managed by the provider then these will need to be removed before the Subscription can be destroyed.
> **NOTE:** Azure supports Multiple Aliases per Subscription, however, to reliably manage this resource in this provider only a single Alias is supported.
## Example Usage
### Creating A New Alias And Subscription For An Enrollment Account
```python
import pulumi
import pulumi_azure as azure
example_enrollment_account_scope = azure.billing.get_enrollment_account_scope(billing_account_name="1234567890",
enrollment_account_name="0123456")
example_subscription = azure.core.Subscription("exampleSubscription",
subscription_name="My Example EA Subscription",
billing_scope_id=example_enrollment_account_scope.id)
```
### Creating A New Alias And Subscription For A Microsoft Customer Account
```python
import pulumi
import pulumi_azure as azure
example_mca_account_scope = azure.billing.get_mca_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cabf4-7321-4cf9-be59-df0c77ca51de_2019-05-31",
billing_profile_name="PE2Q-NOIT-BG7-TGB",
invoice_section_name="MTT4-OBS7-PJA-TGB")
example_subscription = azure.core.Subscription("exampleSubscription",
subscription_name="My Example MCA Subscription",
billing_scope_id=example_mca_account_scope.id)
```
### Creating A New Alias And Subscription For A Microsoft Partner Account
```python
import pulumi
import pulumi_azure as azure
example_mpa_account_scope = azure.billing.get_mpa_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cab<PASSWORD>-<PASSWORD>-<PASSWORD>-df0c77ca51de_2019-05-31",
customer_name="<PASSWORD>")
example_subscription = azure.core.Subscription("exampleSubscription",
subscription_name="My Example MPA Subscription",
billing_scope_id=example_mpa_account_scope.id)
```
### Adding An Alias To An Existing Subscription
```python
import pulumi
import pulumi_azure as azure
example = azure.core.Subscription("example",
alias="examplesub",
subscription_id="12345678-12234-5678-9012-123456789012",
subscription_name="My Example Subscription")
```
## Import
Subscriptions can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:core/subscription:Subscription example "/providers/Microsoft.Subscription/aliases/subscription1"
```
        In this scenario, the `subscription_id` property can be completed and the provider will assume control of the existing subscription by creating an Alias. See the `Adding An Alias To An Existing Subscription` example above. This provider requires an alias to correctly manage Subscription resources due to Azure Subscription API design.
:param str resource_name: The name of the resource.
:param SubscriptionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SubscriptionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alias: Optional[pulumi.Input[str]] = None,
billing_scope_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
subscription_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workload: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SubscriptionArgs.__new__(SubscriptionArgs)
__props__.__dict__["alias"] = alias
__props__.__dict__["billing_scope_id"] = billing_scope_id
__props__.__dict__["subscription_id"] = subscription_id
if subscription_name is None and not opts.urn:
raise TypeError("Missing required property 'subscription_name'")
__props__.__dict__["subscription_name"] = subscription_name
__props__.__dict__["tags"] = tags
__props__.__dict__["workload"] = workload
__props__.__dict__["tenant_id"] = None
super(Subscription, __self__).__init__(
'azure:core/subscription:Subscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
alias: Optional[pulumi.Input[str]] = None,
billing_scope_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
subscription_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
workload: Optional[pulumi.Input[str]] = None) -> 'Subscription':
"""
Get an existing Subscription resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] alias: The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
:param pulumi.Input[str] billing_scope_id: The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
:param pulumi.Input[str] subscription_id: The ID of the Subscription. Changing this forces a new Subscription to be created.
:param pulumi.Input[str] subscription_name: The Name of the Subscription. This is the Display Name in the portal.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the Subscription.
:param pulumi.Input[str] tenant_id: The ID of the Tenant to which the subscription belongs.
:param pulumi.Input[str] workload: The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SubscriptionState.__new__(_SubscriptionState)
__props__.__dict__["alias"] = alias
__props__.__dict__["billing_scope_id"] = billing_scope_id
__props__.__dict__["subscription_id"] = subscription_id
__props__.__dict__["subscription_name"] = subscription_name
__props__.__dict__["tags"] = tags
__props__.__dict__["tenant_id"] = tenant_id
__props__.__dict__["workload"] = workload
return Subscription(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def alias(self) -> pulumi.Output[str]:
"""
The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "alias")
@property
@pulumi.getter(name="billingScopeId")
def billing_scope_id(self) -> pulumi.Output[Optional[str]]:
"""
The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
"""
return pulumi.get(self, "billing_scope_id")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> pulumi.Output[str]:
"""
The ID of the Subscription. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter(name="subscriptionName")
def subscription_name(self) -> pulumi.Output[str]:
"""
The Name of the Subscription. This is the Display Name in the portal.
"""
return pulumi.get(self, "subscription_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the Subscription.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[str]:
"""
The ID of the Tenant to which the subscription belongs.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def workload(self) -> pulumi.Output[Optional[str]]:
"""
The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "workload")
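# Hedged usage sketch (not part of the generated module): adopting an existing
# Subscription alias into a Pulumi program via the static `get` method defined
# above. The resource id is the one shown in the Import example in the
# docstrings; the logical name "existing" is an illustrative assumption.
def _example_get_existing_subscription() -> 'Subscription':
    return Subscription.get(
        "existing",
        id="/providers/Microsoft.Subscription/aliases/subscription1")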
|
tricks/copy_variant_set_to_prim/python/copy_variant_set_to_prim.py
|
jingtaoh/USD-Cookbook
| 332 |
92170
|
<filename>tricks/copy_variant_set_to_prim/python/copy_variant_set_to_prim.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""An example module that shows how to copy SdfPath objects using the SDF API."""
# IMPORT THIRD-PARTY LIBRARIES
from pxr import Sdf
def main():
"""Run the main execution of the current script."""
source = Sdf.Layer.CreateAnonymous()
root = source.pseudoRoot
prim = Sdf.PrimSpec(root, "SomePrim", Sdf.SpecifierDef)
variant_set = Sdf.VariantSetSpec(prim, "SomeVariantSet")
variant = Sdf.VariantSpec(variant_set, "SomeVariant")
Sdf.PrimSpec(variant.primSpec, "InnerPrim", Sdf.SpecifierDef)
destination = Sdf.Layer.CreateAnonymous()
Sdf.CopySpec(
source, "/SomePrim{SomeVariantSet=SomeVariant}", destination, "/DestinationPrim"
)
# XXX : Notice that we have to call `CreatePrimInLayer` here but
# we didn't need to run it in the last example. That's because
# in this example, the parent Prim path "/Another" doesn't
# exist yet and has to be created before data can be copied to
# "/Another/DestinationPrim".
#
# In the previous example, "/" is the parent of "/DestinationPrim".
# And "/" always exists. So we didn't need to call
# `CreatePrimInLayer`. But generally, you usually should.
#
destination_prim = "/Another/DestinationPrim"
Sdf.CreatePrimInLayer(destination, destination_prim)
Sdf.CopySpec(
source,
"/SomePrim{SomeVariantSet=SomeVariant}",
destination,
destination_prim,
)
print(destination.ExportToString())
if __name__ == "__main__":
main()
|
applications/graph/motif/model/autoencoder.py
|
jonesholger/lbann
| 194 |
92179
|
<filename>applications/graph/motif/model/autoencoder.py<gh_stars>100-1000
import lbann
import lbann.modules
class FullyConnectedAutoencoder(lbann.modules.Module):
"""Multilayer perceptron autoencoder."""
global_count = 0 # Static counter, used for default names
def __init__(
self,
data_dim,
latent_dim,
encoder_hidden_dims=[],
decoder_hidden_dims=[],
activation=lbann.Relu,
data_layout='data_parallel',
name=None,
):
super().__init__()
FullyConnectedAutoencoder.global_count += 1
# Module name
self.name = name
if not self.name:
self.name = f'fcautoencoder{FullyConnectedAutoencoder.global_count}'
# Encoder
self.encoder = []
for i, dim in enumerate(encoder_hidden_dims):
self.encoder.append(
lbann.modules.FullyConnectedModule(
size=dim,
bias=False,
activation=activation,
name=f'{self.name}_encoder{i}',
data_layout=data_layout,
)
)
self.encoder.append(
lbann.modules.FullyConnectedModule(
size=latent_dim,
bias=False,
activation=activation,
name=f'{self.name}_encoder{len(self.encoder)}',
data_layout=data_layout,
)
)
# Decoder
self.decoder = []
for i, dim in enumerate(decoder_hidden_dims):
self.decoder.append(
lbann.modules.FullyConnectedModule(
size=dim,
bias=False,
activation=activation,
name=f'{self.name}_decoder{i}',
data_layout=data_layout,
)
)
self.decoder.append(
lbann.modules.FullyConnectedModule(
size=data_dim,
bias=False,
activation=activation,
name=f'{self.name}_decoder{len(self.decoder)}',
data_layout=data_layout,
)
)
def forward(self, x):
for l in self.encoder:
x = l(x)
for l in self.decoder:
x = l(x)
return x
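# Hedged construction sketch (not part of the original module): instantiating
# the autoencoder defined above and applying it to an LBANN layer. The
# dimensions and hidden-layer sizes are illustrative assumptions; `x` is
# expected to be an existing lbann layer (e.g. the model input).
def _example_build_autoencoder(x):
    autoencoder = FullyConnectedAutoencoder(
        data_dim=784,
        latent_dim=32,
        encoder_hidden_dims=[256, 128],
        decoder_hidden_dims=[128, 256],
    )
    return autoencoder(x)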
|
pyclient/confluo/rpc/schema.py
|
Mu-L/confluo
| 1,398 |
92181
|
import struct
import time
import yaml
import yaml.resolver
from collections import OrderedDict
import data_types
from data_types import TypeID
def now_ns():
return int(time.time() * 10 ** 6)
class Schema:
""" The schema for the data in the atomic multilog.
"""
def __init__(self, columns):
""" Initializes the schema to the list of columns passed in.
Args:
columns: The list of columns that make up the schema.
"""
self.record_size_ = 0
self.columns_ = columns
for c in self.columns_:
self.record_size_ += c.data_type_.size_
def __str__(self):
""" Convert to string
Returns:
String representation of schema
"""
return str(self.columns_)
def record_size(self):
""" Get record size in bytes
Returns:
Record size in bytes
"""
return self.record_size_
def columns(self):
""" Get list of columns
Returns:
List of columns
"""
return self.columns_
def apply(self, data):
""" Adds data to the schema.
Args:
data: The data to add.
Returns:
The record.
"""
return Record(data, self)
def pack(self, rec):
""" Pack data into a record.
Args:
rec: The record to pack
Returns:
Packed record
"""
        packed = b""
if len(rec) == len(self.columns_):
off = 1
packed += struct.pack('Q', rec[0])
elif len(rec) == len(self.columns_) - 1:
off = 0
packed += struct.pack('Q', now_ns())
else:
raise ValueError("Record does not conform to schema: incorrect number of fields")
for f, c in zip(rec[off:], self.columns_[1:]):
packed += c.data_type_.pack(f)
return packed
class Column:
""" Container of values for a specific type in the schema.
"""
def __init__(self, idx, offset, data_type, name, min_value, max_value):
""" Initializes a column in the schema.
Args:
idx: The index of the column.
offset: The offset of the column.
data_type: The data type of values in the column.
name: The name of the column.
min_value: The minimum value of the column.
max_value: The maximum value of the column.
"""
self.idx_ = idx
self.offset_ = offset
self.data_type_ = data_type
self.name_ = name.upper()
        self.min_value_ = min_value
        self.max_value_ = max_value
def __str__(self):
""" Convert to string
Returns:
String representation of the column
"""
return '{} : {}'.format(self.name_, self.data_type_)
def apply(self, data):
""" Adds data to the column.
Args:
data: The data to add.
Returns:
A field containing the data.
"""
return Field(self.idx_, self.data_type_, data[self.offset_: self.offset_ + self.data_type_.size_])
class Record:
""" A collection of values containing different types.
"""
def __init__(self, data, schema):
"""
Initializes a record to the specified values.
Args:
data: The data the record should hold.
schema: The schema for the record.
"""
self.data_ = data
self.fields_ = [c.apply(self.data_) for c in schema.columns()]
def __str__(self):
""" Converts to string
Returns:
String representation of record
"""
return str([str(x.unpack()) for x in self.fields_])
def __getitem__(self, idx):
""" Get element at specified index
Args:
idx: Index into record
Returns:
Element at specified index
"""
return self.fields_[idx].unpack()
class Field:
""" Contains data stored as part of a record.
"""
def __init__(self, idx, data_type, data):
""" Initializes the field to the data passed in.
Args:
idx: The index of the field.
data_type: The data type the value of the field contains.
data: The data that the field contains.
"""
self.idx_ = idx
self.data_type_ = data_type
self.data_ = data
def unpack(self):
""" Unpacks the field to get the data.
Returns:
The data in the field.
"""
tid = self.data_type_.type_id_
if tid == TypeID.STRING:
format_code = str(self.data_type_.size_) + data_types.FORMAT_CODES[tid]
else:
format_code = data_types.FORMAT_CODES[tid]
return struct.unpack(format_code, self.data_)[0]
class SchemaBuilder:
""" Builder of a schema for the atomic multilog.
"""
def __init__(self):
""" Initializes a default schema builder.
"""
self.user_provided_ts_ = False
self.offset_ = 0
self.columns_ = []
timestamp_col = Column(0, 0, data_types.ULONG_TYPE, "TIMESTAMP", None, None)
self.columns_.append(timestamp_col)
self.offset_ += data_types.ULONG_TYPE.size_
def add_column(self, data_type, name, min_value=None, max_value=None):
""" Adds a column to the schema builder.
Args:
data_type: The data type of the column.
name: The name of the column.
min_value: The minimum value of the column.
max_value: The maximum value of the column.
"""
if name.upper() == "TIMESTAMP":
self.user_provided_ts_ = True
if data_type != data_types.ULONG_TYPE:
raise ValueError("TIMESTAMP must be of ULONG_TYPE")
return self
col = Column(len(self.columns_), self.offset_, data_type, name, min_value, max_value)
self.columns_.append(col)
self.offset_ += data_type.size_
return self
def build(self):
""" Builds a schema by returning the list of columns.
Returns:
A list of columns that make up the schema.
"""
return self.columns_
def make_schema(s):
"""Converts a JSON-like string representation of the schema to our internal representation of the schema.
Args:
s: A JSON-like schema string
Returns:
Our internal representation of the schema.
"""
def ordered_load(stream):
class OrderedLoader(yaml.Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return OrderedDict(loader.construct_pairs(node))
OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
return yaml.load(stream, OrderedLoader)
s_parsed = ordered_load(s)
sb = SchemaBuilder()
for k in s_parsed:
sb.add_column(data_types.make_type(s_parsed[k]), k)
return Schema(sb.build())
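def _example_build_schema():
    """ Hedged usage sketch (not part of the original module): building a small
    schema with SchemaBuilder and Schema as defined above. The column names are
    illustrative assumptions; ULONG_TYPE is used for both columns because it is
    the only data type referenced elsewhere in this module.
    """
    builder = SchemaBuilder()
    builder.add_column(data_types.ULONG_TYPE, "a")
    builder.add_column(data_types.ULONG_TYPE, "b")
    schema = Schema(builder.build())
    # A TIMESTAMP column is added implicitly, so the schema has three columns.
    return schema.record_size(), [str(c) for c in schema.columns()]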
|
examples/image/cath/scripts/cath/networks/ResNet34/ResNet34.py
|
mariogeiger/se3cnn
| 170 |
92190
|
<reponame>mariogeiger/se3cnn
import torch.nn as nn
from functools import partial
from experiments.util.arch_blocks import *
class network(ResNet):
def __init__(self,
n_input,
n_output,
args):
features = [[[32]],
[[32] * 2] * 3,
[[64] * 2] * 4,
[[128] * 2] * 6,
[[256] * 2] * 3]
common_params = {
'downsample_by_pooling': args.downsample_by_pooling,
'conv_dropout_p': args.p_drop_conv,
}
global OuterBlock
OuterBlock = partial(OuterBlock,
res_block=partial(ResBlock, **common_params))
super().__init__(
OuterBlock(n_input, features[0], size=7),
OuterBlock(features[0][-1][-1], features[1], size=args.kernel_size, stride=1),
OuterBlock(features[1][-1][-1], features[2], size=args.kernel_size, stride=2),
OuterBlock(features[2][-1][-1], features[3], size=args.kernel_size, stride=2),
OuterBlock(features[3][-1][-1], features[4], size=args.kernel_size, stride=2),
AvgSpacial(),
nn.Dropout(p=args.p_drop_fully, inplace=True) if args.p_drop_fully is not None else None,
nn.Linear(features[4][-1][-1], n_output))
|
app/api/models/Base.py
|
nurely/lxdui
| 589 |
92202
|
<filename>app/api/models/Base.py<gh_stars>100-1000
from abc import ABC, abstractmethod
class Base(ABC):
@abstractmethod
def info(self):
raise NotImplementedError()
@abstractmethod
def create(self):
raise NotImplementedError()
@abstractmethod
def delete(self):
raise NotImplementedError()
@abstractmethod
def start(self):
raise NotImplementedError()
@abstractmethod
def stop(self):
raise NotImplementedError()
@abstractmethod
def restart(self):
raise NotImplementedError()
@abstractmethod
def update(self):
raise NotImplementedError()
@abstractmethod
def move(self):
raise NotImplementedError()
@abstractmethod
def clone(self):
raise NotImplementedError()
@abstractmethod
def snapshot(self):
raise NotImplementedError()
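# Hedged sketch (not part of the original module): a minimal concrete subclass
# showing how the abstract interface above is meant to be implemented. The
# class name and the trivial return values are illustrative assumptions.
class _ExampleResource(Base):
    def info(self): return {}
    def create(self): return True
    def delete(self): return True
    def start(self): return True
    def stop(self): return True
    def restart(self): return True
    def update(self): return True
    def move(self): return True
    def clone(self): return True
    def snapshot(self): return True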
|
randomcrop.py
|
createnewdemo/SPANet
| 177 |
92206
|
#!/usr/bin/env python
# from __future__ import division
import torch
import math
import random
from PIL import Image, ImageOps, ImageEnhance
import numbers
import torchvision.transforms.functional as F
import numpy as np
class RandomCrop(object):
"""Crop the given PIL Image at a random location.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
padding (int or sequence, optional): Optional padding on each border
            of the image. Default is 0, i.e. no padding. If a sequence of length
4 is provided, it is used to pad left, top, right, bottom borders
respectively.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception.
"""
def __init__(self, size, padding=0, pad_if_needed=False):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
h,w,_ = img.shape
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, img,img_gt):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
if self.padding > 0:
img = F.pad(img, self.padding)
# pad the width if needed
if self.pad_if_needed and img.size[0] < self.size[1]:
img = F.pad(img, (int((1 + self.size[1] - img.size[0]) / 2), 0))
# pad the height if needed
if self.pad_if_needed and img.size[1] < self.size[0]:
img = F.pad(img, (0, int((1 + self.size[0] - img.size[1]) / 2)))
i, j, h, w = self.get_params(img, self.size)
return img[i:i+self.size[0],j:j+self.size[1],:],img_gt[i:i+self.size[0],j:j+self.size[1],:]
# return F.crop(img, i, j, h, w),F.crop(img_gt, i, j, h, w)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
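def _example_random_crop():
    """Hedged usage sketch (not part of the original module): RandomCrop here
    operates on (H, W, C) numpy arrays rather than PIL images, since
    get_params reads img.shape. The array sizes below are illustrative
    assumptions.
    """
    crop = RandomCrop(64)
    img = np.zeros((128, 128, 3), dtype=np.uint8)
    img_gt = np.zeros((128, 128, 3), dtype=np.uint8)
    out, out_gt = crop(img, img_gt)
    return out.shape, out_gt.shape  # both are (64, 64, 3)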
class RandomResizedCrop(object):
"""Crop the given PIL Image to random size and aspect ratio.
    A crop of random size (default: of 0.8 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
        interpolation: Default: PIL.Image.BICUBIC
"""
def __init__(self, size, scale=(0.8, 1), ratio=(3/4., 4/3), interpolation=Image.BICUBIC):
self.size = (size, size)
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(*scale) * area
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback
w = min(img.size[0], img.size[1])
i = (img.size[1] - w) // 2
j = (img.size[0] - w) // 2
return i, j, w, w
def __call__(self, img,img_gt):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return (F.resized_crop(img, i, j, h, w, self.size, self.interpolation),F.resized_crop(img_gt, i, j, h, w, self.size, self.interpolation))
def __repr__(self):
        # The original relied on torchvision's private ``_pil_interpolation_to_str``
        # mapping, which is not imported here; fall back to the plain repr instead.
        interpolate_str = str(self.interpolation)
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
class RandomRotation(object):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=Image.BICUBIC, expand=1, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img,img_gt):
"""
img (PIL Image): Image to be rotated.
Returns:
PIL Image: Rotated image.
"""
angle = self.get_params(self.degrees)
return (F.rotate(img, angle, self.resample, self.expand, self.center),F.rotate(img_gt, angle, self.resample, self.expand, self.center))
def __repr__(self):
return self.__class__.__name__ + '(degrees={0})'.format(self.degrees)
class RandomHorizontallyFlip(object):
def __call__(self, img, mask):
if random.random() < 0.5:
return img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(Image.FLIP_LEFT_RIGHT)
return img, mask
class RandomVerticallyFlip(object):
def __call__(self, img, mask):
if random.random() < 0.5:
return img.transpose(Image.FLIP_TOP_BOTTOM), mask.transpose(Image.FLIP_TOP_BOTTOM)
return img, mask
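# --- Illustrative usage sketch, not part of the original file ---
# Demonstrates the paired-crop behaviour: the input image and its ground truth
# receive exactly the same crop window. The array shapes are arbitrary
# assumptions; note that RandomCrop.__call__ slices (H, W, C) numpy arrays,
# while the flip classes above operate on PIL Images, so a real pipeline
# should commit to one representation.
if __name__ == '__main__':
    crop = RandomCrop(64)
    img = np.random.rand(128, 128, 3).astype(np.float32)
    gt = np.random.rand(128, 128, 3).astype(np.float32)
    img_c, gt_c = crop(img, gt)
    print(img_c.shape, gt_c.shape)  # both (64, 64, 3)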
|
inverse_rl/models/tf_util.py
|
SaminYeasar/inverse_rl
| 220 |
92211
|
import tensorflow as tf
import numpy as np
REG_VARS = 'reg_vars'
def linear(X, dout, name, bias=True):
with tf.variable_scope(name):
dX = int(X.get_shape()[-1])
W = tf.get_variable('W', shape=(dX, dout))
tf.add_to_collection(REG_VARS, W)
if bias:
b = tf.get_variable('b', initializer=tf.constant(np.zeros(dout).astype(np.float32)))
else:
b = 0
return tf.matmul(X, W)+b
def discounted_reduce_sum(X, discount, axis=-1):
if discount != 1.0:
disc = tf.cumprod(discount*tf.ones_like(X), axis=axis)
else:
disc = 1.0
return tf.reduce_sum(X*disc, axis=axis)
def assert_shape(tens, shape):
assert tens.get_shape().is_compatible_with(shape)
def relu_layer(X, dout, name):
return tf.nn.relu(linear(X, dout, name))
def softplus_layer(X, dout, name):
return tf.nn.softplus(linear(X, dout, name))
def tanh_layer(X, dout, name):
return tf.nn.tanh(linear(X, dout, name))
def get_session_config():
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
#session_config.gpu_options.per_process_gpu_memory_fraction = 0.2
return session_config
def load_prior_params(pkl_fname):
import joblib
with tf.Session(config=get_session_config()):
params = joblib.load(pkl_fname)
tf.reset_default_graph()
#joblib.dump(params, file_name, compress=3)
params = params['irl_params']
#print(params)
assert params is not None
return params
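# --- Illustrative usage sketch, not part of the original file ---
# Wires the helpers above into a tiny two-layer network. This assumes the
# TensorFlow 1.x graph API (tf.placeholder / tf.Session), which is what the
# rest of the module is written against; layer names and sizes are arbitrary.
if __name__ == '__main__':
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, shape=(None, 4))
    h = relu_layer(x, 32, name='h1')
    out = linear(h, 2, name='out')
    with tf.Session(config=get_session_config()) as sess:
        sess.run(tf.global_variables_initializer())
        y = sess.run(out, feed_dict={x: np.zeros((3, 4), dtype=np.float32)})
        print(y.shape)  # (3, 2)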
|
demos/usage-remove-selected-elements.py
|
kinimesi/dash-cytoscape
| 432 |
92213
|
import json
import os
import random
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_cytoscape as cyto
asset_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..', 'assets'
)
app = dash.Dash(__name__, assets_folder=asset_path)
server = app.server
random.seed(2019)
nodes = [
{'data': {'id': str(i), 'label': 'Node {}'.format(i)}}
for i in range(1, 21)
]
edges = [
{'data': {'source': str(random.randint(1, 20)), 'target': str(random.randint(1, 20))}}
for _ in range(30)
]
default_elements = nodes + edges
styles = {
'json-output': {
'overflow-y': 'scroll',
'height': 'calc(50% - 25px)',
'border': 'thin lightgrey solid'
},
'tab': {'height': 'calc(98vh - 115px)'}
}
app.layout = html.Div([
html.Div(className='eight columns', children=[
cyto.Cytoscape(
id='cytoscape',
elements=default_elements,
layout={
'name': 'grid'
},
style={
'height': '95vh',
'width': '100%'
}
)
]),
html.Div(className='four columns', children=[
dcc.Tabs(id='tabs', children=[
dcc.Tab(label='Actions', children=[
html.Button("Remove Selected Node", id='remove-button')
]),
dcc.Tab(label='Tap Data', children=[
html.Div(style=styles['tab'], children=[
html.P('Node Data JSON:'),
html.Pre(
id='tap-node-data-json-output',
style=styles['json-output']
),
html.P('Edge Data JSON:'),
html.Pre(
id='tap-edge-data-json-output',
style=styles['json-output']
)
])
]),
dcc.Tab(label='Selected Data', children=[
html.Div(style=styles['tab'], children=[
html.P('Node Data JSON:'),
html.Pre(
id='selected-node-data-json-output',
style=styles['json-output']
),
html.P('Edge Data JSON:'),
html.Pre(
id='selected-edge-data-json-output',
style=styles['json-output']
)
])
])
]),
])
])
@app.callback(Output('cytoscape', 'elements'),
[Input('remove-button', 'n_clicks')],
[State('cytoscape', 'elements'),
State('cytoscape', 'selectedNodeData')])
def remove_selected_nodes(_, elements, data):
if elements and data:
ids_to_remove = {ele_data['id'] for ele_data in data}
print("Before:", elements)
new_elements = [ele for ele in elements if ele['data']['id'] not in ids_to_remove]
print("After:", new_elements)
return new_elements
return elements
@app.callback(Output('tap-node-data-json-output', 'children'),
[Input('cytoscape', 'tapNodeData')])
def displayTapNodeData(data):
return json.dumps(data, indent=2)
@app.callback(Output('tap-edge-data-json-output', 'children'),
[Input('cytoscape', 'tapEdgeData')])
def displayTapEdgeData(data):
return json.dumps(data, indent=2)
@app.callback(Output('selected-node-data-json-output', 'children'),
[Input('cytoscape', 'selectedNodeData')])
def displaySelectedNodeData(data):
return json.dumps(data, indent=2)
@app.callback(Output('selected-edge-data-json-output', 'children'),
[Input('cytoscape', 'selectedEdgeData')])
def displaySelectedEdgeData(data):
return json.dumps(data, indent=2)
if __name__ == '__main__':
app.run_server(debug=True)
|
tests/functions/test_count.py
|
hodgesrm/clickhouse-sqlalchemy
| 251 |
92234
|
from sqlalchemy import Column, func
from clickhouse_sqlalchemy import types, Table
from tests.testcase import CompilationTestCase
class CountTestCaseBase(CompilationTestCase):
table = Table(
't1', CompilationTestCase.metadata(),
Column('x', types.Int32, primary_key=True)
)
def test_count(self):
self.assertEqual(
self.compile(self.session.query(func.count(self.table.c.x))),
'SELECT count(t1.x) AS count_1 FROM t1'
)
def test_count_distinct(self):
query = self.session.query(func.count(func.distinct(self.table.c.x)))
self.assertEqual(
self.compile(query),
'SELECT count(distinct(t1.x)) AS count_1 FROM t1'
)
def test_count_no_column_specified(self):
query = self.session.query(func.count()).select_from(self.table)
self.assertEqual(
self.compile(query),
'SELECT count(*) AS count_1 FROM t1'
)
|
analysis/MxnetA.py
|
YifeiCN/nn_tools
| 370 |
92261
|
from __future__ import absolute_import
import mxnet as mx
import mxnet.symbol as sym
import json
from analysis.layers import *
import re
import ctypes
from mxnet.ndarray import NDArray
import mxnet.ndarray as nd
from mxnet.base import NDArrayHandle, py_str
blob_dict=[]
tracked_layers = []
def tmpnet():
x=sym.Variable('data')
y=sym.Convolution(x,kernel=(3,3),num_filter=32)
y=sym.Activation(y,'relu')
y = sym.Convolution(y, kernel=(3, 3), num_filter=64,stride=(2,2),num_group=2)
y=sym.softmax(y)
return y
def analyse(data_infos,module_json,data_name='data'):
datas={}
for info in data_infos:
datas[info[1]]=info[2]
nodes=json.loads(module_json)['nodes']
input=[]
out=None
for node in nodes:
name=node['name']
bottoms=[str(nodes[i[0]]['name']) for i in node['inputs']]
for i,bottom in enumerate(bottoms):
if bottom+'_output' in datas:
bottoms[i]=datas[bottom+'_output']
elif bottom+'_0' in datas:
bottoms[i]=datas[bottom+'_0']
elif bottom in datas:
bottoms[i]=datas[bottom]
else:
cur_node=node
while True:
bottom = [str(nodes[inp[0]]['name']) for inp in cur_node['inputs']][0]
if bottom + '_output' in datas:
bottoms[i] = datas[bottom + '_output']
break
elif bottom + '_0' in datas:
bottoms[i] = datas[bottom + '_0']
break
elif bottom in datas:
bottoms[i] = datas[bottom]
break
                    try:
                        bottom_node = nodes[cur_node['inputs'][0][0]]
                    except (IndexError, KeyError):
                        # No further inputs to walk back through; stop searching.
                        break
                    cur_node = bottom_node
if data_name==name:
input.append(Blob(datas[data_name]))
elif node['op']=='Convolution':
kernel=eval(node['attrs']['kernel'])
num_out=eval(node['attrs']['num_filter'])
group_size=eval(node['attrs'].get('num_group','1'))
pad=eval(node['attrs'].get('pad','(0,0)'))
stride=eval(node['attrs'].get('stride','(1,1)'))
x=Blob(bottoms[0])
out=Conv(x,kernel_size=kernel,stride=stride,pad=pad,
num_out=num_out,group_size=group_size,name=name)
tracked_layers.append(out)
elif node['op']=='BatchNorm':
x=Blob(bottoms[0])
out = Norm(x, 'batch_norm',name=name)
tracked_layers.append(out)
elif node['op']=='FullyConnected':
x=Blob(bottoms[0])
num_hidden=eval(node['attrs']['num_hidden'])
out=Fc(x,num_hidden,name=name)
tracked_layers.append(out)
elif node['op']=='Activation':
pass
elif 'elemwise' in node['op']:
pass
class Monitor(object):
def __init__(self, interval=1, pattern='.*', sort=False):
def stat(x):
return x.shape
self.stat_func = stat
self.interval = interval
self.activated = False
self.queue = []
self.step = 0
self.exes = []
self.re_prog = re.compile(pattern)
self.sort = sort
def stat_helper(name, array):
array = ctypes.cast(array, NDArrayHandle)
array = NDArray(array, writable=False)
if not self.activated or not self.re_prog.match(py_str(name)):
return
self.queue.append((self.step, py_str(name), stat(array)))
self.stat_helper = stat_helper
def install(self, exe):
exe.set_monitor_callback(self.stat_helper)
self.exes.append(exe)
def tic(self):
if self.step % self.interval == 0:
for exe in self.exes:
for array in exe.arg_arrays:
array.wait_to_read()
for array in exe.aux_arrays:
array.wait_to_read()
self.queue = []
self.activated = True
self.step += 1
def toc(self):
if not self.activated:
return []
for exe in self.exes:
for array in exe.arg_arrays:
array.wait_to_read()
for array in exe.aux_arrays:
array.wait_to_read()
for exe in self.exes:
for name, array in zip(exe._symbol.list_arguments(), exe.arg_arrays):
self.queue.append((self.step, name, self.stat_func(array)))
for name, array in zip(exe._symbol.list_auxiliary_states(), exe.aux_arrays):
# if self.re_prog.match(name):
self.queue.append((self.step, name, self.stat_func(array)))
self.activated = False
res = []
if self.sort:
self.queue.sort(key=lambda x: x[1])
for n, k, v_list in self.queue:
res.append((n, k, v_list))
self.queue = []
return res
def toc_print(self):
pass
def profiling_symbol(symbol,data_shape,data_name='data'):
monitor = Monitor()
model=mx.mod.Module(symbol)
model.bind(data_shapes=[(data_name,tuple(data_shape))])
model.install_monitor(monitor)
model.init_params()
monitor.tic()
model.forward(mx.io.DataBatch(data=(nd.ones(data_shape),)))
data_infos=monitor.toc()
module_json=symbol.tojson()
analyse(data_infos,module_json,data_name)
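# --- Illustrative usage sketch, not part of the original file ---
# Profiles the toy symbol defined by tmpnet() with the helpers above. The
# input shape is an arbitrary assumption; after the call, `tracked_layers`
# holds one analysis object per Convolution/BatchNorm/FullyConnected node that
# analyse() recognised.
if __name__ == '__main__':
    profiling_symbol(tmpnet(), data_shape=[1, 3, 32, 32], data_name='data')
    for layer in tracked_layers:
        print(layer)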
|
vis/max_bar_plot.py
|
SurvivorT/SRTP
| 489 |
92264
|
#!/usr/bin/env python
#
# File: bar_plot.py
#
import argparse
import os
import pickle
import matplotlib
import numpy as np
params = {
'axes.labelsize': 12,
'font.size': 16,
'font.family': 'serif',
'legend.fontsize': 12,
'xtick.labelsize': 12,
'ytick.labelsize': 12,
#'text.usetex': True,
'figure.figsize': [4.5, 4.5]
}
matplotlib.rcParams.update(params)
import matplotlib.pyplot as plt
plt.style.use('seaborn-colorblind')
import pandas as pd
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
parser.add_argument('--random', type=float, default=None)
args = parser.parse_args()
if args.random:
random = args.random
else:
random = 0
with open(os.path.join(args.dir, 'results.pkl'), 'rb') as f:
res = pickle.load(f)
control_params = ['decentralized', 'concurrent', 'centralized']
nn_params = ['mlp', 'gru', 'heuristic']
header = ['training']
for nnp in nn_params:
header.append(nnp)
header.append(nnp + '_error')
mat = []
for cp in control_params:
row = [cp]
for nnp in nn_params:
key = cp + '-' + nnp
if key in res:
if res[key]:
row.append(res[key]['retlist'].mean() - random)
row.append(res[key]['retlist'].std())
else:
row.append(None)
row.append(None)
elif 'heuristic' in key:
row.append(res['heuristic']['retlist'].mean() - random)
row.append(res['heuristic']['retlist'].std())
mat.append(row)
dat = pd.DataFrame(mat, columns=header)
print(dat.to_csv(index=False, float_format='%.3f', na_rep='nan'))
csv_errors = dat[['mlp_error', 'gru_error']].rename(
columns={'mlp_error': 'mlp',
'gru_error': 'gru'})
ax = dat[['mlp', 'gru']].plot(kind='bar', title='', legend=True, yerr=csv_errors, alpha=0.7)
ax.plot(dat['heuristic'], linewidth=1.2, linestyle='--', alpha=0.7)
leg = ax.legend(['Heuristic', 'MLP', 'GRU'])
leg.get_frame().set_alpha(0.7)
ax.set_xticklabels(dat.training, rotation=0)
ax.set_ylabel('Normalized Returns')
plt.savefig(os.path.join(args.dir, 'bar.pdf'), bbox_inches='tight')
if __name__ == '__main__':
main()
|
app/grandchallenge/notifications/filters.py
|
nlessmann/grand-challenge.org
| 101 |
92268
|
import re
from functools import reduce
from operator import or_
from actstream.models import Follow
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django_filters import CharFilter, ChoiceFilter, FilterSet
from machina.apps.forum.models import Forum
from machina.apps.forum_conversation.models import Topic
from grandchallenge.core.filters import FilterForm
from grandchallenge.notifications.models import Notification
BOOLEAN_CHOICES = (
("1", "Read"),
("0", "Unread"),
)
class NotificationFilter(FilterSet):
forum = CharFilter(method="search_filter", label="Forum")
topic = CharFilter(method="search_filter", label="Forum post subject")
read = ChoiceFilter(choices=BOOLEAN_CHOICES, label="Status")
class Meta:
model = Notification
form = FilterForm
fields = ("forum", "topic", "read")
def search_filter(self, queryset, name, value):
if name == "forum":
name_qs = [
x.id for x in Forum.objects.filter(name__icontains=value).all()
]
elif name == "topic":
name_qs = [
x.id
for x in Topic.objects.filter(subject__icontains=value).all()
]
search_fields = (
"action__target_object_id",
"action__action_object_object_id",
)
return queryset.filter(
reduce(
or_, [Q(**{f"{f}__in": name_qs}) for f in search_fields], Q(),
)
)
FOLLOW_CHOICES = (
("forum_forum", "Forums"),
("topic_forum_conversation", "Topics"),
("readerstudy_reader_studies", "Reader studies"),
("archive_archives", "Archives"),
("algorithm_algorithms", "Algorithms"),
("challenge_challenges", "Challenges"),
("externalchallenge_challenges", "External Challenges"),
("phase_evaluation", "Challenge Phase"),
)
class FollowFilter(FilterSet):
forum = CharFilter(method="search_filter", label="Search for a forum")
topic = CharFilter(
method="search_filter", label="Search for a forum topic"
)
forums_for_user = CharFilter(
method="search_forum_topics",
label="Show all topic subscriptions for a specific forum",
)
content_type = ChoiceFilter(
choices=FOLLOW_CHOICES,
method="get_content_type",
label="Filter by subscription type",
)
class Meta:
model = Follow
form = FilterForm
fields = ("forum", "topic", "forums_for_user", "content_type")
def search_filter(self, queryset, name, value):
model_name = name
if model_name == "forum":
app_label = "forum"
model = Forum
kwargs = {"name__icontains": value}
elif model_name == "topic":
app_label = "forum_conversation"
model = Topic
kwargs = {"subject__icontains": value}
name_qs = [x.id for x in model.objects.filter(**kwargs).all()]
return queryset.filter(
**{"object_id__in": name_qs},
**{
"content_type__exact": ContentType.objects.filter(
model=model_name, app_label=app_label
).get()
},
)
def search_forum_topics(self, queryset, name, value):
forums = [
x.id for x in Forum.objects.filter(name__icontains=value).all()
]
name_qs = [
x.id for x in Topic.objects.filter(forum__id__in=forums).all()
]
return queryset.filter(
**{"object_id__in": name_qs},
**{
"content_type__exact": ContentType.objects.filter(
model="topic", app_label="forum_conversation"
).get()
},
)
def get_content_type(self, queryset, name, value):
ct = ContentType.objects.filter(
model=re.split(r"_", value, 1)[0],
app_label=re.split(r"_", value, 1)[1],
).get()
return queryset.filter(content_type__exact=ct)
|
services/workers/emails/tests/test_welcome_email.py
|
paulowe/aws-boilerplate
| 711 |
92275
|
import settings
from .. import handlers
def test_send_welcome_email(mocker, ses_client):
ses_send_email = mocker.patch.object(ses_client, 'send_email', autospec=True)
mocker.patch('emails.sender.get_ses_client', return_value=ses_client)
event = {'to': '<EMAIL>', 'name': '<NAME>', 'type': 'WelcomeEmail'}
handlers.send_email(event, {})
ses_send_email.assert_called_once()
kwargs = ses_send_email.call_args.kwargs
assert kwargs['Source'] == settings.FROM_EMAIL
assert kwargs['Destination']['ToAddresses'] == [event['to']]
assert event['name'] in kwargs['Message']['Body']['Html']['Data']
|
databuilder/databuilder/serializers/neo4_serializer.py
|
defendercrypt/amundsen
| 2,072 |
92282
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import (
Any, Dict, Optional,
)
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import (
NODE_KEY, NODE_LABEL, RELATION_END_KEY, RELATION_END_LABEL, RELATION_REVERSE_TYPE, RELATION_START_KEY,
RELATION_START_LABEL, RELATION_TYPE,
)
from databuilder.publisher.neo4j_csv_publisher import UNQUOTED_SUFFIX
def serialize_node(node: Optional[GraphNode]) -> Dict[str, Any]:
if node is None:
return {}
node_dict = {
NODE_LABEL: node.label,
NODE_KEY: node.key
}
for key, value in node.attributes.items():
key_suffix = _get_neo4j_suffix_value(value)
formatted_key = f'{key}{key_suffix}'
node_dict[formatted_key] = value
return node_dict
def serialize_relationship(relationship: Optional[GraphRelationship]) -> Dict[str, Any]:
if relationship is None:
return {}
relationship_dict = {
RELATION_START_KEY: relationship.start_key,
RELATION_START_LABEL: relationship.start_label,
RELATION_END_KEY: relationship.end_key,
RELATION_END_LABEL: relationship.end_label,
RELATION_TYPE: relationship.type,
RELATION_REVERSE_TYPE: relationship.reverse_type,
}
for key, value in relationship.attributes.items():
key_suffix = _get_neo4j_suffix_value(value)
formatted_key = f'{key}{key_suffix}'
relationship_dict[formatted_key] = value
return relationship_dict
def _get_neo4j_suffix_value(value: Any) -> str:
if isinstance(value, int):
return UNQUOTED_SUFFIX
if isinstance(value, bool):
return UNQUOTED_SUFFIX
return ''
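# --- Illustrative usage sketch, not part of the original file ---
# Serializes a single made-up GraphNode. The constructor arguments (key,
# label, attributes) are assumed to match the GraphNode model imported above;
# the resulting dict is keyed by the NODE_KEY/NODE_LABEL constants plus the
# node attributes, with the unquoted suffix appended for numeric/boolean
# values.
if __name__ == '__main__':
    example_node = GraphNode(
        key='database://example',
        label='Database',
        attributes={'name': 'example', 'is_view': False},
    )
    print(serialize_node(example_node))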
|
Chapter02/animals/sheep.py
|
RoyMcCrain/Mastering-Vim
| 155 |
92288
|
"""A sheep."""
import animal
class Sheep(animal.Animal):
def __init__(self):
self.kind = 'sheep'
|
powerline/matchers/vim/plugin/commandt.py
|
MrFishFinger/powerline
| 11,435 |
92289
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
from powerline.bindings.vim import vim_getbufoption, buffer_name
def commandt(matcher_info):
name = buffer_name(matcher_info)
return (
vim_getbufoption(matcher_info, 'filetype') == 'command-t'
or (name and os.path.basename(name) == b'GoToFile')
)
|
doc/examples/scripts/structure/md_analysis.py
|
danijoo/biotite
| 208 |
92305
|
r"""
Basic analysis of a MD simulation
=================================
In this example, we will analyze a trajectory of a *Gromacs* MD
simulation:
The trajectory contains simulation data of lysozyme over the course of
1 ns.
The data is the result of the famous *Gromacs*
'`Lysozyme in Water <http://www.mdtutorials.com/gmx/lysozyme/index.html>`_'
tutorial.
The trajectory file can be downloaded
:download:`here </examples/download/lysozyme_md.xtc>`
and the template PDB can be downloaded
:download:`here </examples/download/lysozyme_md.pdb>`.
We begin by loading the template PDB file as :class:`AtomArray`, sanitizing it
and using it to load the trajectory as :class:`AtomArrayStack`.
"""
# Code source: <NAME>
# License: BSD 3 clause
import biotite
import biotite.structure as struc
import biotite.structure.io as strucio
import biotite.structure.io.xtc as xtc
import numpy as np
import matplotlib.pyplot as plt
# Put here the path of the downloaded files
templ_file_path = "../../download/lysozyme_md.pdb"
traj_file_path = "../../download/lysozyme_md.xtc"
# Gromacs does not set the element symbol in its PDB files,
# but Biotite guesses the element names from the atom names,
# emitting a warning
template = strucio.load_structure(templ_file_path)
# The structure still has water and ions that are not needed for our
# calculations; we are only interested in the protein itself.
# These are removed for the sake of computational speed using a boolean
# mask
protein_mask = struc.filter_amino_acids(template)
template = template[protein_mask]
# We could have loaded the trajectory also with
# 'strucio.load_structure()', but in this case we only want to load
# those coordinates that belong to the already selected atoms of the
# template structure.
# Hence, we use the 'XTCFile' class directly to load the trajectory
# This gives us the additional option that allows us to select the
# coordinates belonging to the amino acids.
xtc_file = xtc.XTCFile.read(traj_file_path, atom_i=np.where(protein_mask)[0])
trajectory = xtc_file.get_structure(template)
# Get simulation time for plotting purposes
time = xtc_file.get_time()
########################################################################
# Since the MD simulation used periodic boundaries, the protein might be
# segmented over the box boundary.
# For further analysis we need to reassemble the protein chain into a
# whole molecule, without periodic boundaries.
# In *Gromacs* we could have used ``gmx trjconv`` for this, but this
# problem can be handled in *Biotite*, too.
trajectory = struc.remove_pbc(trajectory)
########################################################################
# Now our trajectory is ready for some analysis!
# At first we want to see if the simulation converged.
# For this purpose we take the RMSD of a frame compared to the initial
# model as measure. In order to calculate the RMSD we must
# superimpose all models onto a reference, in this case we also choose
# the initial structure.
trajectory, transform = struc.superimpose(trajectory[0], trajectory)
rmsd = struc.rmsd(trajectory[0], trajectory)
figure = plt.figure(figsize=(6,3))
ax = figure.add_subplot(111)
ax.plot(time, rmsd, color=biotite.colors["dimorange"])
ax.set_xlim(time[0], time[-1])
ax.set_ylim(0, 2)
ax.set_xlabel("Time (ps)")
ax.set_ylabel("RMSD (Å)")
figure.tight_layout()
########################################################################
# As we can see the simulation seems to converge already early in the
# simulation.
# After about 200 ps the RMSD stays in a range of approx. 1 - 2 Å.
#
# In order to further evaluate the unfolding of our enzyme in the
# course of simulation, we calculate and plot the radius of gyration
# (a measure for the protein radius).
radius = struc.gyration_radius(trajectory)
figure = plt.figure(figsize=(6,3))
ax = figure.add_subplot(111)
ax.plot(time, radius, color=biotite.colors["dimorange"])
ax.set_xlim(time[0], time[-1])
ax.set_ylim(14.0, 14.5)
ax.set_xlabel("Time (ps)")
ax.set_ylabel("Radius of gyration (Å)")
figure.tight_layout()
########################################################################
# From this perspective, the protein seems really stable.
# The radius merely fluctuates within a range of approximately 0.3 Å
# during the entire simulation.
#
# Let's have a look at single amino acids:
# Which residues fluctuate most?
# For answering this question we calculate the RMSF
# (Root mean square fluctuation).
# It is similar to the RMSD, but instead of averaging over the atoms
# and looking at each time step, we average over the time and look at
# each residue.
# Usually the average model is taken as reference
# (compared to the starting model for RMSD).
#
# Since side chain atoms fluctuate quite a lot, they are not suitable
# for evaluation of the residue flexibility. Therefore, we consider only
# CA atoms.
# In all models, mask the CA atoms
ca_trajectory = trajectory[:, trajectory.atom_name == "CA"]
rmsf = struc.rmsf(struc.average(ca_trajectory), ca_trajectory)
figure = plt.figure(figsize=(6,3))
ax = figure.add_subplot(111)
res_count = struc.get_residue_count(trajectory)
ax.plot(np.arange(1, res_count+1), rmsf, color=biotite.colors["dimorange"])
ax.set_xlim(1, res_count)
ax.set_ylim(0, 1.5)
ax.set_xlabel("Residue")
ax.set_ylabel("RMSF (Å)")
figure.tight_layout()
plt.show()
|
api/tacticalrmm/logs/tests.py
|
juaromu/tacticalrmm
| 903 |
92313
|
from itertools import cycle
from unittest.mock import patch
from django.utils import timezone as djangotime
from model_bakery import baker, seq
from tacticalrmm.test import TacticalTestCase
from logs.models import PendingAction
class TestAuditViews(TacticalTestCase):
def setUp(self):
self.authenticate()
self.setup_coresettings()
def create_audit_records(self):
# create clients for client filter
site = baker.make("clients.Site")
agent1 = baker.make_recipe("agents.agent", site=site, hostname="AgentHostname1")
agent2 = baker.make_recipe("agents.agent", hostname="AgentHostname2")
agent0 = baker.make_recipe("agents.agent", hostname="AgentHostname")
# user jim agent logs
baker.make_recipe(
"logs.agent_logs",
username="jim",
agent="AgentHostname1",
agent_id=agent1.id,
_quantity=15,
)
baker.make_recipe(
"logs.agent_logs",
username="jim",
agent="AgentHostname2",
agent_id=agent2.id,
_quantity=8,
)
# user james agent logs
baker.make_recipe(
"logs.agent_logs",
username="james",
agent="AgentHostname1",
agent_id=agent1.id,
_quantity=7,
)
baker.make_recipe(
"logs.agent_logs",
username="james",
agent="AgentHostname2",
agent_id=agent2.id,
_quantity=10,
)
# generate agent logs with random usernames
baker.make_recipe(
"logs.agent_logs",
agent=seq("AgentHostname"),
agent_id=seq(agent1.id),
_quantity=5,
)
# generate random object data
baker.make_recipe(
"logs.object_logs",
username="james",
_quantity=17,
)
# generate login data for james
baker.make_recipe(
"logs.login_logs",
username="james",
_quantity=11,
)
# generate login data for jim
baker.make_recipe(
"logs.login_logs",
username="jim",
_quantity=13,
)
return {"site": site, "agents": [agent0, agent1, agent2]}
def test_get_audit_logs(self):
url = "/logs/auditlogs/"
# create data
data = self.create_audit_records()
# test data and result counts
data = [
{"filter": {"timeFilter": 30}, "count": 86},
{
"filter": {
"timeFilter": 45,
"agentFilter": [data["agents"][2].id],
},
"count": 19,
},
{
"filter": {
"userFilter": ["jim"],
"agentFilter": [data["agents"][1].id],
},
"count": 15,
},
{
"filter": {
"timeFilter": 180,
"userFilter": ["james"],
"agentFilter": [data["agents"][1].id],
},
"count": 7,
},
{"filter": {}, "count": 86},
{"filter": {"agentFilter": [500]}, "count": 0},
{
"filter": {
"timeFilter": 35,
"userFilter": ["james", "jim"],
"agentFilter": [
data["agents"][1].id,
data["agents"][2].id,
],
},
"count": 40,
},
{"filter": {"timeFilter": 35, "userFilter": ["james", "jim"]}, "count": 81},
{"filter": {"objectFilter": ["user"]}, "count": 26},
{"filter": {"actionFilter": ["login"]}, "count": 12},
{
"filter": {"clientFilter": [data["site"].client.id]},
"count": 23,
},
]
pagination = {
"rowsPerPage": 25,
"page": 1,
"sortBy": "entry_time",
"descending": True,
}
for req in data:
resp = self.client.patch(
url, {**req["filter"], "pagination": pagination}, format="json"
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
len(resp.data["audit_logs"]), # type:ignore
pagination["rowsPerPage"]
if req["count"] > pagination["rowsPerPage"]
else req["count"],
)
self.assertEqual(resp.data["total"], req["count"]) # type:ignore
self.check_not_authenticated("patch", url)
def test_get_pending_actions(self):
url = "/logs/pendingactions/"
agent1 = baker.make_recipe("agents.online_agent")
agent2 = baker.make_recipe("agents.online_agent")
baker.make(
"logs.PendingAction",
agent=agent1,
action_type="chocoinstall",
details={"name": "googlechrome", "output": None, "installed": False},
_quantity=12,
)
baker.make(
"logs.PendingAction",
agent=agent2,
action_type="chocoinstall",
status="completed",
details={"name": "adobereader", "output": None, "installed": False},
_quantity=14,
)
data = {"showCompleted": False}
r = self.client.patch(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data["actions"]), 12) # type: ignore
self.assertEqual(r.data["completed_count"], 14) # type: ignore
self.assertEqual(r.data["total"], 26) # type: ignore
PendingAction.objects.filter(action_type="chocoinstall").update(
status="completed"
)
data = {"showCompleted": True}
r = self.client.patch(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data["actions"]), 26) # type: ignore
self.assertEqual(r.data["completed_count"], 26) # type: ignore
self.assertEqual(r.data["total"], 26) # type: ignore
data = {"showCompleted": True, "agentPK": agent1.pk}
r = self.client.patch(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data["actions"]), 12) # type: ignore
self.assertEqual(r.data["completed_count"], 12) # type: ignore
self.assertEqual(r.data["total"], 12) # type: ignore
self.check_not_authenticated("patch", url)
@patch("agents.models.Agent.nats_cmd")
def test_cancel_pending_action(self, nats_cmd):
nats_cmd.return_value = "ok"
url = "/logs/pendingactions/"
agent = baker.make_recipe("agents.online_agent")
action = baker.make(
"logs.PendingAction",
agent=agent,
action_type="schedreboot",
details={
"time": "2021-01-13 18:20:00",
"taskname": "TacticalRMM_SchedReboot_wYzCCDVXlc",
},
)
data = {"pk": action.pk} # type: ignore
r = self.client.delete(url, data, format="json")
self.assertEqual(r.status_code, 200)
nats_data = {
"func": "delschedtask",
"schedtaskpayload": {"name": "TacticalRMM_SchedReboot_wYzCCDVXlc"},
}
nats_cmd.assert_called_with(nats_data, timeout=10)
# try request again and it should 404 since pending action doesn't exist
r = self.client.delete(url, data, format="json")
self.assertEqual(r.status_code, 404)
nats_cmd.reset_mock()
action2 = baker.make(
"logs.PendingAction",
agent=agent,
action_type="schedreboot",
details={
"time": "2021-01-13 18:20:00",
"taskname": "TacticalRMM_SchedReboot_wYzCCDVXlc",
},
)
data = {"pk": action2.pk} # type: ignore
nats_cmd.return_value = "error deleting sched task"
r = self.client.delete(url, data, format="json")
self.assertEqual(r.status_code, 400)
self.assertEqual(r.data, "error deleting sched task") # type: ignore
self.check_not_authenticated("delete", url)
def test_get_debug_log(self):
url = "/logs/debuglog/"
# create data
agent = baker.make_recipe("agents.agent")
baker.make(
"logs.DebugLog",
log_level=cycle(["error", "info", "warning", "critical"]),
log_type="agent_issues",
agent=agent,
_quantity=4,
)
logs = baker.make(
"logs.DebugLog",
log_type="system_issues",
log_level=cycle(["error", "info", "warning", "critical"]),
_quantity=15,
)
# test agent filter
data = {"agentFilter": agent.id}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 4) # type: ignore
# test log type filter and agent
data = {"agentFilter": agent.id, "logLevelFilter": "warning"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 1) # type: ignore
        # test log type filter combined with log level filter
data = {"logTypeFilter": "system_issues", "logLevelFilter": "error"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 4) # type: ignore
self.check_not_authenticated("patch", url)
class TestLogTasks(TacticalTestCase):
def test_prune_debug_log(self):
from .models import DebugLog
from .tasks import prune_debug_log
# setup data
debug_log = baker.make(
"logs.DebugLog",
_quantity=50,
)
days = 0
for item in debug_log: # type:ignore
item.entry_time = djangotime.now() - djangotime.timedelta(days=days)
item.save()
days = days + 5
        # delete DebugLog entries older than 30 days
prune_debug_log(30)
self.assertEqual(DebugLog.objects.count(), 6)
def test_prune_audit_log(self):
from .models import AuditLog
from .tasks import prune_audit_log
# setup data
audit_log = baker.make(
"logs.AuditLog",
_quantity=50,
)
days = 0
for item in audit_log: # type:ignore
item.entry_time = djangotime.now() - djangotime.timedelta(days=days)
item.save()
days = days + 5
        # delete AuditLog entries older than 30 days
prune_audit_log(30)
self.assertEqual(AuditLog.objects.count(), 6)
|
scaffoldgraph/vis/base.py
|
UCLCheminformatics/ScaffoldGraph
| 121 |
92361
|
"""
scaffoldgraph.vis.base
"""
import networkx as nx
from abc import ABC
from scaffoldgraph.core import ScaffoldGraph
from scaffoldgraph.utils import canonize_smiles
from .utils import remove_node_mol_images
class Visualizer(ABC):
"""Base class for ScaffoldGraph visualizers.
A Visualizer contains functions for creating visualizations
of ScaffoldGraphs.
See Also
--------
scaffoldgraph.vis.notebook.cytoscape.CytoscapeVisualizer
"""
def __init__(self, graph, requires_tree=False, refresh_images=False):
"""Initialize the visualizer.
Parameters
----------
graph : ScaffoldGraph
ScaffoldGraph to visualize
requires_tree : bool, optional
Whether the visualizer requires a tree
structure to create a visualization.
refresh_images: bool, optional
            If True remove all embedded images from the
input graph and regenerate when required.
The default is False.
"""
self._requires_tree = requires_tree
self._refresh = refresh_images
self._graph = self._validate_graph(graph)
@property
def graph(self):
"""ScaffoldGraph: return the graph associated with the visualizer."""
return self._graph
@graph.setter
def graph(self, graph):
self._graph = self._validate_graph(graph)
def _validate_graph(self, graph):
"""Private: Validate a graph is suitable for visualizer."""
if not issubclass(type(graph), ScaffoldGraph):
raise ValueError(
f'{graph} must be a subclass of ScaffoldGraph'
)
if self._requires_tree:
            if not (nx.is_tree(graph) or nx.is_forest(graph)):
                msg = '{} requires a tree/forest structured graph'
                msg = msg.format(self.__class__.__name__)
                raise ValueError(msg)
if self._refresh is True:
remove_node_mol_images(graph)
return graph
def _subgraph_from_mol(self, molecule):
"""Private: Select a subgraph starting at a molecule node.
Parameters
----------
molecule : str
Molecule node identifier.
Returns
-------
subgraph : ScaffoldGraph
A subgraph starting at `molecule`.
"""
G = self._graph
if not G.molecule_in_graph(molecule):
raise ValueError(f'molecule: {molecule} not in graph {G}')
scaffolds = G.get_scaffolds_for_molecule(molecule)
subgraph = G.subgraph([molecule] + scaffolds)
return subgraph
def _subgraph_from_scf(self, scaffold, traversal):
"""Private: Select a subgraph starting at a scaffold node.
Parameters
----------
scaffold : str
Scaffold node identifier.
traversal : str {'parent', 'child', 'bidirectional'}
The direction of traversal to create the subgraph.
If 'bidirectional' both directions are considered.
Returns
-------
subgraph : ScaffoldGraph
A subgraph starting at `scaffold`.
"""
G = self._graph
query = canonize_smiles(scaffold)
if not G.scaffold_in_graph(query):
raise ValueError(f'scaffold: {query} not in graph {G}')
if traversal == 'parent':
nodes = G.get_parent_scaffolds(query)
elif traversal == 'child':
nodes = list(nx.descendants(G, query))
elif traversal == 'bidirectional':
nodes = G.get_parent_scaffolds(query)
nodes += list(nx.descendants(G, query))
else:
msg = 'traversal must be one of {child, parent, bidirectional}'
raise ValueError(msg)
subgraph = G.subgraph([query] + nodes)
return subgraph
def __repr__(self):
return '<{_cls} at {address}>'.format(
_cls=self.__class__.__name__,
address=hex(id(self))
)
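# --- Illustrative sketch, not part of the original file ---
# Visualizer itself defines no drawing logic; concrete visualizers subclass it
# and build their rendering on top of the subgraph helpers. The class below is
# a made-up minimal example that simply returns the selected subgraph instead
# of rendering it, to show how the protected helpers are intended to be used.
class SubgraphOnlyVisualizer(Visualizer):
    def subgraph_for_scaffold(self, scaffold, traversal='parent'):
        # Delegate to the base-class helper and hand back the raw subgraph.
        return self._subgraph_from_scf(scaffold, traversal)
    def subgraph_for_molecule(self, molecule):
        return self._subgraph_from_mol(molecule)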
|
proximal/utils/timings_log.py
|
kyleaj/ProxImaL
| 101 |
92406
|
import timeit
class TimingsEntry(object):
"""A log of the runtime for an operation.
"""
def __init__(self, op):
self.op = op
self.evals = 0
self.total_time = 0
self.lastticstamp = None
@property
def avg_time(self):
if self.evals == 0:
return 0
else:
return self.total_time / self.evals
def record_timing(self, elapsed):
"""Updates the log with the new time.
"""
self.evals += 1
self.total_time += elapsed
def tic(self):
""" Default timer
Example: t = tic()
... code
elapsed = toc(t)
print( '{0}: {1:.4f}ms'.format(message, elapsed) )
"""
t = timeit.default_timer()
self.lastticstamp = t
return t
def toc(self):
""" See tic f
"""
# Last tic
if self.lastticstamp is None:
raise Exception('Error: Call to toc did never call tic before.')
else:
t = self.lastticstamp
# Measure time in ms
elapsed = (timeit.default_timer() - t) * 1000.0 # in ms
            # Update record.
self.record_timing(elapsed)
self.lastticstamp = None
return elapsed
def __str__(self):
return "op = %s, evals = %s, total_time (ms) = %s, avg_time (ms) = %s" % (
self.op, self.evals, self.total_time, self.avg_time)
class TimingsLog(object):
"""A log of the runtime for a set of operations.
"""
def __init__(self, ops):
self.ops = ops
self.data = {}
for op in self.ops:
self.data[op] = TimingsEntry(op)
def __getitem__(self, item):
return self.data[item]
def __str__(self):
logs = []
for op in self.ops:
if self[op].evals > 0:
logs += [str(self.data[op])]
return '\n'.join(logs)
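# --- Illustrative usage sketch, not part of the original file ---
# Times two named operations with TimingsLog; the operation names and the
# busy-loop workloads are arbitrary.
if __name__ == '__main__':
    log = TimingsLog(['square', 'cube'])
    for _ in range(3):
        log['square'].tic()
        _ = [i * i for i in range(100000)]
        log['square'].toc()
        log['cube'].tic()
        _ = [i * i * i for i in range(100000)]
        log['cube'].toc()
    print(log)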
|
PaddleKG/CoKE/bin/pathquery_data_preprocess.py
|
shippingwang/models
| 1,319 |
92435
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
data preprocess for pathquery datasets
"""
import os
import sys
import time
import logging
import argparse
from kbc_data_preprocess import write_vocab
from kbc_data_preprocess import load_vocab
from kbc_data_preprocess import generate_mask_type
from collections import defaultdict, Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
inverted = lambda r: r[:2] == '**'
invert = lambda r: r[2:] if inverted(r) else '**' + r
class EvalDataset(object):
def __init__(self, train_file, test_file):
self.spo_train_fp = train_file
self.spo_test_fp = test_file
train_triples = self._load_spo_triples(self.spo_train_fp)
test_triples = self._load_spo_triples(self.spo_test_fp)
#logger.debug(">>train triples cnt:%d" % len(train_triples))
#logger.debug(">>test triples cnt:%d" % len(test_triples))
_train_cnt = len(train_triples)
all_triples = train_triples
all_triples.update(test_triples)
self.full_graph = Graph(all_triples)
logger.debug(self.full_graph)
def _load_spo_triples(self, spo_path):
"""
:param spo_path:
:return: set of (s,r,t) original triples
"""
logger.debug(">> Begin load base spo for %s at %s" %
(spo_path, time.ctime()))
triples = set()
for line in open(spo_path):
segs = line.strip().split("\t")
assert len(segs) == 3
s, p, o = segs
triples.add((s, p, o))
logger.debug(">> Loaded spo triples :%s cnt:%d" %
(spo_path, len(triples)))
logger.debug(">> End load spo for %s at %s" % (spo_path, time.ctime()))
return triples
class Graph(object):
def __init__(self, triples):
self.triples = triples
neighbors = defaultdict(lambda: defaultdict(set))
relation_args = defaultdict(lambda: defaultdict(set))
logger.info(">> Begin building graph at %s" % (time.ctime()))
self._node_set = set()
for s, r, t in triples:
relation_args[r]['s'].add(s)
relation_args[r]['t'].add(t)
neighbors[s][r].add(t)
neighbors[t][invert(r)].add(s)
self._node_set.add(t)
self._node_set.add(s)
def freeze(d):
frozen = {}
            for key, subdict in d.items():
                frozen[key] = {}
                for subkey, set_val in subdict.items():
frozen[key][subkey] = tuple(set_val)
return frozen
self.neighbors = freeze(neighbors)
self.relation_args = freeze(relation_args)
logger.info(">> Done building graph at %s" % (time.ctime()))
def __repr__(self):
s = ""
s += "graph.relations_args cnt %d\t" % len(self.relation_args)
s += "graph.neighbors cnt %d\t" % len(self.neighbors)
s += "graph.neighbors node set cnt %d" % len(self._node_set)
return s
def walk_all(self, start, path):
"""
walk from start and get all the paths
:param start: start entity
:param path: (r1, r2, ...,rk)
:return: entities set for candidates path
"""
set_s = set()
set_t = set()
set_s.add(start)
for _, r in enumerate(path):
if len(set_s) == 0:
return set()
for _s in set_s:
if _s in self.neighbors and r in self.neighbors[_s]:
                    _tset = set(self.neighbors[_s][r])  # tuple to set
set_t.update(_tset)
set_s = set_t.copy()
set_t.clear()
return set_s
def repr_walk_all_ret(self, start, path, MAX_T=20):
cand_set = self.walk_all(start, path)
if len(cand_set) == 0:
return ">>start{} path:{} end: EMPTY!".format(
start, "->".join(list(path)))
_len = len(cand_set) if len(cand_set) < MAX_T else MAX_T
cand_node_str = ", ".join(cand_set[:_len])
return ">>start{} path:{} end: {}".format(
start, "->".join(list(path)), cand_node_str)
def type_matching_entities(self, path, position="t"):
assert (position == "t")
if position == "t":
r = path[-1]
elif position == "s":
r = path[0]
else:
logger.error(">>UNKNOWN position at type_matching_entities")
raise ValueError(position)
try:
if not inverted(r):
return r, self.relation_args[r][position]
else:
inv_pos = 's' if position == "t" else "t"
return r, self.relation_args[invert(r)][inv_pos]
except KeyError:
logger.error(
">>UNKNOWN path value at type_matching_entities :%s from path:%s"
% (r, path))
return None, tuple()
def is_trival_query(self, start, path):
"""
        :param path:
        :return: True if every type-matching candidate entity is a correct answer, otherwise False
        """
        #todo: check right again
        _, cand_set = self.type_matching_entities(path, "t")
        cand_set = set(cand_set)
        ans_set = self.walk_all(start, path)
        _set = cand_set - ans_set
if len(_set) == 0:
return True
else:
return False
def get_unique_entities_relations(train_file, dev_file, test_file):
entity_lst = dict()
relation_lst = dict()
all_files = [train_file, dev_file, test_file]
for input_file in all_files:
with open(input_file, "r") as f:
for line in f.readlines():
tokens = line.strip().split("\t")
assert len(tokens) == 3
entity_lst[tokens[0]] = len(entity_lst)
entity_lst[tokens[2]] = len(entity_lst)
relations = tokens[1].split(",")
for relation in relations:
relation_lst[relation] = len(relation_lst)
print(">> Number of unique entities: %s" % len(entity_lst))
print(">> Number of unique relations: %s" % len(relation_lst))
return entity_lst, relation_lst
def filter_base_data(raw_train_file, raw_dev_file, raw_test_file,
train_base_file, dev_base_file, test_base_file):
def fil_base(input_file, output_file):
fout = open(output_file, "w")
base_n = 0
with open(input_file, "r") as f:
for line in f.readlines():
tokens = line.strip().split("\t")
assert len(tokens) == 3
relations = tokens[1].split(",")
if len(relations) == 1:
fout.write(line)
base_n += 1
fout.close()
return base_n
train_base_n = fil_base(raw_train_file, train_base_file)
dev_base_n = fil_base(raw_dev_file, dev_base_file)
test_base_n = fil_base(raw_test_file, test_base_file)
print(">> Train base cnt:%d" % train_base_n)
print(">> Valid base cnt:%d" % dev_base_n)
print(">> Test base cnt:%d" % test_base_n)
def generate_onlytail_mask_type(input_file, output_file):
with open(output_file, "w") as fw:
with open(input_file, "r") as fr:
for line in fr.readlines():
fw.write(line.strip('\r \n') + "\tMASK_TAIL\n")
def generate_eval_files(vocab_path, raw_test_file, train_base_file,
dev_base_file, test_base_file, sen_candli_file,
trivial_sen_file):
token2id = load_vocab(vocab_path)
eval_data = EvalDataset(train_base_file, test_base_file)
fout_sen_cand = open(sen_candli_file, "w")
fout_q_trival = open(trivial_sen_file, "w")
sen_candli_cnt = trivial_sen_cnt = 0
j = 0
for line in open(raw_test_file):
line = line.strip()
j += 1
segs = line.split("\t")
s = segs[0]
t = segs[2]
path = tuple(segs[1].split(","))
q_set = eval_data.full_graph.walk_all(s, path)
r, cand_set = eval_data.full_graph.type_matching_entities(path, "t")
cand_set = set(cand_set)
neg_set = cand_set - q_set
sen_tokens = []
sen_tokens.append(line.split("\t")[0])
sen_tokens.extend(line.split("\t")[1].split(","))
sen_tokens.append(line.split("\t")[2])
sen_id = [str(token2id[x]) for x in sen_tokens]
if len(neg_set) == 0:
trivial_sen_cnt += 1
#fout_q_trival.write(line + "\n")
fout_q_trival.write(" ".join(sen_id) + "\n")
else:
sen_candli_cnt += 1
candli_id_set = [str(token2id[x]) for x in neg_set]
sen_canli_str = "%s\t%s" % (" ".join(sen_id),
" ".join(list(candli_id_set)))
fout_sen_cand.write(sen_canli_str + "\n")
if len(cand_set) < len(q_set):
logger.error("ERROR! cand_set %d < q_set %d at line[%d]:%s" %
(len(cand_set), len(q_set), j, line))
if j % 100 == 0:
logger.debug(" ...processing %d at %s" % (j, time.ctime()))
        # Debug early-exit, intentionally disabled (the left-hand constant is negative).
        if -100 > 0 and j >= 100:
            break
logger.info(">> sen_canli_set count:%d " % sen_candli_cnt)
logger.info(">> trivial sen count:%d " % trivial_sen_cnt)
logger.info(">> Finish generate evaluation candidates for %s file at %s" %
(raw_test_file, time.ctime()))
def pathquery_data_preprocess(raw_train_file, raw_dev_file, raw_test_file,
vocab_path, sen_candli_file, trivial_sen_file,
new_train_file, new_dev_file, new_test_file,
train_base_file, dev_base_file, test_base_file):
entity_lst, relation_lst = get_unique_entities_relations(
raw_train_file, raw_dev_file, raw_test_file)
write_vocab(vocab_path, entity_lst, relation_lst)
filter_base_data(raw_train_file, raw_dev_file, raw_test_file,
train_base_file, dev_base_file, test_base_file)
generate_mask_type(raw_train_file, new_train_file)
generate_onlytail_mask_type(raw_dev_file, new_dev_file)
generate_onlytail_mask_type(raw_test_file, new_test_file)
vocab = load_vocab(vocab_path)
generate_eval_files(vocab_path, raw_test_file, train_base_file,
dev_base_file, test_base_file, sen_candli_file,
trivial_sen_file)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
type=str,
required=True,
default=None,
help="task name: fb15k, fb15k237, wn18rr, wn18, pathqueryFB, pathqueryWN"
)
parser.add_argument(
"--dir",
type=str,
required=True,
default=None,
help="task data directory")
parser.add_argument(
"--train",
type=str,
required=False,
default="train",
help="train file name, default train.txt")
parser.add_argument(
"--valid",
type=str,
required=False,
default="dev",
help="valid file name, default valid.txt")
parser.add_argument(
"--test",
type=str,
required=False,
default="test",
help="test file name, default test.txt")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
task = args.task.lower()
assert task in ["pathqueryfb", "pathquerywn"]
raw_train_file = os.path.join(args.dir, args.train)
raw_dev_file = os.path.join(args.dir, args.valid)
raw_test_file = os.path.join(args.dir, args.test)
new_train_file = os.path.join(args.dir, "train.coke.txt")
new_test_file = os.path.join(args.dir, "test.coke.txt")
new_dev_file = os.path.join(args.dir, "dev.coke.txt")
vocab_file = os.path.join(args.dir, "vocab.txt")
sen_candli_file = os.path.join(args.dir, "sen_candli.txt")
trivial_sen_file = os.path.join(args.dir, "trivial_sen.txt")
train_base_file = os.path.join(args.dir, "train.base.txt")
test_base_file = os.path.join(args.dir, "test.base.txt")
dev_base_file = os.path.join(args.dir, "dev.base.txt")
pathquery_data_preprocess(raw_train_file, raw_dev_file, raw_test_file,
vocab_file, sen_candli_file, trivial_sen_file,
new_train_file, new_dev_file, new_test_file,
train_base_file, dev_base_file, test_base_file)
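# --- Illustrative invocation, not part of the original file ---
# The script is driven entirely by the CLI flags defined in get_args(); a
# typical run on a (hypothetical) pathqueryFB directory containing raw files
# named "train", "dev" and "test" might look like:
#
#   python pathquery_data_preprocess.py --task pathqueryFB --dir ./data/pathqueryFB \
#       --train train --valid dev --test test
#
# which writes vocab.txt, the *.coke.txt / *.base.txt splits, sen_candli.txt
# and trivial_sen.txt next to the raw files.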
|
headless/examples/generate_traffic.py
|
aclk/abstreet
| 5,680 |
92498
|
#!/usr/bin/python3
# This example loads an exported JSON map, finds different buildings, and
# generates a simple travel demand model.
#
# 1) cargo run --bin dump_map data/system/us/seattle/maps/montlake.bin > montlake.json
# 2) ./headless/examples/generate_traffic.py --map=montlake.json --out=traffic.json
# 3) cargo run --bin import_traffic -- --map=data/system/us/seattle/maps/montlake.bin --input=traffic.json
# 4) Use data/system/us/seattle/scenarios/montlake/monday.bin in the game or from the API.
#
# Keep this script formatted with autopep8 -i
import argparse
import json
import random
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--map', type=str, required=True)
parser.add_argument('--out', type=str, required=True)
args = parser.parse_args()
# Load the map and find all buildings
residential_building_ids = []
commercial_building_ids = []
with open(args.map, encoding='utf8') as f:
map = json.load(f)
for b in map['buildings']:
# These categories are inferred from OpenStreetMap tags
if 'Residential' in b['bldg_type'] or 'ResidentialCommercial' in b['bldg_type']:
residential_building_ids.append(b['id'])
if 'Commercial' in b['bldg_type'] or 'ResidentialCommercial' in b['bldg_type']:
commercial_building_ids.append(b['id'])
# Randomly generate a few people who take just one trip
scenario = {
'scenario_name': 'monday',
'people': []
}
for _ in range(100):
src = random.choice(residential_building_ids)
dst = random.choice(commercial_building_ids)
scenario['people'].append({
'origin': {
'TripEndpoint': {
'Bldg': src,
}
},
'trips': [{
'departure': 1.0,
'destination': {
'TripEndpoint': {
'Bldg': dst,
}
},
'mode': 'Bike',
'purpose': 'Shopping'
}]
})
with open(args.out, 'w') as f:
f.write(json.dumps(scenario, indent=2))
if __name__ == '__main__':
main()
|
utils/lr_scheduler.py
|
happywu/simpledet-1
| 3,195 |
92514
|
import logging
import warnings
from math import cos, pi
from mxnet.lr_scheduler import LRScheduler
class WarmupMultiFactorScheduler(LRScheduler):
def __init__(self, step, factor=1, warmup=False, warmup_type='constant', warmup_lr=0, warmup_step=0):
super().__init__()
assert isinstance(step, list)
for i, _step in enumerate(step):
if i != 0 and step[i] <= step[i-1]:
raise ValueError("Schedule step must be an increasing integer list")
if factor > 1.0:
raise ValueError("Factor must be no more than 1 to make lr reduce")
if warmup:
if warmup_step >= step[0]:
raise ValueError("Warmup step must be smaller than schedule step")
if warmup_type not in ['constant', 'gradual']:
raise ValueError("Warmup scheduler only support constant or gradual")
self.step = step
self.cur_step_ind = 0
self.factor = factor
self.count = 0
self.warmup = warmup
self.warmup_type = warmup_type
self.warmup_lr = warmup_lr
self.warmup_step = warmup_step
def __call__(self, num_update):
if self.warmup and num_update <= self.warmup_step:
if self.warmup_type == 'constant':
return self.warmup_lr
elif self.warmup_type == 'gradual':
return (self.base_lr - self.warmup_lr) / self.warmup_step * num_update + self.warmup_lr
while self.cur_step_ind <= len(self.step) - 1:
if num_update > self.step[self.cur_step_ind]:
self.count = self.step[self.cur_step_ind]
self.cur_step_ind += 1
self.base_lr *= self.factor
logging.info("Update[%d]: Change learning rate to %0.5e",
num_update, self.base_lr)
else:
return self.base_lr
return self.base_lr
# from gluoncv
class LRSequential(LRScheduler):
r"""Compose Learning Rate Schedulers
Parameters
----------
schedulers: list
list of LRScheduler objects
"""
def __init__(self, schedulers):
super().__init__()
assert(len(schedulers) > 0)
self.update_sep = []
self.count = 0
self.learning_rate = 0
self.schedulers = []
for lr in schedulers:
self.add(lr)
def add(self, scheduler):
assert(isinstance(scheduler, LRScheduler))
scheduler.offset = self.count
self.count += scheduler.niters
self.update_sep.append(self.count)
self.schedulers.append(scheduler)
def __call__(self, num_update):
self.update(num_update)
return self.learning_rate
def update(self, num_update):
num_update = min(num_update, self.count - 1)
ind = len(self.schedulers) - 1
for i, sep in enumerate(self.update_sep):
if sep > num_update:
ind = i
break
lr = self.schedulers[ind]
lr.update(num_update)
self.learning_rate = lr.learning_rate
# from gluoncv
class AdvancedLRScheduler(LRScheduler):
    r"""Learning Rate Scheduler

    Parameters
    ----------
    mode : str
        Modes for learning rate scheduler.
        Currently it supports 'constant', 'step', 'linear', 'poly' and 'cosine'.
    base_lr : float
        Base learning rate, i.e. the starting learning rate.
    target_lr : float
        Target learning rate, i.e. the ending learning rate.
        With constant mode target_lr is ignored.
    niters : int
        Number of iterations to be scheduled.
    nepochs : int
        Number of epochs to be scheduled.
    iters_per_epoch : int
        Number of iterations in each epoch.
    offset : int
        Number of iterations before this scheduler.
    power : float
        Power parameter of poly scheduler.
    step_iter : list
        A list of iterations to decay the learning rate.
    step_epoch : list
        A list of epochs to decay the learning rate.
    step_factor : float
        Learning rate decay factor.
    """

    def __init__(self, mode, base_lr=0.1, target_lr=0,
                 niters=0, nepochs=0, iters_per_epoch=0, offset=0,
                 power=2, step_iter=None, step_epoch=None, step_factor=0.1,
                 baselr=None, targetlr=None):
        super().__init__()
        assert(mode in ['constant', 'step', 'linear', 'poly', 'cosine'])

        self.mode = mode
        if mode == 'step':
            assert(step_iter is not None or step_epoch is not None)
        if baselr is not None:
            warnings.warn("baselr is deprecated. Please use base_lr.")
            if base_lr == 0.1:
                base_lr = baselr
        self.base_lr = base_lr
        if targetlr is not None:
            warnings.warn("targetlr is deprecated. Please use target_lr.")
            if target_lr == 0:
                target_lr = targetlr
        self.target_lr = target_lr
        if self.mode == 'constant':
            self.target_lr = self.base_lr

        self.niters = niters
        self.step = step_iter
        epoch_iters = nepochs * iters_per_epoch
        if epoch_iters > 0:
            self.niters = epoch_iters
            if step_epoch is not None:
                self.step = [s*iters_per_epoch for s in step_epoch]

        self.offset = offset
        self.power = power
        self.step_factor = step_factor

    def __call__(self, num_update):
        self.update(num_update)
        return self.learning_rate

    def update(self, num_update):
        N = self.niters - 1
        T = num_update - self.offset
        T = min(max(0, T), N)

        if self.mode == 'constant':
            factor = 0
        elif self.mode == 'linear':
            factor = 1 - T / N
        elif self.mode == 'poly':
            factor = pow(1 - T / N, self.power)
        elif self.mode == 'cosine':
            factor = (1 + cos(pi * T / N)) / 2
        elif self.mode == 'step':
            if self.step is not None:
                count = sum([1 for s in self.step if s <= T])
                factor = pow(self.step_factor, count)
            else:
                factor = 1
        else:
            raise NotImplementedError

        if self.mode == 'step':
            self.learning_rate = self.base_lr * factor
        else:
            self.learning_rate = self.target_lr + (self.base_lr - self.target_lr) * factor
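

# Usage sketch (illustrative only, assuming the LRScheduler base class defined
# earlier in this file behaves like gluoncv's, i.e. super().__init__() needs no
# arguments): 500 iterations of linear warmup followed by a cosine decay. The
# concrete numbers are placeholders, not recommendations.
if __name__ == '__main__':
    warmup = AdvancedLRScheduler(mode='linear', base_lr=0.0, target_lr=0.1,
                                 niters=500)
    decay = AdvancedLRScheduler(mode='cosine', base_lr=0.1, target_lr=0.0,
                                niters=9500)
    schedule = LRSequential([warmup, decay])
    for it in (0, 250, 500, 5000, 9999):
        # LRSequential.__call__ dispatches to the scheduler whose iteration
        # range contains `it` and returns its current learning rate.
        print('iteration %5d -> lr %.5f' % (it, schedule(it)))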
|
alipay/aop/api/domain/InsurancePeriod.py
|
antopen/alipay-sdk-python-all
| 213 |
92554
|
<reponame>antopen/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InsurancePeriod(object):
    def __init__(self):
        self._period = None
        self._period_unit = None

    @property
    def period(self):
        return self._period

    @period.setter
    def period(self, value):
        self._period = value

    @property
    def period_unit(self):
        return self._period_unit

    @period_unit.setter
    def period_unit(self, value):
        self._period_unit = value

    def to_alipay_dict(self):
        params = dict()
        if self.period:
            if hasattr(self.period, 'to_alipay_dict'):
                params['period'] = self.period.to_alipay_dict()
            else:
                params['period'] = self.period
        if self.period_unit:
            if hasattr(self.period_unit, 'to_alipay_dict'):
                params['period_unit'] = self.period_unit.to_alipay_dict()
            else:
                params['period_unit'] = self.period_unit
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = InsurancePeriod()
        if 'period' in d:
            o.period = d['period']
        if 'period_unit' in d:
            o.period_unit = d['period_unit']
        return o
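

# Usage sketch (illustrative only; the field values below are made up):
# round-trip a 12-month period through the dict helpers above.
if __name__ == '__main__':
    p = InsurancePeriod()
    p.period = 12
    p.period_unit = 'MONTH'
    payload = p.to_alipay_dict()   # {'period': 12, 'period_unit': 'MONTH'}
    restored = InsurancePeriod.from_alipay_dict(payload)
    print(json.dumps(payload), restored.period, restored.period_unit)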
|
utils/swift_build_support/swift_build_support/build_graph.py
|
gandhi56/swift
| 72,551 |
92608
|
# swift_build_support/build_graph.py ----------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# This is a simple implementation of an acyclic build graph. We require no
# cycles, so we just perform a reverse post order traversal to get a topological
# ordering. We check during the reverse post order traversal that we do not
# visit any node multiple times.
#
# Nodes are assumed to be a product's class.
#
# ----------------------------------------------------------------------------
def _get_po_ordered_nodes(root, invertedDepMap):
    # Then setup our worklist/visited node set.
    worklist = [root]
    visitedNodes = set([])
    # TODO: Can we unify po_ordered_nodes and visitedNodes in some way?
    po_ordered_nodes = []

    # Until we no longer have nodes to visit...
    while not len(worklist) == 0:
        # First grab the last element of the worklist. If we have already
        # visited this node, just pop it and skip it.
        #
        # DISCUSSION: Consider the following build graph:
        #
        #   A -> [C, B]
        #   B -> [C]
        #
        # In this case, we will most likely get the following worklist
        # before actually processing anything:
        #
        #   A, C, B, C
        #
        # In this case, we want to ignore the initial C pushed onto the
        # worklist by visiting A since we will have visited C already due to
        # the edge from B -> C.
        node = worklist[-1]
        if node in visitedNodes:
            worklist.pop()
            continue

        # Then grab the dependents of our node.
        deps = invertedDepMap.get(node, set([]))
        assert(isinstance(deps, set))

        # Then visit those and see if we have not visited any of them. Push
        # any such nodes onto the worklist and continue. If we have already
        # visited all of our dependents, then we can actually process this
        # node.
        foundDep = False
        for d in deps:
            if d not in visitedNodes:
                foundDep = True
                worklist.append(d)
        if foundDep:
            continue

        # Now process the node by popping it off the worklist, adding it to
        # the visited nodes set, and append it to the po_ordered_nodes in
        # its final position.
        worklist.pop()
        visitedNodes.add(node)
        po_ordered_nodes.append(node)
    return po_ordered_nodes
class BuildDAG(object):
    def __init__(self):
        self.root = None

        # A map from a node to a list of nodes that depend on the given node.
        #
        # NOTE: This is an inverted dependency map implying that the root will
        # be a "final element" of the graph.
        self.invertedDepMap = {}

    def add_edge(self, pred, succ):
        self.invertedDepMap.setdefault(pred, set([succ])) \
                           .add(succ)

    def set_root(self, root):
        # Assert that we always only have one root.
        assert(self.root is None)
        self.root = root

    def produce_schedule(self):
        # Grab the root and make sure it is not None
        root = self.root
        assert(root is not None)

        # Then perform a post order traversal from root using our inverted
        # dependency map to compute a list of our nodes in post order.
        #
        # NOTE: The index of each node in this list is the post order number of
        # the node.
        po_ordered_nodes = _get_po_ordered_nodes(root, self.invertedDepMap)

        # Ok, we have our post order list. We want to provide our user a reverse
        # post order, so we take our array and construct a dictionary of an
        # enumeration of the list. This will give us a dictionary mapping our
        # product names to their reverse post order number.
        rpo_ordered_nodes = list(reversed(po_ordered_nodes))
        node_to_rpot_map = dict((y, x) for x, y in enumerate(rpo_ordered_nodes))

        # Now before we return our rpo_ordered_nodes and our node_to_rpot_map, lets
        # verify that we didn't find any cycles. We can do this by traversing
        # our dependency graph in reverse post order and making sure all
        # dependencies of each node we visit has a later reverse post order
        # number than the node we are checking.
        for n, node in enumerate(rpo_ordered_nodes):
            for dep in self.invertedDepMap.get(node, []):
                if node_to_rpot_map[dep] < n:
                    print('n: {}. node: {}.'.format(n, node))
                    print('dep: {}.'.format(dep))
                    print('inverted dependency map: {}'.format(self.invertedDepMap))
                    print('rpo ordered nodes: {}'.format(rpo_ordered_nodes))
                    print('rpo node to rpo number map: {}'.format(node_to_rpot_map))
                    raise RuntimeError('Found cycle in build graph!')

        return (rpo_ordered_nodes, node_to_rpot_map)
def produce_scheduled_build(input_product_classes):
    """For a given subset input_product_classes of all product classes, compute
    a topological ordering of input_product_classes plus their topological
    closure that respects the dependency graph.
    """
    dag = BuildDAG()
    worklist = list(input_product_classes)
    visited = set(input_product_classes)

    # Construct the DAG.
    while len(worklist) > 0:
        entry = worklist.pop()
        deps = entry.get_dependencies()
        if len(deps) == 0:
            dag.set_root(entry)
        for d in deps:
            dag.add_edge(d, entry)
            if d not in visited:
                worklist.append(d)
        visited = visited.union(deps)

    # Then produce the schedule.
    schedule = dag.produce_schedule()

    # Finally check that all of our input_product_classes are in the schedule.
    if len(set(input_product_classes) - set(schedule[0])) != 0:
        raise RuntimeError('Found disconnected graph?!')

    return schedule
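

# Usage sketch (illustrative only): the two Fake* classes below are
# hypothetical stand-ins for real Swift build products, which expose a
# class-level get_dependencies(). produce_scheduled_build() returns the
# products in reverse post order, so dependencies come before dependents.
if __name__ == '__main__':
    class FakeLLVM(object):
        @classmethod
        def get_dependencies(cls):
            return []

    class FakeSwift(object):
        @classmethod
        def get_dependencies(cls):
            return [FakeLLVM]

    ordered, rpo_numbers = produce_scheduled_build([FakeSwift, FakeLLVM])
    # Prints ['FakeLLVM', 'FakeSwift']: FakeLLVM is scheduled first because
    # FakeSwift depends on it.
    print([product.__name__ for product in ordered])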
|
wagtail/core/tests/test_comments.py
|
brownaa/wagtail
| 8,851 |
92627
|
<reponame>brownaa/wagtail
from django.contrib.auth import get_user_model
from django.test import TestCase
from wagtail.core.models import Comment, Page
class CommentTestingUtils:
    def setUp(self):
        self.page = Page.objects.get(title="Welcome to the Wagtail test site!")
        self.revision_1 = self.page.save_revision()
        self.revision_2 = self.page.save_revision()

    def create_comment(self, revision_created):
        return Comment.objects.create(
            page=self.page,
            user=get_user_model().objects.first(),
            text='test',
            contentpath='title',
            revision_created=revision_created,
        )


class TestRevisionDeletion(CommentTestingUtils, TestCase):
    fixtures = ['test.json']

    def setUp(self):
        super().setUp()
        self.revision_3 = self.page.save_revision()
        self.old_comment = self.create_comment(self.revision_1)
        self.new_comment = self.create_comment(self.revision_3)

    def test_deleting_old_revision_moves_comment_revision_created_forwards(self):
        # test that when a revision is deleted, a comment linked to it via revision_created has its revision_created moved
        # to the next revision
        self.revision_1.delete()
        self.old_comment.refresh_from_db()
        self.assertEqual(self.old_comment.revision_created, self.revision_2)

    def test_deleting_most_recent_revision_deletes_created_comments(self):
        # test that when the most recent revision is deleted, any comments created on it are also deleted
        self.revision_3.delete()
        with self.assertRaises(Comment.DoesNotExist):
            self.new_comment.refresh_from_db()
|
examples/integrations/sklearn/svm.py
|
anukaal/opytimizer
| 528 |
92653
|
<gh_stars>100-1000
import numpy as np
from sklearn import svm
from sklearn.datasets import load_digits
from sklearn.model_selection import KFold, cross_val_score
from opytimizer import Opytimizer
from opytimizer.core import Function
from opytimizer.optimizers.swarm import PSO
from opytimizer.spaces import SearchSpace
# Loads digits dataset
digits = load_digits()
# Gathers samples and targets
X = digits.data
Y = digits.target
def _svm(opytimizer):
    # Gathers params
    C = opytimizer[0][0]

    # Instantiating an SVC class
    svc = svm.SVC(C=C, kernel='linear')

    # Creates a cross-validation holder
    k_fold = KFold(n_splits=5)

    # Fitting model using cross-validation
    scores = cross_val_score(svc, X, Y, cv=k_fold, n_jobs=-1)

    # Calculates scores mean
    mean_score = np.mean(scores)

    return 1 - mean_score
# Number of agents and decision variables
n_agents = 10
n_variables = 1
# Lower and upper bounds (has to be the same size as `n_variables`)
lower_bound = [0.000001]
upper_bound = [10]
# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(_svm)
# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)
# Runs the optimization task
opt.start(n_iterations=100)
|
third_party/py/gflags/gflags/flags_unicode_literals_test.py
|
sevki/bazel
| 218 |
92656
|
#!/usr/bin/env python
"""Test the use of flags when from __future__ import unicode_literals is on."""
from __future__ import unicode_literals
import unittest
import gflags
gflags.DEFINE_string('seen_in_crittenden', 'alleged mountain lion',
                     'This tests if unicode input to these functions works.')


class FlagsUnicodeLiteralsTest(unittest.TestCase):
    def testUnicodeFlagNameAndValueAreGood(self):
        alleged_mountain_lion = gflags.FLAGS.seen_in_crittenden
        self.assertTrue(
            isinstance(alleged_mountain_lion, type(u'')),
            msg='expected flag value to be a {} not {}'.format(
                type(u''), type(alleged_mountain_lion)))
        self.assertEqual(alleged_mountain_lion, u'alleged mountain lion')


if __name__ == '__main__':
    unittest.main()
|
app/src/thirdparty/telemetry/internal/platform/tracing_agent/chrome_tracing_agent_unittest.py
|
ta2edchimp/big-rig
| 925 |
92660
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.internal.platform.tracing_agent import chrome_tracing_agent
class FakePlatformBackend(object):
    pass


class FakeDevtoolsClient(object):
    def __init__(self, remote_port):
        self.is_alive = True
        self.tracing_started = False
        self.remote_port = remote_port
        self.will_raise_exception_in_stop_tracing = False

    def IsAlive(self):
        return self.is_alive

    def StartChromeTracing(self, _trace_options, _filter_string, _timeout=10):
        self.tracing_started = True

    def StopChromeTracing(self, _trace_data_builder):
        self.tracing_started = False
        if self.will_raise_exception_in_stop_tracing:
            raise Exception

    def IsChromeTracingSupported(self):
        return True


class FakeTraceOptions(object):
    def __init__(self):
        self.enable_chrome_trace = True


class FakeCategoryFilter(object):
    def __init__(self):
        self.filter_string = 'foo'


class ChromeTracingAgentUnittest(unittest.TestCase):
    def setUp(self):
        self.platform1 = FakePlatformBackend()
        self.platform2 = FakePlatformBackend()
        self.platform3 = FakePlatformBackend()

    def StartTracing(self, platform_backend, enable_chrome_trace=True):
        assert chrome_tracing_agent.ChromeTracingAgent.IsSupported(platform_backend)
        agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
        trace_options = FakeTraceOptions()
        trace_options.enable_chrome_trace = enable_chrome_trace
        agent.Start(trace_options, FakeCategoryFilter(), 10)
        return agent

    def StopTracing(self, tracing_agent):
        tracing_agent.Stop(None)

    def testRegisterDevtoolsClient(self):
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            FakeDevtoolsClient(1), self.platform1)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            FakeDevtoolsClient(2), self.platform1)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            FakeDevtoolsClient(3), self.platform1)

        tracing_agent_of_platform1 = self.StartTracing(self.platform1)
        with self.assertRaises(chrome_tracing_agent.ChromeTracingStartedError):
            chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
                FakeDevtoolsClient(4), self.platform1)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            FakeDevtoolsClient(5), self.platform2)

        self.StopTracing(tracing_agent_of_platform1)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            FakeDevtoolsClient(6), self.platform1)

    def testIsSupport(self):
        self.assertFalse(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
        self.assertFalse(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
        self.assertFalse(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))

        devtool1 = FakeDevtoolsClient(1)
        devtool2 = FakeDevtoolsClient(2)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            devtool1, self.platform1)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            devtool2, self.platform2)
        devtool2.is_alive = False

        # Chrome tracing is only supported on platform 1 since only platform 1 has
        # an alive devtool.
        self.assertTrue(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
        self.assertFalse(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
        self.assertFalse(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))

    def testStartAndStopTracing(self):
        devtool1 = FakeDevtoolsClient(1)
        devtool2 = FakeDevtoolsClient(2)
        devtool3 = FakeDevtoolsClient(3)
        devtool4 = FakeDevtoolsClient(2)
        # Register devtools 1, 2, 3 on platform1 and devtool 4 on platform 2
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            devtool1, self.platform1)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            devtool2, self.platform1)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            devtool3, self.platform1)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            devtool4, self.platform2)
        devtool2.is_alive = False

        tracing_agent1 = self.StartTracing(self.platform1)
        with self.assertRaises(chrome_tracing_agent.ChromeTracingStartedError):
            self.StartTracing(self.platform1)

        self.assertTrue(devtool1.tracing_started)
        self.assertFalse(devtool2.tracing_started)
        self.assertTrue(devtool3.tracing_started)
        # Devtool 4 shouldn't have tracing started although it has the same remote
        # port as devtool 2
        self.assertFalse(devtool4.tracing_started)

        self.StopTracing(tracing_agent1)
        self.assertFalse(devtool1.tracing_started)
        self.assertFalse(devtool2.tracing_started)
        self.assertFalse(devtool3.tracing_started)
        self.assertFalse(devtool4.tracing_started)

        # Test that it should be ok to start & stop tracing on platform1 again.
        tracing_agent1 = self.StartTracing(self.platform1)
        self.StopTracing(tracing_agent1)

        tracing_agent2 = self.StartTracing(self.platform2)
        self.assertTrue(devtool4.tracing_started)
        self.StopTracing(tracing_agent2)
        self.assertFalse(devtool4.tracing_started)

    def testExceptionRaisedInStopTracing(self):
        devtool1 = FakeDevtoolsClient(1)
        devtool2 = FakeDevtoolsClient(2)
        # Register devtools 1, 2 on platform 1
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            devtool1, self.platform1)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            devtool2, self.platform1)

        tracing_agent1 = self.StartTracing(self.platform1)

        self.assertTrue(devtool1.tracing_started)
        self.assertTrue(devtool2.tracing_started)

        devtool2.will_raise_exception_in_stop_tracing = True
        with self.assertRaises(chrome_tracing_agent.ChromeTracingStoppedError):
            self.StopTracing(tracing_agent1)

        devtool1.is_alive = False
        devtool2.is_alive = False

        # Register devtools 3 on platform 1 should not raise any exception.
        devtool3 = FakeDevtoolsClient(3)
        chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
            devtool3, self.platform1)

        # Start & Stop tracing on platform 1 should work just fine.
        tracing_agent2 = self.StartTracing(self.platform1)
        self.StopTracing(tracing_agent2)
|
codalab/apps/queues/migrations/0001_initial.py
|
AIMultimediaLab/AI4Media-EaaS-prototype-Py2-public
| 333 |
92661
|
<reponame>AIMultimediaLab/AI4Media-EaaS-prototype-Py2-public
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    depends_on = (
        ("authenz", "0002_auto__add_cluser"),
    )

    def forwards(self, orm):
        # Adding model 'Queue'
        db.create_table(u'queues_queue', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('vhost', self.gf('django.db.models.fields.CharField')(unique=True, max_length=36, blank=True)),
            ('is_public', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['authenz.ClUser'])),
        ))
        db.send_create_signal(u'queues', ['Queue'])

        # Adding M2M table for field organizers on 'Queue'
        m2m_table_name = db.shorten_name(u'queues_queue_organizers')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('queue', models.ForeignKey(orm[u'queues.queue'], null=False)),
            ('cluser', models.ForeignKey(orm[u'authenz.cluser'], null=False))
        ))
        db.create_unique(m2m_table_name, ['queue_id', 'cluser_id'])

    def backwards(self, orm):
        # Deleting model 'Queue'
        db.delete_table(u'queues_queue')

        # Removing M2M table for field organizers on 'Queue'
        db.delete_table(db.shorten_name(u'queues_queue_organizers'))

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'authenz.cluser': {
            'Meta': {'object_name': 'ClUser'},
            'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_on_submission_finished_successfully': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'organizer_direct_message_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'organizer_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'participation_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'password': ('<PASSWORD>', [], {'max_length': '128'}),
            'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'team_members': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'queues.queue': {
            'Meta': {'object_name': 'Queue'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'organizers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'organizers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}),
            'vhost': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'})
        }
    }

    complete_apps = ['queues']
|