| max_stars_repo_path (string, length 4–245) | max_stars_repo_name (string, length 7–115) | max_stars_count (int64, 101–368k) | id (string, length 2–8) | content (string, length 6–1.03M) |
|---|---|---|---|---|
| Zhang2017MultiStyle/test.py | czczup/URST | 119 | 104583 |
<filename>Zhang2017MultiStyle/test.py
import torch
import utils
import argparse
import numpy as np
from model import Net
from PIL import Image
from torch.autograd import Variable
import torchvision.transforms as transforms
import torch.nn.functional as F
from tqdm import tqdm
import math
import time
import os
import sys
sys.path.append("..")
from tools import unpadding, preprocess
from thumb_instance_norm import init_thumbnail_instance_norm
def test_transform(size, crop):
transform_list = []
if size != 0:
transform_list.append(transforms.Resize(size))
if crop:
transform_list.append(transforms.CenterCrop(size))
transform_list.append(transforms.ToTensor())
transform = transforms.Compose(transform_list)
return transform
def set_style_target(style, model):
style = utils.tensor_rgb2bgr(style)
style = style.unsqueeze(0)
init_thumbnail_instance_norm(model, collection=True)
model.setTarget(style)
def save_image(image, save_path):
image = image.add_(0.5).clamp_(0, 255).squeeze(0)
image = utils.tensor_bgr2rgb(image)
image = image.permute(1, 2, 0).to(torch.uint8).cpu().numpy()
image = Image.fromarray(image)
image.save(save_path)
def style_transfer_thumbnail(thumb, model, save_path, save=True):
thumb = thumb.unsqueeze(0)
thumb = utils.preprocess_batch(thumb)
init_thumbnail_instance_norm(model, collection=True)
stylized_thumb = model.forward(thumb)
if save:
save_image(stylized_thumb, save_path)
def style_transfer_high_resolution(patches, model, padding, save_path,
collection=False, save=True):
stylized_patches = []
init_thumbnail_instance_norm(model, collection=collection)
for patch in tqdm(patches):
patch = utils.tensor_rgb2bgr(patch).unsqueeze(0).to(device)
stylized_patch = model.forward(patch)
stylized_patch = F.interpolate(stylized_patch, patch.shape[2:], mode='bilinear', align_corners=True)
stylized_patch = unpadding(stylized_patch, padding=padding)
stylized_patches.append(stylized_patch.cpu())
stylized_patches = torch.cat(stylized_patches, dim=0)
b, c, h, w = stylized_patches.shape
stylized_patches = stylized_patches.unsqueeze(dim=0)
stylized_patches = stylized_patches.view(1, b, c * h * w).permute(0, 2, 1).contiguous()
output_size = (int(math.sqrt(b) * h), int(math.sqrt(b) * w))
stylized_image = F.fold(stylized_patches, output_size=output_size,
kernel_size=(h, w), stride=(h, w))
if save:
save_image(stylized_image, save_path)
def load_model(model_path):
style_model = Net(ngf=128)
model_dict = torch.load(model_path)
model_dict_clone = model_dict.copy()
for key, value in model_dict_clone.items():
if key.endswith(('running_mean', 'running_var')):
del model_dict[key]
style_model.load_state_dict(model_dict, False)
style_model = style_model.to(device)
return style_model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--content", type=str, required=True, help="path to content image")
parser.add_argument("--style", type=str, required=True, help="path to style image")
parser.add_argument("--model", type=str, default="models/21styles.model",
help="path to checkpoint model")
parser.add_argument('--patch_size', type=int, default=1000, help='patch size')
parser.add_argument('--thumb_size', type=int, default=1024, help='thumbnail size')
parser.add_argument('--style_size', type=int, default=1024, help='style size')
parser.add_argument('--padding', type=int, default=32, help='padding size')
parser.add_argument('--test_speed', action="store_true", help='test the speed')
parser.add_argument('--outf', type=str, default="images/outputs", help='path to save')
parser.add_argument('--URST', action="store_true", help='use URST framework')
parser.add_argument("--device", type=str, default="cuda", help="device")
args = parser.parse_args()
print(args)
device = torch.device(args.device)
os.makedirs(args.outf, exist_ok=True)
PATCH_SIZE = args.patch_size
PADDING = args.padding
content_tf = test_transform(0, False)
style_tf = test_transform(args.style_size, True)
model = load_model(model_path=args.model)
model.eval()
repeat = 15 if args.test_speed else 1
time_list = []
for i in range(repeat):
# Prepare input
image = Image.open(args.content)
IMAGE_WIDTH, IMAGE_HEIGHT = image.size
style = Image.open(args.style)
torch.cuda.synchronize()
start_time = time.time()
style = np.array(style).transpose(2, 0, 1)
style = torch.from_numpy(style).float().to(device)
if args.URST:
aspect_ratio = IMAGE_WIDTH / IMAGE_HEIGHT
thumbnail = image.resize((int(aspect_ratio * args.thumb_size), args.thumb_size))
patches = preprocess(image, padding=PADDING, patch_size=PATCH_SIZE,
transform=content_tf, cuda=False).mul_(255.0)
thumbnail = np.array(thumbnail).transpose(2, 0, 1)
thumbnail = torch.from_numpy(thumbnail).float().to(device)
print("patch:", patches.shape)
print("thumb:", thumbnail.shape)
print("style:", style.shape)
with torch.no_grad():
set_style_target(style=style, model=model)
style_transfer_thumbnail(thumbnail, model=model,
save_path=os.path.join(args.outf, "thumb-%d.jpg" % args.thumb_size),
save=False if args.test_speed else True)
style_transfer_high_resolution(
patches, model, padding=PADDING, collection=False,
save_path=os.path.join(args.outf, "ours-patch%d-padding%d.jpg" % (PATCH_SIZE, PADDING)),
save=False if args.test_speed else True
)
# style_transfer_high_resolution(patches, model, padding=PADDING, collection=True,
# save_path=os.path.join(args.outf, "baseline-width%d-padding%d.jpg" % (PATCH_SIZE, PADDING))
# )
else:
image = np.array(image).transpose(2, 0, 1)
image = torch.from_numpy(image).float().to(device)
print("image:", image.shape)
print("style:", style.shape)
with torch.no_grad():
set_style_target(style=style, model=model)
style_transfer_thumbnail(image, model=model,
save_path=os.path.join(args.outf, "original_result.jpg"),
save=False if args.test_speed else True)
torch.cuda.synchronize()
time_list.append(time.time() - start_time)
print("time: %.2fs" % np.mean(time_list[-10:]))
# print("Max GPU memory allocated: %.4f GB" % (torch.cuda.max_memory_allocated(device=0) / 1024. / 1024. / 1024.))
# print("Total memory of the current GPU: %.4f GB" % (torch.cuda.get_device_properties(device=0).total_memory / 1024. / 1024 / 1024))
| examples/pytorch/bgnn/run.py | ketyi/dgl | 9,516 | 104605 |
<reponame>ketyi/dgl
import dgl
import torch
from BGNN import BGNNPredictor
import pandas as pd
import numpy as np
import json
import os
from dgl.data.utils import load_graphs
from dgl.nn.pytorch import GATConv as GATConvDGL, GraphConv, ChebConv as ChebConvDGL, \
AGNNConv as AGNNConvDGL, APPNPConv
from torch.nn import Dropout, ELU, Sequential, Linear, ReLU
import torch.nn.functional as F
from category_encoders import CatBoostEncoder
from sklearn import preprocessing
class GNNModelDGL(torch.nn.Module):
def __init__(self, in_dim, hidden_dim, out_dim,
dropout=0., name='gat', residual=True, use_mlp=False, join_with_mlp=False):
super(GNNModelDGL, self).__init__()
self.name = name
self.use_mlp = use_mlp
self.join_with_mlp = join_with_mlp
self.normalize_input_columns = True
if name == 'gat':
self.l1 = GATConvDGL(in_dim, hidden_dim//8, 8, feat_drop=dropout, attn_drop=dropout, residual=False,
activation=F.elu)
self.l2 = GATConvDGL(hidden_dim, out_dim, 1, feat_drop=dropout, attn_drop=dropout, residual=residual, activation=None)
elif name == 'gcn':
self.l1 = GraphConv(in_dim, hidden_dim, activation=F.elu)
self.l2 = GraphConv(hidden_dim, out_dim, activation=F.elu)
self.drop = Dropout(p=dropout)
elif name == 'cheb':
self.l1 = ChebConvDGL(in_dim, hidden_dim, k = 3)
self.l2 = ChebConvDGL(hidden_dim, out_dim, k = 3)
self.drop = Dropout(p=dropout)
elif name == 'agnn':
self.lin1 = Sequential(Dropout(p=dropout), Linear(in_dim, hidden_dim), ELU())
self.l1 = AGNNConvDGL(learn_beta=False)
self.l2 = AGNNConvDGL(learn_beta=True)
self.lin2 = Sequential(Dropout(p=dropout), Linear(hidden_dim, out_dim), ELU())
elif name == 'appnp':
self.lin1 = Sequential(Dropout(p=dropout), Linear(in_dim, hidden_dim),
ReLU(), Dropout(p=dropout), Linear(hidden_dim, out_dim))
self.l1 = APPNPConv(k=10, alpha=0.1, edge_drop=0.)
def forward(self, graph, features):
h = features
if self.use_mlp:
if self.join_with_mlp:
h = torch.cat((h, self.mlp(features)), 1)
else:
h = self.mlp(features)
if self.name == 'gat':
h = self.l1(graph, h).flatten(1)
logits = self.l2(graph, h).mean(1)
elif self.name in ['appnp']:
h = self.lin1(h)
logits = self.l1(graph, h)
elif self.name == 'agnn':
h = self.lin1(h)
h = self.l1(graph, h)
h = self.l2(graph, h)
logits = self.lin2(h)
elif self.name == 'cheb':
lambda_max = dgl.laplacian_lambda_max(graph)
h = self.drop(h)
h = self.l1(graph, h, lambda_max)
logits = self.l2(graph, h, lambda_max)
elif self.name == 'gcn':
h = self.drop(h)
h = self.l1(graph, h)
logits = self.l2(graph, h)
return logits
def read_input(input_folder):
X = pd.read_csv(f'{input_folder}/X.csv')
y = pd.read_csv(f'{input_folder}/y.csv')
categorical_columns = []
if os.path.exists(f'{input_folder}/cat_features.txt'):
with open(f'{input_folder}/cat_features.txt') as f:
for line in f:
if line.strip():
categorical_columns.append(line.strip())
cat_features = None
if categorical_columns:
columns = X.columns
cat_features = np.where(columns.isin(categorical_columns))[0]
for col in list(columns[cat_features]):
X[col] = X[col].astype(str)
gs, _ = load_graphs(f'{input_folder}/graph.dgl')
graph = gs[0]
with open(f'{input_folder}/masks.json') as f:
masks = json.load(f)
return graph, X, y, cat_features, masks
def normalize_features(X, train_mask, val_mask, test_mask):
min_max_scaler = preprocessing.MinMaxScaler()
A = X.to_numpy(copy=True)
A[train_mask] = min_max_scaler.fit_transform(A[train_mask])
A[val_mask + test_mask] = min_max_scaler.transform(A[val_mask + test_mask])
return pd.DataFrame(A, columns=X.columns).astype(float)
def replace_na(X, train_mask):
if X.isna().any().any():
return X.fillna(X.iloc[train_mask].min() - 1)
return X
def encode_cat_features(X, y, cat_features, train_mask, val_mask, test_mask):
enc = CatBoostEncoder()
A = X.to_numpy(copy=True)
b = y.to_numpy(copy=True)
A[np.ix_(train_mask, cat_features)] = enc.fit_transform(A[np.ix_(train_mask, cat_features)], b[train_mask])
A[np.ix_(val_mask + test_mask, cat_features)] = enc.transform(A[np.ix_(val_mask + test_mask, cat_features)])
A = A.astype(float)
return pd.DataFrame(A, columns=X.columns)
if __name__ == '__main__':
# datasets can be found here: https://www.dropbox.com/s/verx1evkykzli88/datasets.zip
# Read dataset
input_folder = 'datasets/avazu'
graph, X, y, cat_features, masks = read_input(input_folder)
train_mask, val_mask, test_mask = masks['0']['train'], masks['0']['val'], masks['0']['test']
encoded_X = X.copy()
normalizeFeatures = False
replaceNa = True
if len(cat_features):
encoded_X = encode_cat_features(encoded_X, y, cat_features, train_mask, val_mask, test_mask)
if normalizeFeatures:
encoded_X = normalize_features(encoded_X, train_mask, val_mask, test_mask)
if replaceNa:
encoded_X = replace_na(encoded_X, train_mask)
# specify parameters
task = 'regression'
hidden_dim = 128
trees_per_epoch = 5 # 5-10 are good values to try
backprop_per_epoch = 5 # 5-10 are good values to try
lr = 0.1 # 0.01-0.1 are good values to try
append_gbdt_pred = False # this can be important for performance (try True and False)
train_input_features = False
gbdt_depth = 6
gbdt_lr = 0.1
out_dim = y.shape[1] if task == 'regression' else len(set(y.iloc[test_mask, 0]))
in_dim = out_dim + X.shape[1] if append_gbdt_pred else out_dim
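# Worked example of the two lines above (illustrative only): for regression with a
# single-column y and append_gbdt_pred=False, out_dim == 1 and in_dim == out_dim == 1;
# with append_gbdt_pred=True and, say, X.shape[1] == 10, in_dim would be 1 + 10 = 11.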
# specify GNN model
gnn_model = GNNModelDGL(in_dim, hidden_dim, out_dim)
# initialize BGNN model
bgnn = BGNNPredictor(gnn_model, task=task,
loss_fn=None,
trees_per_epoch=trees_per_epoch,
backprop_per_epoch=backprop_per_epoch,
lr=lr,
append_gbdt_pred=append_gbdt_pred,
train_input_features=train_input_features,
gbdt_depth=gbdt_depth,
gbdt_lr=gbdt_lr)
# train
metrics = bgnn.fit(graph, encoded_X, y, train_mask, val_mask, test_mask,
original_X = X, cat_features=cat_features,
num_epochs=100, patience=10, metric_name='loss')
bgnn.plot_interactive(metrics, legend=['train', 'valid', 'test'], title='Avazu', metric_name='loss')
| pyNastran/gui/gui_objects/alt_geometry_storage.py | luzpaz/pyNastran | 293 | 104616 |
<gh_stars>100-1000
from copy import deepcopy
class AltGeometry:
representations = ['main', 'toggle', 'wire', 'point', 'surface',
'bar', 'wire+point', 'wire+surf']
displays = ['Wireframe', 'Surface', 'point', None]
def __repr__(self):
msg = ('AltGeometry(%r, color=%s, line_width=%s, opacity=%s,\n'
' point_size=%s, bar_scale=%s, representation=%r, display=%r, is_visible=%s,\n'
'is_pickable=%s, label_actors=%s)' % (
self.name, str(self.color), self.line_width, self.opacity, self.point_size,
self.bar_scale, self.representation, self.display, self.is_visible,
self.is_pickable, self.label_actors))
return msg
def __init__(self, parent, name, color=None, line_width=1, opacity=0.0,
point_size=1, bar_scale=1.0, representation='main', display=None, is_visible=True,
is_pickable=False, label_actors=None):
"""
Creates an AltGeometry object
Parameters
----------
line_width : int
the width of the line for 'surface' and 'main'
color : [int, int, int]
the RGB colors (0-255)
opacity : float
0.0 -> transparent
1.0 -> solid
point_size : int
the point size for 'point'
bar_scale : float
the scale for the CBAR / CBEAM elements
representation : str
main - change with main mesh
wire - always wireframe
point - always points
surface - always surface
bar - can use bar scale
toggle - follow the main mesh
wire+point - is this used???
wire+surf - two options
display : str
only relevant to wire+surf
the active state of the mesh
is_visible : bool; default=True
is this actor currently visible
is_pickable : bool; default=False
can you pick a node/cell on this actor
label_actors : List[annotation]; None -> []
stores annotations (e.g., for a control surface)
"""
representation_map = {
'main' : None,
'wire' : 'Wireframe',
'point' : 'point',
'surface' : 'Surface',
'wire+surf' : 'Surface',
'wire+point' : 'Wireframe',
'bar' : None,
'toggle' : None,
}
if display is None:
try:
display = representation_map[representation]
except KeyError:
valid_keys = list(representation_map.keys())
valid_keys.sort()
raise RuntimeError('%r is not a valid representation\nvalid=[%s]' % (
representation, ', '.join(valid_keys)))
if line_width is None:
line_width = 1
if opacity is None:
opacity = 1.0
if label_actors is None:
label_actors = []
self.parent = parent
self.name = name
self.display = display
assert display in self.displays, 'display=%r displays=%s' % (display, self.displays)
assert isinstance(name, str), 'name=%r' % name
assert isinstance(label_actors, list), 'name=%r label_actors=%s' % (name, str(label_actors))
self._color = None
if color is not None:
assert color is not None, color
self.color = color
self.line_width = line_width
self.point_size = point_size
self._opacity = opacity
self.bar_scale = bar_scale
self.label_actors = label_actors
assert isinstance(is_visible, bool), is_visible
self.is_visible = is_visible
self.is_pickable = is_pickable
if representation not in self.representations:
msg = 'representation=%r is invalid\nrepresentations=%r' % (
representation, self.representations)
raise RuntimeError(msg)
self.representation = representation
def __deepcopy__(self, memo):
"""doesn't copy the label_actors to speed things up?"""
keys = ['name', '_color', 'display', 'line_width', 'point_size', '_opacity',
'_representation', 'is_visible', 'bar_scale', 'is_pickable']
cls = self.__class__
result = cls.__new__(cls)
idi = id(self)
memo[idi] = result
for key in keys:
value = self.__dict__[key]
setattr(result, key, deepcopy(value, memo))
#result.label_actors = [] #= memo['label_actors']
return result
@property
def opacity(self):
"""
0 -> transparent
1 -> solid
"""
assert 0.0 <= self._opacity <= 1.0, self._opacity
return self._opacity
@opacity.setter
def opacity(self, opacity):
assert 0.0 <= opacity <= 1.0, opacity
self._opacity = opacity
@property
def transparency(self):
"""
0 -> solid
1 -> transparent
"""
assert 0.0 <= self._opacity <= 1.0, self._opacity
return 1.0 - self._opacity
@transparency.setter
def transparency(self, transparency):
assert 0.0 <= transparency <= 1.0, transparency
self._opacity = 1.0 - transparency
@property
def color(self):
if self._color is None:
return (255, 0, 0) # the default color; red
return self._color
@color.setter
def color(self, color):
assert len(color) == 3, color
if isinstance(color[0], int):
assert isinstance(color[0], int), color[0]
assert isinstance(color[1], int), color[1]
assert isinstance(color[2], int), color[2]
self._color = tuple(color)
else:
assert isinstance(color[0], float), color[0]
assert isinstance(color[1], float), color[1]
assert isinstance(color[2], float), color[2]
self._color = (int(color[0] * 255), int(color[1] * 255), int(color[2] * 255))
@property
def color_float(self):
return tuple([i/255. for i in self._color])
def set_color(self, color, mode='rgb'):
assert mode == 'rgb', 'mode=%r' % mode
self.color = color
assert len(color) == 3, color
#self.mode = 'rgb'
@property
def representation(self):
"""
Gets the representation
* main - main mesh
* toggle - change with main mesh
* wire - always wireframe
* point - always points
* surface - always surface
* bar - this can use bar scale
* wire+point - point (vertex) and wireframe allowed
* wire+surf - the user can switch between surface and wireframe as a selection
"""
return self._representation
@representation.setter
def representation(self, representation):
"""Sets the representation"""
if representation not in self.representations:
msg = 'representation=%r is invalid\nrepresentations=%r' % (
representation, self.representations)
raise RuntimeError(msg)
self._representation = representation
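# A minimal usage sketch (hypothetical; `parent` is whatever GUI object owns the actor):
#   geom = AltGeometry(parent, 'wing', color=(255, 0, 0), representation='wire+surf')
#   geom.transparency = 0.25   # equivalent to geom.opacity = 0.75
#   print(geom.color_float)    # (1.0, 0.0, 0.0)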
| harvesttext/resources.py | buvta/HarvestText | 1,524 | 104619 |
<filename>harvesttext/resources.py
#coding=utf-8
#!/usr/bin/env python
# Resources
# Sentiment (positive/negative) lexicon, Tsinghua University, by Li Jun
#
# This resource was used in the following papers:
# <NAME> and <NAME>, Experimental Study on Sentiment Classification of Chinese Review using Machine Learning Techniques, in Proceding of IEEE NLPKE 2007
# Li Jun, Experimental Study on Sentiment Classification of Chinese Reviews, Master's thesis, Tsinghua University, 2008
import os
import json
from collections import defaultdict
def get_qh_sent_dict():
    """
    Get the reference sentiment (positive/negative) lexicon:
    Sentiment lexicon, Tsinghua University, by Li Jun
    This resource was used in the following papers:
    <NAME> and <NAME>, Experimental Study on Sentiment Classification of Chinese Review using Machine Learning Techniques, in Proceding of IEEE NLPKE 2007
    Li Jun, Experimental Study on Sentiment Classification of Chinese Reviews, Master's thesis, Tsinghua University, 2008
    :return: qh_sent_dict = {"pos":[words],"neg":[words]}
    """
    pwd = os.path.abspath(os.path.dirname(__file__))
    with open(pwd+"/resources/qh_sent_dict.json","r",encoding="utf-8") as f:
        qh_sent_dict = json.load(f)
    return qh_sent_dict
def get_baidu_stopwords():
    """
    Get the Baidu stopword list
    Source, a version circulating online: https://wenku.baidu.com/view/98c46383e53a580216fcfed9.html
    Contains common Chinese and English words and some punctuation marks
    :return: stopwords: set of string
    """
    pwd = os.path.abspath(os.path.dirname(__file__))
    with open(pwd + "/resources/bd_stopwords.json", "r", encoding="utf-8") as f:
        stopwords = json.load(f)
    return set(stopwords)
def get_nltk_en_stopwords():
    """
    English stopwords from nltk
    :return: stopwords: set of string
    """
    import nltk
    try:
        nltk.data.find('corpora/stopwords')
    except:
        nltk.download('stopwords')
    from nltk.corpus import stopwords
    return set(stopwords.words('english'))
def get_qh_typed_words(used_types = ['IT', '动物', '医药', '历史人名', '地名', '成语', '法律', '财经', '食物']):
    """
    THUOCL: Tsinghua Open Chinese Lexicon (清华大学开放中文词库)
    http://thuocl.thunlp.org/
    Categories: IT, finance (财经), idioms (成语), place names (地名), historical figures (历史名人), poetry (诗词), medicine (医学), food (饮食), law (法律), cars (汽车), animals (动物)
    :param used_types:
    :return: typed_words: dict whose keys are the categories and whose values are sets of words of that category
    """
    pwd = os.path.abspath(os.path.dirname(__file__))
    with open(pwd + "/resources/THUOCL.json", "r", encoding="utf-8") as f:
        typed_words0 = json.load(f)
    typed_words = dict()
    for type0 in typed_words0:
        if type0 in used_types:
            typed_words[type0] = set(typed_words0[type0])
    return typed_words
def get_sanguo():
    """
    Get the original text of "Romance of the Three Kingdoms" (三国演义)
    :return: ["text of chapter 1","text of chapter 2",...]
    """
    pwd = os.path.abspath(os.path.dirname(__file__))
    with open(pwd+"/resources/sanguo_docs.json","r",encoding="utf-8") as f:
        docs = json.load(f)
    return docs
def get_sanguo_entity_dict():
    """
    Get a knowledge base of person names, place names and faction names in "Romance of the Three Kingdoms".
    A simple self-built version; it is bound to have omissions and errors, for reference only
    :return: entity_mention_dict,entity_type_dict
    """
    import json
    pwd = os.path.abspath(os.path.dirname(__file__))
    with open(pwd+"/resources/sanguo_entity_dict.json","r",encoding="utf-8") as f:
        entity_dict = json.load(f)
    return entity_dict["mention"], entity_dict["type"]
def get_english_senti_lexicon(type="LH"):
    """
    Get an English sentiment lexicon
    Currently defaults to the lexicon from:
    https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html#lexicon
    If you use this list, please cite the following paper:
    <NAME> and <NAME>. "Mining and Summarizing Customer Reviews."
    Proceedings of the ACM SIGKDD International Conference on Knowledge
    Discovery and Data Mining (KDD-2004), Aug 22-25, 2004, Seattle,
    Washington, USA,
    :return: sent_dict = {"pos":[words],"neg":[words]}
    """
    pwd = os.path.abspath(os.path.dirname(__file__))
    with open(pwd + "/resources/LH_senti_lexicon.json", "r", encoding="utf-8") as f:
        senti_lexicon = json.load(f)
    return senti_lexicon
def get_jieba_dict(min_freq=0, max_freq=float('inf'), with_pos=False, use_proxy=False, proxies=None):
    """
    Get the Chinese word-frequency dictionary bundled with jieba
    :params min_freq: minimum word frequency required for a word to be selected
    :params max_freq: maximum word frequency allowed for a selected word
    :params with_pos: whether the returned result includes part-of-speech information
    :return if not with_pos, dict of {wd: freq}, else, dict of {(wd, pos): freq}
    """
    from .download_utils import RemoteFileMetadata, check_download_resource
    remote = RemoteFileMetadata(
        filename='jieba_dict.txt',
        url='https://github.com/blmoistawinde/HarvestText/releases/download/V0.8/jieba_dict.txt',
        checksum='7197c3211ddd98962b036cdf40324d1ea2bfaa12bd028e68faa70111a88e12a8')
    file_path = check_download_resource(remote, use_proxy, proxies)
    ret = defaultdict(int)
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            if len(line.strip().split()) == 3:
                wd, freq, pos = line.strip().split()
                freq = int(freq)
                if freq > min_freq and freq < max_freq:
                    if not with_pos:
                        ret[wd] = freq
                    else:
                        ret[(wd, pos)] = freq
    return ret
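# Hypothetical usage sketch (fetches the jieba dictionary resource on first call):
#   word_freqs = get_jieba_dict(min_freq=100)        # {word: frequency}
#   tagged_freqs = get_jieba_dict(with_pos=True)     # {(word, pos): frequency}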
| plugins/dns/client.py | tgragnato/geneva | 1,182 | 104661 |
<filename>plugins/dns/client.py
"""
Client
Run by the evaluator, tries to make a GET request to a given server
"""
import argparse
import logging
import os
import random
import socket
import sys
import time
import traceback
import urllib.request
import dns.resolver
import requests
import actions.utils
from plugins.plugin_client import ClientPlugin
class DNSClient(ClientPlugin):
"""
Defines the DNS client.
"""
name = "dns"
def __init__(self, args):
"""
Initializes the DNS client.
"""
ClientPlugin.__init__(self)
self.args = args
@staticmethod
def get_args(command):
"""
Defines required args for this plugin
"""
super_args = ClientPlugin.get_args(command)
parser = argparse.ArgumentParser(description='DNS Client')
parser.add_argument('--use-tcp', action='store_true', help='leverage TCP for this plugin')
parser.add_argument('--dns-server', action='store', default="8.8.8.8", help='domain server to connect to')
parser.add_argument('--query', action='store', default="facebook.com", help='censored domain to query')
parser.add_argument('--timeout', action='store', default="3", type=int, help='how long in seconds the client should wait for a response')
parser.add_argument('--port', action='store', default="53", type=int, help='port the DNS server is running on (must be 53)')
args, _ = parser.parse_known_args(command)
args = vars(args)
super_args.update(args)
return super_args
def run(self, args, logger, engine=None):
"""
Try to make a forbidden DNS query.
"""
fitness = 0
to_lookup = args.get("query", "facebook.com")
dns_server = args.get("dns_server", "8.8.8.8")
use_tcp = args.get("use_tcp", False)
assert dns_server, "Cannot launch DNS test with no DNS server"
assert to_lookup, "Cannot launch DNS test with no server to query"
fitness = -1000
try:
fitness = self.dns_test(to_lookup, dns_server, args["output_directory"], args["environment_id"], logger, timeout=args.get("timeout", 3), use_tcp=use_tcp)
except Exception:
logger.exception("Exception caught in DNS test to resolver %s.", dns_server)
fitness += -100
# When performing a DNS test, a timeout is indistinguishable from
# a reset, which means we can't tell if the strategy broke the packet
# stream, or if the censor caught us. Strategies that break the stream
# should be punished more harshly, so raise the fitness slightly
# if the engine detected censorship for failed DNS tests.
if use_tcp and engine and engine.censorship_detected and fitness < 0:
fitness += 10
return fitness * 4
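# Illustrative arithmetic for the scoring above (not from the original source): a DNS
# timeout makes dns_test() return -100; over TCP with engine.censorship_detected this
# becomes -100 + 10 = -90, so the method returns -90 * 4 = -360. If dns_test() itself
# raises, fitness stays at -1000, drops to -1100, and the return value is -4400
# (or -4360 when the +10 censorship adjustment applies).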
def dns_test(self, to_lookup, dns_server, output_dir, environment_id, logger, timeout=3, use_tcp=False):
"""
Makes a DNS query to a given censored domain.
"""
# Make the path an absolute path
if not output_dir.startswith("/"):
output_dir = os.path.join(actions.utils.PROJECT_ROOT, output_dir)
resolver = dns.resolver.Resolver()
protocol = "UDP"
if use_tcp:
protocol = "TCP"
logger.debug("Querying %s to DNS server %s over %s" % (to_lookup, dns_server, protocol))
resolver.nameservers = [dns_server]
# Setup the timeout and lifetime for this resolver
resolver.timeout = timeout
resolver.lifetime = 3
try:
answer = resolver.query(to_lookup, "A", tcp=use_tcp)[0]
logger.debug("Got IP address: %s" % answer)
# At this point, we've been given an IP address by the DNS resolver, but we don't
# yet know if this IP address is a bogus injected response, or legitimate. Further,
# because we are likely running this code from within a censored regime which might
# employ secondary censorship at the IP level, we cannot check if this IP is legit
# here. Instead, we write it out to a file for the evaluator to extract and check for us.
with open(os.path.join(output_dir, "flags", environment_id)+".dnsresult", "w") as dnsfile:
dnsfile.write(str(answer))
# For now, set fitness to a positive metric, though the evaluator will lower it if
# the IP address we were given was bogus.
fitness = 100
except dns.exception.Timeout:
logger.error("DNS query timed out.")
fitness = -100
except dns.resolver.NoNameservers:
logger.error("DNS server failed to respond")
fitness = -100
return fitness
| tests/test_vim.py | maralla/validator.vim | 255 | 104674 |
import json
from lints.vim import VimVint, VimLParserLint
def test_vint_undefined_variable():
    msg = ['t.vim:3:6: Undefined variable: s:test (see :help E738)']
    res = VimVint().parse_loclist(msg, 1)
    assert json.loads(res)[0] == {
        "lnum": "3",
        "col": "6",
        "text": "[vint]Undefined variable: s:test (see :help E738)",
        "enum": 1,
        "bufnr": 1,
        "type": "E"
    }
def test_vimlparser_message_without_code():
    msg = ['CCTree/plugin/cctree.vim:549:18: vimlparser: unexpected EOL']
    res = VimLParserLint().parse_loclist(msg, 1)
    assert json.loads(res)[0] == {
        "lnum": "549",
        "col": "18",
        "text": '[vimlparser]unexpected EOL',
        "enum": 1,
        "bufnr": 1,
        "type": "E",
        "code": None,
        "error": None,
        "warning": None,
    }
def test_vimlparser_message_with_code():
    msg = ['vim-unite-vcs/autoload/vcs/git/revert.vim:29:19: vimlparser: E488: Trailing characters: )']  # noqa
    res = VimLParserLint().parse_loclist(msg, 1)
    assert json.loads(res)[0] == {
        "lnum": "29",
        "col": "19",
        "text": '[vimlparser]E488: Trailing characters: )',
        "enum": 1,
        "bufnr": 1,
        "type": "E",
        "code": "488",
        "error": "E",
        "warning": None,
    }
| api/serializers/subscribers.py | lucasmgana/Pharmacy-Light-weight | 192 | 104675 |
<reponame>lucasmgana/Pharmacy-Light-weight
from rest_framework import serializers
from backend.models.subscribers import Subscriber
class SubscribersSerializer(serializers.ModelSerializer):
    class Meta:
        model = Subscriber
        fields = ('name', 'contact_method', 'contact_info')
| tests/test_tanh_distortion.py | ankitshah009/audiomentations | 930 | 104678 |
import unittest
import numpy as np
import pytest
from audiomentations import TanhDistortion
from audiomentations.core.utils import calculate_rms
class TestTanhDistortion(unittest.TestCase):
    def test_single_channel(self):
        samples = np.random.normal(0, 0.1, size=(2048,)).astype(np.float32)
        sample_rate = 16000
        augmenter = TanhDistortion(min_distortion=0.2, max_distortion=0.6, p=1.0)
        distorted_samples = augmenter(samples=samples, sample_rate=sample_rate)
        self.assertEqual(samples.dtype, distorted_samples.dtype)
        self.assertEqual(samples.shape, distorted_samples.shape)
        assert np.amax(distorted_samples) < np.amax(samples)
        assert calculate_rms(distorted_samples) == pytest.approx(
            calculate_rms(samples), abs=1e-3
        )
    def test_multichannel(self):
        num_channels = 3
        samples = np.random.normal(0, 0.1, size=(num_channels, 5555)).astype(np.float32)
        sample_rate = 16000
        augmenter = TanhDistortion(min_distortion=0.05, max_distortion=0.6, p=1.0)
        distorted_samples = augmenter(samples=samples, sample_rate=sample_rate)
        self.assertEqual(samples.dtype, distorted_samples.dtype)
        self.assertEqual(samples.shape, distorted_samples.shape)
        for i in range(num_channels):
            assert not np.allclose(samples[i], distorted_samples[i])
            assert calculate_rms(distorted_samples[i]) == pytest.approx(
                calculate_rms(samples[i]), abs=1e-3
            )
| exercises/alphametics/alphametics.py | kishankj/python | 1,177 | 104772 |
def solve(puzzle):
    pass
| utils/models_utils.py | phygitalism/PTI | 345 | 104778 |
<reponame>phygitalism/PTI
import pickle
import functools
import torch
from configs import paths_config, global_config
def toogle_grad(model, flag=True):
    for p in model.parameters():
        p.requires_grad = flag
def load_tuned_G(run_id, type):
    new_G_path = f'{paths_config.checkpoints_dir}/model_{run_id}_{type}.pt'
    with open(new_G_path, 'rb') as f:
        new_G = torch.load(f).to(global_config.device).eval()
    new_G = new_G.float()
    toogle_grad(new_G, False)
    return new_G
def load_old_G():
    with open(paths_config.stylegan2_ada_ffhq, 'rb') as f:
        old_G = pickle.load(f)['G_ema'].to(global_config.device).eval()
    old_G = old_G.float()
    return old_G
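# Hypothetical usage sketch (checkpoint locations come from configs.paths_config;
# the run_id and type values are placeholders):
#   G_tuned = load_tuned_G(run_id="abc123", type="multi_id")   # fine-tuned generator
#   G_original = load_old_G()                                  # pretrained StyleGAN2-ADA FFHQ generator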
| jetbot/camera/__init__.py | geoc1234/jetbot | 2,624 | 104789 |
<gh_stars>1000+
import os
DEFAULT_CAMERA = os.environ.get('JETBOT_DEFAULT_CAMERA', 'opencv_gst_camera')
if DEFAULT_CAMERA == 'zmq_camera':
    from .zmq_camera import ZmqCamera
    Camera = ZmqCamera
else:
    from .opencv_gst_camera import OpenCvGstCamera
    Camera = OpenCvGstCamera
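# Example of the environment-based selection above (invocation is illustrative):
#   JETBOT_DEFAULT_CAMERA=zmq_camera python my_robot.py
# makes `from jetbot.camera import Camera` resolve to ZmqCamera; any other value
# (including the default) falls back to OpenCvGstCamera.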
| streamalert/shared/lookup_tables/utils.py | cninja1/streamalert | 2,770 | 104849 |
<reponame>cninja1/streamalert
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from streamalert.shared.lookup_tables.drivers import PersistenceDriver
# pylint: disable=protected-access
class LookupTablesMagic:
    """Namespace full of magic methods that dig around the public interface of LookupTables.
    These methods are not on the public interface by design to prevent these access patterns from
    being utilized in "normal" Lambda code.
    """
    @staticmethod
    def get_all_table_data(table):
        """
        Return all of the data in the given lookup table as a dict. Only works with S3, and you
        should DEFINITELY AVOID USING THIS.
        Args:
            - table (LookupTable)
        Returns:
            dict
        """
        if table.driver_type != PersistenceDriver.TYPE_S3:
            raise RuntimeError("Cannot use lookup_table helper on non-S3 table.")
        # Make a single dummy call to force the table to initialize
        table.get('dummy', None)
        # Do some black magic tomfoolery
        return table._driver._cache._data
    @staticmethod
    def set_table_value(table, key, new_value):
        """Set a value into a LookupTable and then immediately commit it.
        Args:
            - table (LookupTable)
            - key (str)
            - new_value (str|int|list|dict|mixed)
        """
        table._driver.set(key, new_value)
        table._driver.commit()
    @staticmethod
    def get_all_tables(lookup_tables_core):
        """Returns all lookup tables, keyed by their names
        Args:
            - lookup_tables_core (LookupTablesCore)
        Returns:
            dict[str, LookupTable]
        """
        return lookup_tables_core._tables
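# Minimal usage sketch (hypothetical setup; only the helpers above are exercised,
# and the LookupTablesCore construction is assumed to happen elsewhere):
#   for name, table in LookupTablesMagic.get_all_tables(lookup_tables_core).items():
#       data = LookupTablesMagic.get_all_table_data(table)   # S3-backed tables only
#       LookupTablesMagic.set_table_value(table, 'some-key', 'some-value')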
| pymer4/tests/test_utils.py | turbach/pymer4 | 127 | 104850 |
<filename>pymer4/tests/test_utils.py
from __future__ import division
from pymer4.utils import con2R, R2con, get_resource_path, result_to_table
import pandas as pd
import numpy as np
from pymer4.models import Lm
import os
def test_con2R():
    x = np.array([[-1, 0, 0, 1], [-0.5, -0.5, 0.5, 0.5], [-3 / 3, 1 / 3, 1 / 3, 1 / 3]])
    out = con2R(x)
    assert out.shape == (4, 3)
    names = ["1 v s4", "1+2 vs 3+4", "1 vs 2+3+4"]
    out = con2R(x, names=names)
    assert isinstance(out, pd.DataFrame)
    assert [x == y for x, y in zip(out.columns, names)]
    assert out.shape == (4, 3)
    out = con2R(np.array([-1, 0, 1]))
    assert np.allclose(
        out, np.array([[-0.5, 0.40824829], [0.0, -0.81649658], [0.5, 0.40824829]])
    )
def test_result_to_table():
    df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))
    model = Lm("DV ~ IV1 + IV3", data=df)
    model.fit(summarize=False)
    formatted = result_to_table(model, drop_intercept=False)
    assert isinstance(formatted, pd.DataFrame)
    assert formatted.shape == (3, 6)
    assert set(["Predictor", "b", "ci", "t", "df", "p"]) == set(formatted.columns)
    assert formatted.iloc[0, -1] == "< .001"
    formatted = result_to_table(model, drop_intercept=True)
    assert isinstance(formatted, pd.DataFrame)
    assert formatted.shape == (2, 6)
| DPGAnalysis/Skims/python/valSkim_cff.py | ckamtsikis/cmssw | 852 | 104923 |
<reponame>ckamtsikis/cmssw
from DPGAnalysis.Skims.goodvertexSkim_cff import *
###Tracks selection
trackSelector =cms.EDFilter("TrackSelector",
src = cms.InputTag("generalTracks"),
cut = cms.string('quality("highPurity")')
)
#trackSelector = cms.EDProducer("QualityFilter",
# TrackQuality = cms.string('highPurity'),
# recTracks = cms.InputTag("generalTracks")
# )
trackFilter = cms.EDFilter("TrackCountFilter",
src = cms.InputTag("trackSelector"),
minNumber = cms.uint32(10)
)
nottoomanytracks = cms.EDFilter("NMaxPerLumi",
nMaxPerLumi = cms.uint32(8)
)
relvaltrackSkim = cms.Sequence(goodvertexSkim+trackSelector + trackFilter + nottoomanytracks )
### muon selection
muonSelector = cms.EDFilter("MuonSelector",
src = cms.InputTag("muons"),
cut = cms.string(" isGlobalMuon && isTrackerMuon && pt > 3")
)
muonFilter = cms.EDFilter("MuonCountFilter",
src = cms.InputTag("muonSelector"),
minNumber = cms.uint32(1)
)
nottoomanymuons = cms.EDFilter("NMaxPerLumi",
nMaxPerLumi = cms.uint32(2)
)
relvalmuonSkim = cms.Sequence(goodvertexSkim+muonSelector + muonFilter + nottoomanymuons )
| knet/det/knet.py | yinchimaoliang/K-Net | 361 | 104938 |
<reponame>yinchimaoliang/K-Net
import torch
import torch.nn.functional as F
from mmdet.models.builder import DETECTORS
from mmdet.models.detectors import TwoStageDetector
from mmdet.utils import get_root_logger
from .utils import sem2ins_masks
@DETECTORS.register_module()
class KNet(TwoStageDetector):
def __init__(self,
*args,
num_thing_classes=80,
num_stuff_classes=53,
mask_assign_stride=4,
thing_label_in_seg=0,
**kwargs):
super(KNet, self).__init__(*args, **kwargs)
assert self.with_rpn, 'KNet does not support external proposals'
self.num_thing_classes = num_thing_classes
self.num_stuff_classes = num_stuff_classes
self.mask_assign_stride = mask_assign_stride
self.thing_label_in_seg = thing_label_in_seg
logger = get_root_logger()
logger.info(f'Model: \n{self}')
def forward_train(self,
img,
img_metas,
gt_bboxes=None,
gt_labels=None,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
gt_semantic_seg=None,
**kwargs):
super(TwoStageDetector, self).forward_train(img, img_metas)
assert proposals is None, 'KNet does not support' \
' external proposals'
assert gt_masks is not None
# gt_masks and gt_semantic_seg are not padded when forming batch
gt_masks_tensor = []
gt_sem_seg = []
gt_sem_cls = []
# batch_input_shape should be the same across images
pad_H, pad_W = img_metas[0]['batch_input_shape']
assign_H = pad_H // self.mask_assign_stride
assign_W = pad_W // self.mask_assign_stride
for i, gt_mask in enumerate(gt_masks):
mask_tensor = gt_mask.to_tensor(torch.float, gt_labels[0].device)
if gt_mask.width != pad_W or gt_mask.height != pad_H:
pad_wh = (0, pad_W - gt_mask.width, 0, pad_H - gt_mask.height)
mask_tensor = F.pad(mask_tensor, pad_wh, value=0)
if gt_semantic_seg is not None:
# gt_semantic seg is padded by 255 and
# zero indicating the first class
sem_labels, sem_seg = sem2ins_masks(
gt_semantic_seg[i],
num_thing_classes=self.num_thing_classes)
if sem_seg.shape[0] == 0:
gt_sem_seg.append(
mask_tensor.new_zeros(
(mask_tensor.size(0), assign_H, assign_W)))
else:
gt_sem_seg.append(
F.interpolate(
sem_seg[None], (assign_H, assign_W),
mode='bilinear',
align_corners=False)[0])
gt_sem_cls.append(sem_labels)
else:
gt_sem_seg = None
gt_sem_cls = None
if mask_tensor.shape[0] == 0:
gt_masks_tensor.append(
mask_tensor.new_zeros(
(mask_tensor.size(0), assign_H, assign_W)))
else:
gt_masks_tensor.append(
F.interpolate(
mask_tensor[None], (assign_H, assign_W),
mode='bilinear',
align_corners=False)[0])
gt_masks = gt_masks_tensor
x = self.extract_feat(img)
rpn_results = self.rpn_head.forward_train(x, img_metas, gt_masks,
gt_labels, gt_sem_seg,
gt_sem_cls)
(rpn_losses, proposal_feats, x_feats, mask_preds,
cls_scores) = rpn_results
losses = self.roi_head.forward_train(
x_feats,
proposal_feats,
mask_preds,
cls_scores,
img_metas,
gt_masks,
gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_bboxes=gt_bboxes,
gt_sem_seg=gt_sem_seg,
gt_sem_cls=gt_sem_cls,
imgs_whwh=None)
losses.update(rpn_losses)
return losses
def simple_test(self, img, img_metas, rescale=False):
x = self.extract_feat(img)
rpn_results = self.rpn_head.simple_test_rpn(x, img_metas)
(proposal_feats, x_feats, mask_preds, cls_scores,
seg_preds) = rpn_results
segm_results = self.roi_head.simple_test(
x_feats,
proposal_feats,
mask_preds,
cls_scores,
img_metas,
imgs_whwh=None,
rescale=rescale)
return segm_results
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/get_flops.py`
"""
# backbone
x = self.extract_feat(img)
# rpn
num_imgs = len(img)
dummy_img_metas = [
dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
]
rpn_results = self.rpn_head.simple_test_rpn(x, dummy_img_metas)
(proposal_feats, x_feats, mask_preds, cls_scores,
seg_preds) = rpn_results
# roi_head
roi_outs = self.roi_head.forward_dummy(x_feats, proposal_feats,
dummy_img_metas)
return roi_outs
| examples/wish_export.py | HighnessAtharva/genshinstats | 182 | 104989 |
<filename>examples/wish_export.py
import csv
import genshinstats as gs
with open("export.csv", "w", newline="", encoding="utf-8") as file:
fieldnames = ["time", "name", "type", "rarity", "banner"]
writer = csv.DictWriter(file, fieldnames, extrasaction="ignore")
writer.writeheader()
print("preparing data...", end="\r")
for i, pull in enumerate(gs.get_wish_history()):
print(f"fetched {i} pulls", end="\r")
writer.writerow(pull)
| examples/image/cath/util/__init__.py | mariogeiger/se3cnn | 170 | 104991 |
<reponame>mariogeiger/se3cnn<gh_stars>100-1000
__all__ = [
'arch_blocks',
'get_mask',
'get_param_groups',
'logger',
'losses',
'lr_schedulers',
'optimizers_L1L2',
'tensorflow_logger',
]
| TM1py/Objects/Server.py | adscheevel/tm1py | 113 | 105035 |
# -*- coding: utf-8 -*-
from typing import Dict
class Server:
    """ Abstraction of the TM1 Server
    :Notes:
        contains the information you get from http://localhost:5895/api/v1/Servers
        no methods so far
    """
    def __init__(self, server_as_dict: Dict):
        self.name = server_as_dict['Name']
        self.ip_address = server_as_dict['IPAddress']
        self.ip_v6_address = server_as_dict['IPv6Address']
        self.port_number = server_as_dict['PortNumber']
        self.client_message_port_number = server_as_dict['ClientMessagePortNumber']
        self.http_port_number = server_as_dict['HTTPPortNumber']
        self.using_ssl = server_as_dict['UsingSSL']
        self.accepting_clients = server_as_dict['AcceptingClients']
        self.self_registered = server_as_dict['SelfRegistered']
        self.host = server_as_dict['Host']
        self.is_local = server_as_dict['IsLocal']
        self.ssl_certificate_id = server_as_dict['SSLCertificateID']
        self.ssl_certificate_authority = server_as_dict['SSLCertificateAuthority']
        self.ssl_certificate_revocation_list = server_as_dict['SSLCertificateRevocationList']
        self.client_export_ssl_server_keyid = server_as_dict['ClientExportSSLSvrKeyID']
        self.client_export_ssl_server_cert = server_as_dict['ClientExportSSLSvrCert']
        self.last_updated = server_as_dict['LastUpdated']
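# Hypothetical construction sketch from the admin endpoint mentioned in the docstring
# (the response is assumed here to be OData-style with a "value" list; verify against
# your TM1 admin server before relying on this):
#   import requests
#   payload = requests.get("http://localhost:5895/api/v1/Servers").json()
#   servers = [Server(entry) for entry in payload.get("value", [])]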
| torchsketch/utils/general_utils/get_filenames_and_classes.py | songyzh/torchsketch | 182 | 105040 |
<reponame>songyzh/torchsketch
import os
def get_filenames_and_classes(dataset_dir):
    class_names = []
    for filename in os.listdir(dataset_dir):
        path = os.path.join(dataset_dir, filename)
        if os.path.isdir(path):
            class_names.append(filename)
    class_names_to_ids = dict(zip(sorted(class_names), range(len(class_names))))
    return class_names_to_ids, len(class_names)
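# Example: for a layout like dataset/airplane/*.png and dataset/banana/*.png,
# get_filenames_and_classes("dataset") returns ({'airplane': 0, 'banana': 1}, 2);
# class ids follow the sorted class-folder names.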
| slow_tests/attackers_chinese.py | e-tornike/OpenAttack | 444 | 105080 |
from OpenAttack import substitute
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.abspath(__file__)),
".."
))
import OpenAttack
def get_attackers_on_chinese(dataset, clsf):
    triggers = OpenAttack.attackers.UATAttacker.get_triggers(clsf, dataset, clsf.tokenizer)
    attackers = [
        OpenAttack.attackers.FDAttacker(token_unk=clsf.token_unk, lang="chinese"),
        OpenAttack.attackers.UATAttacker(triggers=triggers, lang="chinese"),
        OpenAttack.attackers.TextBuggerAttacker(lang="chinese"),
        OpenAttack.attackers.GeneticAttacker(lang="chinese", filter_words=["的", "了", "着"]),
        OpenAttack.attackers.PWWSAttacker(lang="chinese"),
        OpenAttack.attackers.PSOAttacker(lang="chinese")
    ]
    return attackers
| friendly/runtime_errors/os_error.py | matan-h/friendly | 287 | 105154 |
"""Only identifying failed connection to a server for now."""
from ..my_gettext import current_lang, no_information
def get_cause(_value, _frame, tb_data):
    tb = "\n".join(tb_data.formatted_tb)
    if (
        "socket.gaierror" in tb
        or "urllib.error" in tb
        or "urllib3.exception" in tb
        or "requests.exception" in tb
    ):
        return handle_connection_error()
    return no_information()
def handle_connection_error():
    _ = current_lang.translate
    cause = _(
        "I suspect that you are trying to connect to a server and\n"
        "that a connection cannot be made.\n\n"
        "If that is the case, check for typos in the URL\n"
        "and check your internet connectivity.\n"
    )
    return {"cause": cause}
| python/test/mapreduce/module_test.py | Batterii/appengine-mapreduce | 228 | 105161 |
<reponame>Batterii/appengine-mapreduce
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
import unittest
from google.appengine.api import module_testutil
from mapreduce import context
from mapreduce import control
from mapreduce import datastore_range_iterators
from mapreduce import errors
from mapreduce import input_readers
from mapreduce import key_ranges
from mapreduce import mapper_pipeline
from mapreduce import mapreduce_pipeline
from mapreduce import model
from mapreduce import namespace_range
from mapreduce import operation as op
from mapreduce import output_writers
from mapreduce import property_range
from mapreduce import records
from mapreduce import shuffler
from mapreduce import util
class NamespaceRangeTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test module interface."""
MODULE = namespace_range
class PropertyRangeTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test module interface."""
MODULE = property_range
class KeyRangesTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test module interface."""
MODULE = key_ranges
class DatastoreRangeIteratorsTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test module interface."""
MODULE = datastore_range_iterators
class ContextTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test context module interface."""
MODULE = context
class ControlTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test control module interface."""
MODULE = control
class CountersTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test counters module interface."""
MODULE = op.counters
class DbTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test db module interface."""
MODULE = op.db
class ErrorsTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test errors module interface."""
MODULE = errors
class InputReadersTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test input_readers module interface."""
MODULE = input_readers
class ModelTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test model module interface."""
MODULE = model
class OutputWritersTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test output_writers module interface."""
MODULE = output_writers
class UtilTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test util module interface."""
MODULE = util
class MapperPipelineTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test mapper_pipeline module interface."""
MODULE = mapper_pipeline
class MapreducePipelineTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test mapreduce_pipeline module interface."""
MODULE = mapreduce_pipeline
class ShufflerTest(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test shuffler module interface."""
MODULE = shuffler
class RecordsTests(module_testutil.ModuleInterfaceTest,
googletest.TestCase):
"""Test records module interface."""
MODULE = records
if __name__ == '__main__':
googletest.main()
| sdc/datatypes/sdc_typeref.py | dlee992/sdc | 540 | 105165 |
# *****************************************************************************
# Copyright (c) 2021, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba.core import types
from numba.extending import (models, register_model, )
from numba.core.typing.templates import infer_global
from sdc.extensions.sdc_hashmap_type import ConcurrentDict, ConcurrentDictType
from sdc.datatypes.indexes import MultiIndexType
# FIXME_Numba#6781: due to overlapping of overload_methods for Numba TypeRef
# we have to use our new SdcTypeRef to type objects created from types.Type
# (i.e. ConcurrentDict meta-type). This should be removed once it's fixed.
def sdc_make_new_typeref_class():
    class SdcTypeRef(types.Dummy):
        """Reference to a type.
        Used when a type is passed as a value.
        """
        def __init__(self, instance_type):
            self.instance_type = instance_type
            super(SdcTypeRef, self).__init__('sdc_typeref[{}]'.format(self.instance_type))
    @register_model(SdcTypeRef)
    class SdcTypeRefModel(models.OpaqueModel):
        def __init__(self, dmm, fe_type):
            models.OpaqueModel.__init__(self, dmm, fe_type)
    return SdcTypeRef
ConcurrentDictTypeRef = sdc_make_new_typeref_class()
MultiIndexTypeRef = sdc_make_new_typeref_class()
infer_global(ConcurrentDict, ConcurrentDictTypeRef(ConcurrentDictType))
infer_global(pd.MultiIndex, MultiIndexTypeRef(MultiIndexType))
| models/backbone/__init__.py | briana-jin-zhang/spatial-segmentation | 733 | 105207 |
from .unet import *
from .vae import *
from .others import *
from .pconv_unet import *
from .discriminator import *
from .resnet_cls import *
| asterioids-pygame-project/source_code_step_5/space_rocks/utils.py | syberflea/materials | 3,682 | 105220 |
<reponame>syberflea/materials
from pygame.image import load
from pygame.math import Vector2
def load_sprite(name, with_alpha=True):
    path = f"assets/sprites/{name}.png"
    loaded_sprite = load(path)
    if with_alpha:
        return loaded_sprite.convert_alpha()
    else:
        return loaded_sprite.convert()
def wrap_position(position, surface):
    x, y = position
    w, h = surface.get_size()
    return Vector2(x % w, y % h)
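# Worked example for wrap_position (assuming an 800x600 pygame surface named `screen`):
#   wrap_position((820, -10), screen) == Vector2(20, 590)
# since 820 % 800 == 20 and -10 % 600 == 590 (Python's modulo keeps the result positive).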
| dataset/make_dataset.py | cdpidan/captcha_trainer_pytorch | 182 | 105281 |
#!/usr/bin/env python
# _*_coding:utf-8_*_
"""
@Time : 2020/8/23 0:11
@Author: sml2h3
@File: make_dataset
@Software: PyCharm
"""
from utils.constants import *
from utils.exception import *
from config import Config
from PIL import Image
import os
import sys
import json
import time
import random
class MakeDataset(object):
def __init__(self, project_name: str, images_path: str, word: bool = False,
datatype: DataType = DataType.ClassFication):
self.project_name = project_name
self.images_path = images_path
self.base_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "projects",
self.project_name)
self.data_path = os.path.join(self.base_path, "datas")
if not os.path.exists(self.data_path):
raise DataDirNotFoundError(
"Project named {} didn't found data dir!Please init project first!".format(self.project_name))
self.word = word
self.files_list = [os.path.join(self.images_path, filename) for filename in os.listdir(self.images_path)]
self.label_list = self.get_labels()
self.len = len(self.files_list)
self.label_len = len(self.label_list)
self.datatype = datatype
self.ignore_files = ['.DS_Store']
self.allow_ext = ['jpg', 'png', 'jpeg', 'bmp']
def get_labels(self):
label_map_temp = [""]
for idx, filename in enumerate(self.files_list):
path, labels = os.path.split(filename)
labels = str(labels.split('_')[0]).lower()
if self.word:
label_map_temp += [labels]
else:
label_map_temp += list(labels)
return list(set(label_map_temp))
def make(self, scale: float = 0.97):
random_seed = int(time.time() * 1000)
random.seed(random_seed)
random.shuffle(self.files_list)
train_data_num = int(self.len * scale)
test_data_num = self.len - train_data_num
train_dataset = self.files_list[test_data_num:]
test_dataset = self.files_list[:test_data_num]
dataset = {
RunMode.Train.value: train_dataset,
RunMode.Test.value: test_dataset
}
for dataset_type in [RunMode.Train, RunMode.Test]:
data_path = os.path.join(self.data_path, "{}_{}.json".format(self.project_name, dataset_type.value))
if os.path.exists(data_path):
os.remove(data_path)
if dataset_type.value == RunMode.Train.value:
used_dataset = dataset[RunMode.Train.value]
else:
used_dataset = dataset[RunMode.Test.value]
self._covert_img_tojson(used_dataset, data_path)
config = Config(self.project_name)
conf = config.load_config()
conf["Model"]["CharSet"] = json.dumps(self.label_list, ensure_ascii=False)
config.make_config(conf)
def _covert_img_tojson(self, dataset, output):
simple_collection_length = len(dataset)
collects = []
for idx, filename in enumerate(dataset):
if filename in self.ignore_files or filename.split('.')[-1].lower() not in self.allow_ext:
continue
else:
try:
sys.stdout.write(
'\r{}'.format(">> Converting Image {}/{}".format(idx + 1, simple_collection_length)))
sys.stdout.flush()
Image.open(filename)
collects.append(filename)
except Exception as e:
print(e)
print("\n")
with open(output, 'w', encoding="utf-8") as f:
f.write(json.dumps(collects, ensure_ascii=False))
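# Hypothetical usage sketch (the project must already have been initialised so that
# projects/<project_name>/datas exists; names and paths are placeholders):
#   MakeDataset("my_captcha", "/path/to/labeled_images").make(scale=0.97)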
| leet/strings/findAnagrams.py | monishshah18/python-cp-cheatsheet | 140 | 105308 |
"""
time: c*26 + p
space: 26 + 26 (1)
"""
import collections
from typing import List
class Solution:
    def findAnagrams(self, s: str, p: str) -> List[int]:
        cntP = collections.Counter(p)
        cntS = collections.Counter()
        P = len(p)
        S = len(s)
        if P > S:
            return []
        ans = []
        for i, c in enumerate(s):
            cntS[c] += 1
            if i >= P:
                if cntS[s[i-P]] > 1:
                    cntS[s[i-P]] -= 1
                else:
                    del cntS[s[i-P]]
            if cntS == cntP:
                ans.append(i-(P-1))
        return ans
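# Example (classic LeetCode 438 case):
#   Solution().findAnagrams("cbaebabacd", "abc") -> [0, 6]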
| det/configs/involution/mask_rcnn_red50_neck_fpn_head_1x_coco.py | shikishima-TasakiLab/involution | 1,260 | 105328 |
_base_ = [
'../_base_/models/mask_rcnn_red50_neck_fpn_head.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x_warmup.py', '../_base_/default_runtime.py'
]
optimizer_config = dict(grad_clip=dict(_delete_=True, max_norm=5, norm_type=2))
| code_examples/popart/block_sparse/examples/test_block_sparse.py | payoto/graphcore_examples | 260 | 105344 |
<reponame>payoto/graphcore_examples
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
import numpy as np
from functools import reduce
from operator import mul
import popart
import pytest
# range for filling blocks
MATRIX_LOW_VALUE = -10
MATRIX_HIGH_VALUE = 10
# library provides support for 3 kinds of sparse MM's
# from set of 2 inputs and one output, 2 of them will be dense
# and the third will be sparse
g_sparseMatMulTypeLookup = {
'DENSE_LHS_SPARSE_RHS_DENSE_OUT': 0,
'DENSE_LHS_DENSE_RHS_SPARSE_OUT': 1,
'SPARSE_LHS_SPARSE_RHS_SPARSE_OUT': 2
}
# g_ -> global
#
g_input_data_type = "float32"
g_output_data_type = "float32"
g_pp_data_type = "float32"
np.set_printoptions(linewidth=500)
g_random_sparse_mask = np.random.RandomState()
g_random_data = np.random.RandomState()
g_random_labels = np.random.RandomState()
"""
Set seeds of random generators.
"""
g_random_sparse_mask.seed(1)
g_random_data.seed(1)
g_random_labels.seed(1)
def create_sparse_list(dims, block_size, sparsity, initial_value=0):
"""
dims: dimensions of the sparse matrix
block_size: size of a block (8x8, 16x16 etc)
sparsity: sparsity level (0.4 means 40% of blocks are empty)
returns--
block_sparse_matrix: np.array of num_blocks * block_sz
dense_matrix: np.array if size dim with a dense representation of the matrix.
i.e. explcit zeros for a zero block. Used to perform
dense MM's for a reference output
mask: list with sparsity pattern. size = num_blocks (both dense and sparse)
e.g for a sparse matrix of size (6, 6) with block size 2x2
Matrix contains 9 blocks of size 2x2, some sparse and some dense
If the 6x6 matrices has 2 non zero blocks, then ..
Inputs:
dims = [6, 6]
block_size = [2,2]
sparsity = 0.4 (say)
Outputs:
block_sparse_matrix = 2 x 4 array
dense_matrix = 6x6 array
sparsity = 9x1 list
"""
block_size_row = block_size[0]
block_size_col = block_size[1]
num_block_rows = dims[0] // block_size_row
num_block_cols = dims[1] // block_size_col
assert(sparsity < 1.0)
proportion = [sparsity, 1 - sparsity]
mask = g_random_sparse_mask.choice([0, 1], size=(num_block_rows, num_block_cols), p=proportion)
# dont want mask to be all zeros
while np.all(mask == 0):
mask = g_random_sparse_mask.choice([0, 1], size=(num_block_rows, num_block_cols), p=proportion)
if initial_value == 0:
dense_matrix = np.zeros((num_block_rows * block_size_row,
num_block_cols * block_size_col))
else:
dense_matrix = np.empty((num_block_rows * block_size_row,
num_block_cols * block_size_col))
dense_matrix.fill(initial_value)
block_sparse_matrix = []
for block_row in range(num_block_rows):
for block_col in range(num_block_cols):
if mask[block_row][block_col]:
block_data = g_random_data.randint(low=MATRIX_LOW_VALUE,
high=MATRIX_HIGH_VALUE,
size=block_size_row * block_size_col).astype("float32")
block_sparse_matrix.append(block_data)
dense_matrix[block_row * block_size_row: (block_row+1) * block_size_row,
block_col * block_size_col: (block_col+1) * block_size_col] = block_data.reshape(block_size_row, block_size_col)
# At this point mask is a 2D array, flatten it into 1D list and return, bsr_rhs is already a list (so convert to array)
return np.array(block_sparse_matrix), dense_matrix, mask.flatten().tolist()
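# Illustrative call matching the 6x6 / 2x2 example in the docstring above:
#   bsr, dense, mask = create_sparse_list(dims=[6, 6], block_size=[2, 2], sparsity=0.4)
#   # bsr has shape (nnz_blocks, 4), dense has shape (6, 6), and mask has 9 entries.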
def create_dense_matrix(dims):
return g_random_data.randint(low=MATRIX_LOW_VALUE, high=MATRIX_HIGH_VALUE, size=dims).astype(g_input_data_type)
def create_sparse_matrix(nominal_shape, block_size, sparsity, initial_value=0):
"""
Create a sparse_matrix.
Inputs:
nominal_shape: List of dimensions of the sparse tensor e.g (2, 3, 4, 4)
block_size : size of each block (e.g. [8, 8, 8])
sparsity : block sparsity level (0.4 means, 40% of blocks are zeros)
Outputs:
bsr : sparse representation of matrix (nnz_blocks * block size)
lengths_per_2d_plane: List with num-non-zero blocks per group dim.
i.e. for a (2, 3, 4, 4) tensor with 2 nnz_blocks in each 4x4 matrix,
this will have shape of 6x1 and each row storing 2
dense_matrix: dense representation of the matrix (for ref calc)
mask : list of num_blocks (1 for non-zero blocks and 0 for others)
"""
# skip last two dimensions
# last 2 dims enter the MM, others form the group
num_grouped_dims = reduce(mul, nominal_shape[:-2], 1)
rows = nominal_shape[-2]
cols = nominal_shape[-1]
# Create dense matrix of nominal dims
if initial_value == 0:
dense_matrix = np.zeros(nominal_shape).astype(g_input_data_type)
else:
dense_matrix = np.empty(nominal_shape).astype(g_input_data_type)
dense_matrix.fill(initial_value)
dense_matrix = dense_matrix.reshape((num_grouped_dims, rows, cols))
dims = [nominal_shape[-2], nominal_shape[-1]]
bsr = []
bsr_lengths_per_2d_plane = []
mask = []
for dim in range(num_grouped_dims):
_bsr, dense_matrix[dim], _mask = create_sparse_list(dims, block_size, sparsity, initial_value)
# _bsr comes as array
# _mask comes as list
bsr.extend(_bsr)
mask.extend(_mask)
bsr_lengths_per_2d_plane.append(_bsr.shape[0])
dense_matrix = dense_matrix.reshape(nominal_shape)
mask = np.array(mask)
block_size_row = block_size[0]
block_size_col = block_size[1]
num_block_rows = dims[0] // block_size_row
num_block_cols = dims[1] // block_size_col
# all parameters are returned as numpy arrays
return np.array(bsr), np.array(bsr_lengths_per_2d_plane), dense_matrix, mask
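# Illustrative sketch (exposition only, never called by the tests): for a grouped
# tensor of shape (2, 3, 16, 16) with 8x8 blocks, the sparse representation is
# built plane by plane over the 2*3 = 6 leading "group" dimensions.
def _example_create_sparse_matrix():
    bsr, lengths, dense, mask = create_sparse_matrix([2, 3, 16, 16], [8, 8], 0.5)
    assert dense.shape == (2, 3, 16, 16)
    # one length entry per 2D plane, one mask entry per block per plane
    assert lengths.shape == (6,)
    assert mask.shape == (6 * 4,)
    # the number of stored blocks equals the total non-zero count across planes
    assert bsr.shape == (int(lengths.sum()), 8 * 8)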
def mm(lhs, rhs):
return np.matmul(lhs, rhs)
# Stable softmax numpy implementation
def softmax(x):
x_max = np.max(x, axis = -1)
x = x - np.expand_dims(x_max, axis=-1)
x = np.exp(x)
x_sum = np.sum(x, axis=-1)
x = x / np.expand_dims(x_sum, axis=-1)
return x
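# Quick sanity sketch (illustrative only, never called by the tests): the max
# subtraction above makes the softmax invariant to adding a constant and keeps
# np.exp from overflowing.
def _example_softmax_stability():
    x = np.array([[1.0, 2.0, 3.0]])
    shifted = x + 1000.0  # would overflow a naive exp-based softmax
    assert np.allclose(softmax(x), softmax(shifted))
    assert np.allclose(softmax(x).sum(axis=-1), 1.0)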
def sparse_mm_infer(sparse_mm_type, lhs_dims, vanilla_rhs_dims, block_size, sparsity_level, transpose_rhs, memory_cycle_ratio, inner_group_size):
""" """
if transpose_rhs:
matmul_dims = [lhs_dims[-2], vanilla_rhs_dims[-1], vanilla_rhs_dims[-2]]
else:
matmul_dims = [lhs_dims[-2], vanilla_rhs_dims[-2], vanilla_rhs_dims[-1]]
lhs = create_dense_matrix(lhs_dims)
if sparse_mm_type == g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT']:
bsr_rhs, lengths_per_2d_plane, vanilla_rhs, sparsity_mask = create_sparse_matrix(vanilla_rhs_dims, block_size[1:], sparsity_level)
rhs = bsr_rhs
rhs_dims = bsr_rhs.shape
elif sparse_mm_type == g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT']:
output_dims = lhs_dims[:-1]
output_dims.append(vanilla_rhs_dims[-1])
output_block_size = [block_size[0], block_size[2]]
bsr_output, lengths_per_2d_plane, _, sparsity_mask = create_sparse_matrix(output_dims, output_block_size, sparsity_level)
rhs_dims = vanilla_rhs_dims
rhs = create_dense_matrix(rhs_dims)
# Create a builder and construct a graph
builder = popart.Builder()
lhs_tensorInfo = popart.TensorInfo("FLOAT", lhs_dims)
rhs_tensorInfo = popart.TensorInfo("FLOAT", rhs_dims)
lhsTensor = builder.addInputTensor(lhs_tensorInfo)
rhsTensor = builder.addInputTensor(rhs_tensorInfo)
outTensor = builder.customOp(opName = "BSMatMul",
opVersion=1,
domain = "ai.graphcore",
inputs = [lhsTensor, rhsTensor],
attributes = {
"bsr_rhs_lengths_per_2d_plane": lengths_per_2d_plane.tolist(),
"matrix_dims": matmul_dims,
"block_size": block_size,
"sparsity_mask": sparsity_mask.tolist(),
"bsmatmul_type": sparse_mm_type,
"transpose_rhs": transpose_rhs,
"memory_cycle_ratio": memory_cycle_ratio,
"inner_group_size": inner_group_size,
"in_type": g_input_data_type,
"out_type": g_output_data_type,
"pp_type": g_pp_data_type
})[0]
builder.addOutputTensor(outTensor)
proto = builder.getModelProto()
# Describe how to run the model
dataFlow = popart.DataFlow(1, {outTensor: popart.AnchorReturnType("ALL")})
# Create a session to compile and execute the graph
session = popart.InferenceSession(
fnModel=proto,
dataFlow=dataFlow,
deviceInfo=popart.DeviceManager().acquireAvailableDevice(1))
# Compile graph
session.prepareDevice()
# Create buffers to receive results from the execution
anchors = session.initAnchorArrays()
rhs = np.array(rhs, dtype=g_input_data_type)
stepio = popart.PyStepIO({lhsTensor: lhs, rhsTensor: rhs}, anchors)
session.run(stepio)
ipuOutput = anchors[outTensor]
if sparse_mm_type == g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT']:
if transpose_rhs:
transpose_indices = list(range(len(vanilla_rhs_dims)))
transpose_indices[-2], transpose_indices[-1] = transpose_indices[-1], transpose_indices[-2]
vanilla_rhs = vanilla_rhs.transpose(tuple(transpose_indices))
goldOutput = mm(lhs, vanilla_rhs)
else:
goldOutput = mm(lhs, vanilla_rhs)
else:
assert len(lhs.shape) == len(rhs.shape)
if(len(lhs.shape) == 2):
lhs = np.expand_dims(lhs, 0)
rhs = np.expand_dims(rhs, 0)
mmOutput = mm(lhs, rhs)
totalGroupDims = int(np.prod(lhs_dims[:-2]))
num_rows_sparsity_mask_2d = output_dims[-2] // block_size[0]
num_cols_sparsity_mask_2d = output_dims[-1] // block_size[2]
assert sparsity_mask.shape == (totalGroupDims * num_rows_sparsity_mask_2d * num_cols_sparsity_mask_2d,)
mmOutput = mmOutput.reshape((totalGroupDims, lhs_dims[-2], rhs_dims[-1]))
goldOutput = []
for dim in range(totalGroupDims):
offset = num_rows_sparsity_mask_2d * num_cols_sparsity_mask_2d
mmOutput_2d = mmOutput[dim]
sliced_sparsity_mask = sparsity_mask[dim * offset: dim * offset + offset]
for sparsity_mask_idx in range(len(sliced_sparsity_mask)):
if sliced_sparsity_mask[sparsity_mask_idx]:
mmOutput_2d_row_start = (sparsity_mask_idx // num_cols_sparsity_mask_2d) * block_size[0]
mmOutput_2d_row_end = mmOutput_2d_row_start + block_size[0]
mmOutput_2d_col_start = (sparsity_mask_idx % num_cols_sparsity_mask_2d) * block_size[2]
mmOutput_2d_col_end = mmOutput_2d_col_start + block_size[2]
mmOutput_2d_sliced = mmOutput_2d[mmOutput_2d_row_start: mmOutput_2d_row_end, mmOutput_2d_col_start: mmOutput_2d_col_end]
goldOutput.append(mmOutput_2d_sliced.reshape(block_size[0] * block_size[2]))
goldOutput = np.array(goldOutput)
return ipuOutput, goldOutput
def sparse_mm_train(sparse_mm_type, lhs_dims, vanilla_rhs_dims, block_size, sparsity_level, transpose_rhs, memory_cycle_ratio, inner_group_size):
if transpose_rhs:
matmul_dims = [lhs_dims[-2], vanilla_rhs_dims[-1], vanilla_rhs_dims[-2]]
else:
matmul_dims = [lhs_dims[-2], vanilla_rhs_dims[-2], vanilla_rhs_dims[-1]]
lhs = create_dense_matrix(lhs_dims)
if sparse_mm_type == g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT']:
bsr_rhs, lengths_per_2d_plane, vanilla_rhs, sparsity_mask = create_sparse_matrix(vanilla_rhs_dims, block_size[1:], sparsity_level)
rhs = bsr_rhs
rhs_dims = bsr_rhs.shape
elif sparse_mm_type == g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT']:
output_dims = lhs_dims[:-1]
output_dims.append(vanilla_rhs_dims[-1])
output_block_size = [block_size[0], block_size[2]]
bsr_output, lengths_per_2d_plane, vanilla_output, sparsity_mask = create_sparse_matrix(output_dims, output_block_size, sparsity_level)
lhs_inv = np.linalg.inv(lhs)
rhs = np.matmul(lhs_inv, vanilla_output)
rhs_dims = vanilla_rhs_dims
# MODEL CREATION
builder = popart.Builder()
lhs_tensorInfo = popart.TensorInfo("FLOAT", lhs_dims)
lhsTensor = builder.addInputTensor(lhs_tensorInfo)
rhsTensor = builder.addInitializedInputTensor(rhs)
outTensor = builder.customOp(opName = "BSMatMul",
opVersion=1,
domain = "ai.graphcore",
inputs = [lhsTensor, rhsTensor],
attributes = {
"bsr_rhs_lengths_per_2d_plane": lengths_per_2d_plane.tolist(),
"matrix_dims": matmul_dims,
"block_size": block_size,
"sparsity_mask": sparsity_mask.tolist(),
"bsmatmul_type": sparse_mm_type,
"transpose_rhs": transpose_rhs,
"memory_cycle_ratio": memory_cycle_ratio,
"inner_group_size": inner_group_size,
"in_type": g_input_data_type,
"out_type": g_output_data_type,
"pp_type": g_pp_data_type
})[0]
builder.addOutputTensor(outTensor)
probs = builder.aiOnnx.softmax([outTensor], axis=1)
if sparse_mm_type == g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT']:
labels_shape = lhs_dims[:-1]
elif sparse_mm_type == g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT']:
labels_shape = [np.sum(sparsity_mask)]
label_tensorInfo = popart.TensorInfo("INT32", labels_shape)
labelTensor = builder.addInputTensor(label_tensorInfo)
loss = builder.aiGraphcore.nllloss([probs, labelTensor], debugContext = "nllLossVal")
proto = builder.getModelProto()
#######################
# Describe how to run the model
anchor_desc = {
outTensor: popart.AnchorReturnType("ALL"),
loss: popart.AnchorReturnType("ALL")
}
dataFlow = popart.DataFlow(1, anchor_desc)
label_data = g_random_labels.choice(9, labels_shape)
session = popart.TrainingSession(fnModel=proto,
loss=loss,
deviceInfo=popart.DeviceManager().acquireAvailableDevice(1),
optimizer=popart.ConstSGD(0.01),
dataFlow=dataFlow)
# Compile graph
session.prepareDevice()
# Create buffers to receive results from the execution
anchors = session.initAnchorArrays()
# TRAINING
session.weightsFromHost()
stepio = popart.PyStepIO({
lhsTensor: lhs,
labelTensor: label_data}, anchors)
session.run(stepio)
def sparse_softmax(dims, block_size, sparsity_level, inner_group_size):
""" """
sparse_input, lengths_per_2d_plane, dense_input, sparsity_mask = create_sparse_matrix(dims, block_size, sparsity_level, -1000)
# Create a builder and construct a graph
builder = popart.Builder()
tensor_info = popart.TensorInfo("FLOAT", sparse_input.shape)
input_tensor = builder.addInputTensor(tensor_info)
output_tensor = builder.customOp(opName = "BsSoftmax",
opVersion = 1,
domain = "ai.graphcore",
inputs = [input_tensor],
attributes = {
"matrixDims": dims,
"blockSize": block_size,
"sparsity": sparsity_mask.tolist(),
"groupSizes": lengths_per_2d_plane.tolist(),
"innerGroupSize": inner_group_size,
"subBlockMaskPerGroup": "None" * len(lengths_per_2d_plane)
})[0]
builder.addOutputTensor(output_tensor)
proto = builder.getModelProto()
# Describe how to run the model
dataFlow = popart.DataFlow(1, {output_tensor: popart.AnchorReturnType("ALL")})
# Create a session to compile and execute the graph
session = popart.InferenceSession(
fnModel=proto,
dataFlow=dataFlow,
deviceInfo=popart.DeviceManager().acquireAvailableDevice(1))
# Compile graph
session.prepareDevice()
# Create buffers to receive results from the execution
anchors = session.initAnchorArrays()
sparse_input = np.array(sparse_input, dtype=g_input_data_type)
stepio = popart.PyStepIO({input_tensor: sparse_input}, anchors)
session.run(stepio)
ipu_output = anchors[output_tensor]
group_dims = dims[:-2]
mat_dims = dims[-2:]
blocks_2d = [mat_dims[0] // block_size[0], mat_dims[1] // block_size[1]]
num_blocks_2d = blocks_2d[0] * blocks_2d[1]
block_area = block_size[0] * block_size[1]
total_group_dims = int(np.prod(group_dims))
assert sparsity_mask.shape == (total_group_dims * num_blocks_2d,)
cpu_output = softmax(dense_input)
np.set_printoptions(precision=2)
np.set_printoptions(suppress=True)
cpu_output = cpu_output.reshape([total_group_dims, blocks_2d[0], block_size[0], blocks_2d[1], block_size[1]])
cpu_output = np.transpose(cpu_output, [0, 1, 3, 2, 4])
cpu_output = cpu_output.reshape(total_group_dims, num_blocks_2d, block_area)
gold_output = []
offset = 0
for g in range(total_group_dims):
cpu_output_2d = cpu_output[g]
sliced_sparsity_mask = sparsity_mask[offset: offset + num_blocks_2d]
offset = offset + num_blocks_2d
for sparsity_mask_idx in range(num_blocks_2d):
if sliced_sparsity_mask[sparsity_mask_idx]:
gold_output.append(cpu_output_2d[sparsity_mask_idx])
gold_output = np.array(gold_output)
assert ipu_output.shape == gold_output.shape
return ipu_output, gold_output
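# Illustrative sketch (exposition only, never called by the tests): the
# reshape/transpose trick used above to regroup a dense (rows, cols) result
# into one row per block, in the same row-major block order as the mask.
def _example_block_regroup():
    dense = np.arange(16).reshape(1, 4, 4)          # 1 group, one 4x4 matrix
    blocks = dense.reshape(1, 2, 2, 2, 2)           # split rows/cols into 2x2 blocks
    blocks = np.transpose(blocks, [0, 1, 3, 2, 4])  # bring the two block indices together
    blocks = blocks.reshape(1, 4, 4)                # 4 blocks of 4 values each
    # the top-left 2x2 block of the matrix becomes the first row
    assert np.array_equal(blocks[0, 0], np.array([0, 1, 4, 5]))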
#
# INFERENCE TEST
#
# test_data_infer tuple --> (matMulType, lhs_dims, rhs_dims, block_size, sparsity, transpose_rhs, inner_group_size)
test_data_infer = [
# 2D
("tag_inf_0", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [8, 8], [8, 8], [8, 8, 8], 0.5, False, 1),
("tag_inf_1", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [16, 16], [16, 16], [8, 8, 8], 0.1, False, 1),
("tag_inf_2", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [32, 32], [32, 32], [16, 8, 8], 0.8, False, 1),
("tag_inf_3", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [64, 64], [64, 256], [64, 8, 64], 0.9, False, 1),
("tag_inf_4", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [128, 128], [128, 128], [32, 8, 16], 0.2, False, 1),
# 3D, False
("tag_inf_5", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [2, 8, 8], [2, 8, 8], [8, 8, 8], 0.1, False, 1),
("tag_inf_6", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [5, 16, 16], [5, 16, 16], [8, 8, 8], 0.3, False, 1),
("tag_inf_7", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [7, 32, 32], [7, 32, 32], [16, 8, 8], 0.5, False, 1),
("tag_inf_8", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [11, 64, 64], [11, 64, 64], [64, 8, 64], 0.6, False, 1),
("tag_inf_9", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [12, 128, 128], [12, 128, 128], [32, 8, 16], 0.8, False, 1),
# 4D, False
("tag_inf_10", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 1, 8, 8], [1, 1, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_inf_11", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 1, 16, 16], [1, 1, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_inf_12", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 1, 32, 32], [1, 1, 32, 32], [16, 8, 8], 0.5, False, 1),
("tag_inf_13", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 1, 64, 128], [1, 1, 128, 256], [64, 8, 64], 0.5, False, 1),
("tag_inf_14", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 1, 128, 64], [1, 1, 64, 128], [32, 8, 16], 0.5, False, 1),
("tag_inf_14", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 2, 8, 8], [1, 2, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_inf_16", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 5, 16, 16], [1, 5, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_inf_17", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 7, 32, 32], [1, 7, 32, 32], [16, 8, 8], 0.5, False, 1),
("tag_inf_18", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 5, 64, 128], [1, 5, 128, 256], [64, 8, 64], 0.5, False, 1),
("tag_inf_19", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 12, 128, 64], [1, 12, 64, 128], [32, 8, 16], 0.5, False, 1),
("tag_inf_20", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [2, 2, 8, 8], [2, 2, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_inf_21", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [5, 5, 16, 16], [5, 5, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_inf_22", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [13, 7, 32, 32], [13, 7, 32, 32], [16, 8, 8], 0.5, False, 1),
("tag_inf_24", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 12, 128, 64], [1, 12, 64, 128], [32, 8, 16], 0.5, False, 1),
# 2D, lhs has to be square to take inverse, False
("tag_inf_25", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [8, 8], [8, 8], [8, 8, 8], 0.5, False, 1),
("tag_inf_26", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [16, 16], [16, 16], [8, 8, 8], 0.1, False, 1),
("tag_inf_27", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [32, 32], [32, 32], [16, 8, 8], 0.8, False, 1),
("tag_inf_28", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [64, 64], [64, 64], [64, 8, 64], 0.9, False, 1),
("tag_inf_29", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [128, 128], [128, 128], [32, 8, 16], 0.7, False, 1),
# 3D, lhs has to be square to take, False
("tag_inf_30", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [2, 8, 8], [2, 8, 8], [8, 8, 8], 0.1, False, 1),
("tag_inf_31", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [5, 16, 16], [5, 16, 16], [8, 8, 8], 0.3, False, 1),
("tag_inf_32", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [7, 32, 32], [7, 32, 32], [16, 8, 8], 0.5, False, 1),
("tag_inf_33", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [11, 64, 64], [11, 64, 64], [64, 8, 64], 0.6, False, 1),
("tag_inf_34", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [12, 128, 128], [12, 128, 128], [32, 8, 16], 0.1, False, 1),
# 4D, lhs has to be square to take, False
("tag_inf_36", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 1, 8, 8], [1, 1, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_inf_36", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 1, 16, 16], [1, 1, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_inf_37", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 1, 32, 32], [1, 1, 32, 32], [16, 8, 8], 0.5, False, 1),
("tag_inf_38", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 1, 64, 64], [1, 1, 64, 256], [64, 8, 64], 0.5, False, 1),
("tag_inf_39", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 1, 128, 128], [1, 1, 128, 128], [32, 8, 16], 0.5, False, 1),
("tag_inf_40", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 2, 8, 8], [1, 2, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_inf_41", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 5, 16, 16], [1, 5, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_inf_42", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 7, 32, 32], [1, 7, 32, 32], [16, 8, 8], 0.5, False, 1),
("tag_inf_43", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 11, 64, 64], [1, 11, 64, 256], [64, 8, 64], 0.5, False, 1),
("tag_inf_44", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 12, 128, 128], [1, 12, 128, 128], [32, 8, 16], 0.5, False, 1),
("tag_inf_45", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [2, 2, 8, 8], [2, 2, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_inf_46", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [5, 5, 16, 16], [5, 5, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_inf_47", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [13, 7, 32, 32], [13, 7, 32, 32], [16, 8, 8], 0.5, False, 1),
("tag_inf_49", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 12, 128, 128], [1, 12, 128, 1024], [32, 8, 16], 0.5, False, 1),
# For transpose_rhs True case, last 2 dimensions of block_size must be 8
("tag_inf_50", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [8, 8], [8, 8], [8, 8, 8], 0.5, True, 1),
("tag_inf_51", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [16, 16], [16, 16], [8, 8, 8], 0.1, True, 1),
("tag_inf_52", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [32, 32], [32, 32], [16, 8, 8], 0.8, True, 1),
("tag_inf_53", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [64, 64], [256, 64], [64, 8, 8], 0.9, True, 1),
("tag_inf_54", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [128, 128], [128, 128], [32, 8, 8], 0.2, True, 1),
("tag_inf_55", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [2, 8, 8], [2, 8, 8], [8, 8, 8], 0.5, True, 1),
("tag_inf_56", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [3, 16, 16], [3, 16, 16], [8, 8, 8], 0.1, True, 1),
("tag_inf_57", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [7, 128, 128], [7, 128, 128], [32, 8, 8], 0.2, True, 1),
("tag_inf_58", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [3, 5, 8, 8], [3, 5, 8, 8], [8, 8, 8], 0.5, True, 1),
("tag_inf_59", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 6, 16, 16], [1, 6, 16, 16], [8, 8, 8], 0.1, True, 1),
("tag_inf_60", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [4, 4, 32, 32], [4, 4, 32, 32], [16, 8, 8], 0.8, True, 1),
# 3D, inner group size > 1
("tag_inf_61", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [12, 128, 128], [12, 128, 128], [32, 8, 16], 0.8, False, 3),
("tag_inf_62", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [12, 128, 128], [12, 128, 128], [32, 8, 16], 0.1, False, 4),
# 4D, inner group size > 1
("tag_inf_23", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [4, 8, 64, 128], [4, 8, 128, 256], [64, 8, 64], 0.5, False, 4),
("tag_inf_48", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [3, 11, 64, 64], [3, 11, 64, 256], [64, 8, 64], 0.5, False, 3),
]
@pytest.mark.parametrize("tag, matmul_type, lhs_dims, rhs_dims, block_size, sparsity_level, transpose_rhs, inner_group_size", test_data_infer)
def test_bsmatmul_infer(custom_ops, tag, matmul_type, lhs_dims, rhs_dims, block_size, sparsity_level, transpose_rhs, inner_group_size):
print("Running test_bsmatmul_infer() with tag: {}, matmul_type:{}, lhs_dims:{}, rhs_dims:{}, block_size:{}, sparsity_level:{}, transpose_rhs:{}, inner_group_size {}"
.format(tag, "DENSE_LHS_SPARSE_RHS_DENSE_OUT" if matmul_type == g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'] else "DENSE_LHS_DENSE_RHS_SPARSE_OUT",
lhs_dims, rhs_dims, block_size, sparsity_level, transpose_rhs, inner_group_size))
memory_cycle_ratio = 1.0
ipuOutput, goldOutput = sparse_mm_infer(matmul_type,
lhs_dims,
rhs_dims,
block_size,
sparsity_level,
transpose_rhs,
memory_cycle_ratio,
inner_group_size)
rtol = 1e-3
atol = 1e-3
np.testing.assert_allclose(ipuOutput, goldOutput, rtol=rtol, atol=atol)
#
# TRAINING TEST
#
# test_data_train tuple --> (matMulType, lhs_dims, rhs_dims, block_size, sparsity, transpose_rhs, inner_group_size)
test_data_train = [
# 2D
("tag_tr_0", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [8, 8], [8, 8], [8, 8, 8], 0.5, False, 1),
("tag_tr_1", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [16, 16], [16, 16], [8, 8, 8], 0.1, False, 1),
("tag_tr_2", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [32, 32], [32, 32], [8, 8, 8], 0.8, False, 1),
("tag_tr_3", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [64, 64], [64, 256], [8, 8, 8], 0.9, False, 1),
("tag_tr_4", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [128, 128], [128, 128], [8, 8, 8], 0.2, False, 1),
# 3D,
("tag_tr_5", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [2, 16, 16], [2, 16, 16], [8, 8, 8], 0.1, False, 1),
("tag_tr_6", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [5, 16, 16], [5, 16, 16], [8, 8, 8], 0.3, False, 1),
("tag_tr_7", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [7, 32, 32], [7, 32, 32], [8, 8, 8], 0.5, False, 1),
("tag_tr_8", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [11, 64, 64], [11, 64, 64], [8, 8, 8], 0.6, False, 1),
("tag_tr_9", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [12, 128, 128], [12, 128, 128], [8, 8, 8], 0.8, False, 1),
# 4D,
("tag_tr_10", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 1, 8, 8], [1, 1, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_tr_11", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 1, 16, 16], [1, 1, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_tr_12", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 1, 32, 32], [1, 1, 32, 32], [8, 8, 8], 0.5, False, 1),
("tag_tr_13", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 1, 64, 128], [1, 1, 128, 256], [8, 8, 8], 0.5, False, 1),
("tag_tr_14", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 1, 128, 64], [1, 1, 64, 128], [8, 8, 8], 0.5, False, 1),
("tag_tr_15", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 2, 8, 8], [1, 2, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_tr_16", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 5, 16, 16], [1, 5, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_tr_17", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 7, 32, 32], [1, 7, 32, 32], [8, 8, 8], 0.5, False, 1),
("tag_tr_18", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 11, 64, 128], [1, 11, 128, 256], [8, 8, 8], 0.5, False, 1),
("tag_tr_19", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 12, 128, 64], [1, 12, 64, 128], [8, 8, 8], 0.5, False, 1),
("tag_tr_20", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [2, 2, 8, 8], [2, 2, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_tr_21", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [5, 5, 16, 16], [5, 5, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_tr_22", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [13, 7, 32, 32], [13, 7, 32, 32], [8, 8, 8], 0.5, False, 1),
("tag_tr_23", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [3, 11, 64, 128], [3, 11, 128, 256], [8, 8, 8], 0.5, False, 1),
("tag_tr_24", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 12, 128, 64], [1, 12, 64, 128], [8, 8, 8], 0.5, False, 1),
# 2D, lhs has to be square to take inverse
("tag_tr_25", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [8, 8], [8, 8], [8, 8, 8], 0.5, False, 1),
("tag_tr_26", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [16, 16], [16, 16], [8, 8, 8], 0.1, False, 1),
("tag_tr_27", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [32, 32], [32, 32], [8, 8, 8], 0.8, False, 1),
("tag_tr_28", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [64, 64], [64, 256], [8, 8, 8], 0.9, False, 1),
("tag_tr_29", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [128, 128], [128, 128], [8, 8, 8], 0.2, False, 1),
# 3D, lhs has to be square to take
("tag_tr_30", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [2, 8, 8], [2, 8, 8], [8, 8, 8], 0.1, False, 1),
("tag_tr_31", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [5, 16, 16], [5, 16, 16], [8, 8, 8], 0.3, False, 1),
("tag_tr_32", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [7, 32, 32], [7, 32, 32], [8, 8, 8], 0.5, False, 1),
("tag_tr_33", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [11, 64, 64], [11, 64, 64], [8, 8, 8], 0.6, False, 1),
("tag_tr_34", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [12, 128, 128], [12, 128, 128], [8, 8, 8], 0.3, False, 1),
# 4D, lhs has to be square to take
("tag_tr_35", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 1, 8, 8], [1, 1, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_tr_36", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 1, 16, 16], [1, 1, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_tr_37", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 1, 32, 32], [1, 1, 32, 32], [8, 8, 8], 0.5, False, 1),
("tag_tr_38", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 1, 64, 64], [1, 1, 64, 256], [8, 8, 8], 0.5, False, 1),
("tag_tr_39", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 1, 128, 128], [1, 1, 128, 128], [8, 8, 8], 0.5, False, 1),
("tag_tr_40", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 2, 8, 8], [1, 2, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_tr_41", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 5, 16, 16], [1, 5, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_tr_42", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 7, 32, 32], [1, 7, 32, 32], [8, 8, 8], 0.5, False, 1),
("tag_tr_43", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 11, 64, 64], [1, 11, 64, 256], [8, 8, 8], 0.5, False, 1),
("tag_tr_44", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 12, 128, 128], [1, 12, 128, 128], [8, 8, 8], 0.5, False, 1),
("tag_tr_45", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [2, 2, 8, 8], [2, 2, 8, 8], [8, 8, 8], 0.5, False, 1),
("tag_tr_46", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [5, 5, 16, 16], [5, 5, 16, 16], [8, 8, 8], 0.8, False, 1),
("tag_tr_47", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [13, 7, 32, 32], [13, 7, 32, 32], [8, 8, 8], 0.5, False, 1),
("tag_tr_48", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [3, 11, 64, 64], [3, 11, 64, 256], [8, 8, 8], 0.5, False, 1),
("tag_tr_49", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [1, 12, 128, 128], [1, 12, 128, 1024], [8, 8, 8], 0.5, False, 1),
# For transpose_rhs True case, last 2 dimensions of block_size must be 8
("tag_tr_50", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [8, 8], [8, 8], [8, 8, 8], 0.5, True, 1),
("tag_tr_51", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [16, 16], [16, 16], [8, 8, 8], 0.1, True, 1),
("tag_tr_52", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [32, 32], [32, 32], [8, 8, 8], 0.8, True, 1),
("tag_tr_53", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [64, 64], [256, 64], [8, 8, 8], 0.9, True, 1),
("tag_tr_54", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [128, 128], [128, 128], [8, 8, 8], 0.2, True, 1),
("tag_tr_55", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [2, 8, 8], [2, 8, 8], [8, 8, 8], 0.5, True, 1),
("tag_tr_56", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [3, 16, 16], [3, 16, 16], [8, 8, 8], 0.1, True, 1),
("tag_tr_57", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [7, 128, 128], [7, 128, 128], [8, 8, 8], 0.2, True, 1),
("tag_tr_58", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [3, 5, 8, 8], [3, 5, 8, 8], [8, 8, 8], 0.5, True, 1),
("tag_tr_59", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [1, 6, 16, 16], [1, 6, 16, 16], [8, 8, 8], 0.1, True, 1),
("tag_tr_60", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [4, 4, 32, 32], [4, 4, 32, 32], [8, 8, 8], 0.8, True, 1),
# 3D, inner group size > 1
("tag_tr_61", g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'], [12, 128, 128], [12, 128, 128], [8, 8, 8], 0.8, False, 3),
("tag_tr_62", g_sparseMatMulTypeLookup['DENSE_LHS_DENSE_RHS_SPARSE_OUT'], [12, 128, 128], [12, 128, 128], [8, 8, 8], 0.1, False, 4),
]
@pytest.mark.parametrize("tag, matmul_type, lhs_dims, rhs_dims, block_size, sparsity_level, transpose_rhs, inner_group_size", test_data_train)
def test_bsmatmul_train(custom_ops, tag, matmul_type, lhs_dims, rhs_dims, block_size, sparsity_level, transpose_rhs, inner_group_size):
print("Running test_bsmatmul_train() with tag: {}, matmul_type:{}, lhs_dims:{}, rhs_dims:{}, block_size:{}, sparsity_level:{}, transpose_rhs:{}, inner_group_size {}"
.format(tag, "DENSE_LHS_SPARSE_RHS_DENSE_OUT" if matmul_type == g_sparseMatMulTypeLookup['DENSE_LHS_SPARSE_RHS_DENSE_OUT'] else "DENSE_LHS_DENSE_RHS_SPARSE_OUT",
lhs_dims, rhs_dims, block_size, sparsity_level, transpose_rhs, inner_group_size))
memory_cycle_ratio = 1.0
sparse_mm_train(matmul_type,
lhs_dims,
rhs_dims,
block_size,
sparsity_level,
transpose_rhs,
memory_cycle_ratio,
inner_group_size)
# test_data_softmax tuple --> (dims, block_size, sparsity, inner_group_size)
test_data_softmax = [
# 2D
("tag_sm_0", [8, 8], [8, 8], 0.0, 1),
    # 2D, non-zero sparsity
("tag_sm_1", [16, 16], [8, 8], 0.4, 1),
# 4D
("tag_sm_2", [2, 2, 16, 16], [8, 8], 0.3, 1),
# 5D, inner group size = 1
("tag_sm_3", [2, 3, 2, 16, 16], [8, 8], 0.1, 1),
# 5D, inner group size > 1
("tag_sm_4", [2, 3, 2, 16, 16], [8, 8], 0.1, 0),
("tag_sm_5", [2, 3, 2, 16, 16], [8, 8], 0.1, 6),
]
@pytest.mark.parametrize("tag, dims, block_size, sparsity_level, inner_group_size", test_data_softmax)
def test_bs_softmax(custom_ops, tag, dims, block_size, sparsity_level, inner_group_size):
print("Running test_bs_softmax() with tag: {}, dims:{}, block_size:{}, sparsity_level:{}, inner_group_size {}"
.format(tag, dims, block_size, sparsity_level, inner_group_size))
ipu_output, gold_output = sparse_softmax(dims,
block_size,
sparsity_level,
inner_group_size)
np.testing.assert_allclose(ipu_output, gold_output, rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
ipu_output, gold_output = sparse_softmax([2, 2, 16, 16],
[8, 8],
0.3,
1)
np.testing.assert_allclose(ipu_output, gold_output, rtol=1e-2, atol=1e-2)
|
tests/test_clean_api.py
|
bsekiewicz/dateparser
| 1,804 |
105357
|
from datetime import date, datetime
from pytz import utc
from parameterized import parameterized, param
import dateparser
from tests import BaseTestCase
class TestParseFunction(BaseTestCase):
def setUp(self):
super().setUp()
self.result = NotImplemented
@parameterized.expand([
param(date_string="24 de Janeiro de 2014", expected_date=date(2014, 1, 24)),
param(date_string="2 de Enero de 2013", expected_date=date(2013, 1, 2)),
param(date_string="January 25, 2014", expected_date=date(2014, 1, 25)),
])
def test_parse_dates_in_different_languages(self, date_string, expected_date):
self.when_date_is_parsed_with_defaults(date_string)
self.then_parsed_date_is(expected_date)
@parameterized.expand([
param(date_string="May 5, 2000 13:00",
expected_date=datetime(2000, 5, 5, 13, 0)),
param(date_string="August 8, 2018 5 PM",
expected_date=datetime(2018, 8, 8, 17, 0)),
param(date_string="February 26, 1981 5 am UTC",
expected_date=datetime(1981, 2, 26, 5, 0, tzinfo=utc)),
])
def test_parse_dates_with_specific_time(self, date_string, expected_date):
self.when_date_is_parsed_with_defaults(date_string)
self.then_parsed_date_and_time_is(expected_date)
@parameterized.expand([
param(date_string="May 5, 2000 13:00",
expected_date=datetime(2000, 5, 5, 13, 0),
relative=datetime(2000, 1, 1, 0, 0, tzinfo=utc)),
param(date_string="August 8, 2018 5 PM",
expected_date=datetime(2018, 8, 8, 17, 0),
relative=datetime(1900, 5, 5, 0, 0, tzinfo=utc)),
param(date_string="February 26, 1981 5 am UTC",
expected_date=datetime(1981, 2, 26, 5, 0, tzinfo=utc),
relative=datetime(1981, 2, 26, 5, 0, tzinfo=utc)),
])
def test_parse_dates_with_specific_time_and_settings(self, date_string, expected_date, relative):
self.when_date_is_parsed_with_settings(date_string, settings={'RELATIVE_BASE': relative})
self.then_parsed_date_and_time_is(expected_date)
@parameterized.expand([
param(date_string="24 de Janeiro de 2014", languages=['pt'], expected_date=date(2014, 1, 24)),
])
def test_dates_which_match_languages_are_parsed(self, date_string, languages, expected_date):
self.when_date_is_parsed(date_string, languages=languages)
self.then_parsed_date_is(expected_date)
@parameterized.expand([
param(date_string="January 24, 2014", languages=['pt']),
])
def test_dates_which_do_not_match_languages_are_not_parsed(self, date_string, languages):
self.when_date_is_parsed(date_string, languages=languages)
self.then_date_was_not_parsed()
@parameterized.expand([
param(date_string="24 de Janeiro de 2014", locales=['pt-TL'], expected_date=date(2014, 1, 24)),
])
def test_dates_which_match_locales_are_parsed(self, date_string, locales, expected_date):
self.when_date_is_parsed(date_string, locales=locales)
self.then_parsed_date_is(expected_date)
@parameterized.expand([
param(date_string="January 24, 2014", locales=['pt-AO']),
])
def test_dates_which_do_not_match_locales_are_not_parsed(self, date_string, locales):
self.when_date_is_parsed(date_string, locales=locales)
self.then_date_was_not_parsed()
def when_date_is_parsed_with_defaults(self, date_string):
self.result = dateparser.parse(date_string)
def when_date_is_parsed(self, date_string, languages=None, locales=None):
self.result = dateparser.parse(date_string, languages=languages, locales=locales)
def when_date_is_parsed_with_settings(self, date_string, settings=None):
self.result = dateparser.parse(date_string, settings=settings)
def then_parsed_date_is(self, expected_date):
self.assertEqual(self.result, datetime.combine(expected_date, datetime.min.time()))
def then_parsed_date_and_time_is(self, expected_date):
self.assertEqual(self.result, expected_date)
def then_date_was_not_parsed(self):
self.assertIsNone(self.result)
|
vdb/extensions/amd64.py
|
rnui2k/vivisect
| 716 |
105363
|
<reponame>rnui2k/vivisect
import vdb.extensions.i386 as v_ext_i386
import vdb.extensions.i386 as vdb_ext_i386
def vdbExtension(vdb, trace):
vdb.addCmdAlias('db','mem -F bytes')
vdb.addCmdAlias('dw','mem -F u_int_16')
vdb.addCmdAlias('dd','mem -F u_int_32')
vdb.addCmdAlias('dq','mem -F u_int_64')
vdb.addCmdAlias('dr','mem -F "Deref View"')
vdb.addCmdAlias('ds','mem -F "Symbols View"')
vdb.registerCmdExtension(vdb_ext_i386.eflags,'amd64')
|
sionna/channel/tr38901/lsp.py
|
NVlabs/sionna
| 163 |
105441
|
<gh_stars>100-1000
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""
Class for sampling large scale parameters (LSPs) and pathloss following the
3GPP TR38.901 specifications and according to a channel simulation scenario.
"""
import tensorflow as tf
from sionna.utils import log10
from sionna.utils import matrix_sqrt
class LSP:
r"""
Class for conveniently storing LSPs
Parameters
-----------
ds : [batch size, num tx, num rx], tf.float
RMS delay spread [s]
asd : [batch size, num tx, num rx], tf.float
azimuth angle spread of departure [deg]
asa : [batch size, num tx, num rx], tf.float
azimuth angle spread of arrival [deg]
sf : [batch size, num tx, num rx], tf.float
shadow fading
k_factor : [batch size, num tx, num rx], tf.float
Rician K-factor. Only used for LoS.
zsa : [batch size, num tx, num rx], tf.float
Zenith angle spread of arrival [deg]
zsd: [batch size, num tx, num rx], tf.float
Zenith angle spread of departure [deg]
"""
def __init__(self, ds, asd, asa, sf, k_factor, zsa, zsd):
self.ds = ds
self.asd = asd
self.asa = asa
self.sf = sf
self.k_factor = k_factor
self.zsa = zsa
self.zsd = zsd
class LSPGenerator:
"""
Sample large scale parameters (LSP) and pathloss given a channel scenario,
e.g., UMa, UMi, RMa.
This class implements steps 1 to 4 of the TR 38.901 specifications
(section 7.5), as well as path-loss generation (Section 7.4.1) with O2I
    low- and high-loss models (Section 7.4.3).
    Note that a global scenario is set for the entire batch when instantiating
this class (UMa, UMi, or RMa). However, each UT-BS link can have its
specific state (LoS, NLoS, or indoor).
The batch size is set by the ``scenario`` given as argument when
constructing the class.
Parameters
----------
scenario : :class:`~sionna.channel.tr38901.SystemLevelScenario``
Scenario used to generate LSPs
Input
-----
None
Output
------
An `LSP` instance storing realization of LSPs.
"""
def __init__(self, scenario):
self._scenario = scenario
def sample_pathloss(self):
"""
Generate pathlosses [dB] for each BS-UT link.
Input
------
None
Output
-------
A tensor with shape [batch size, number of BSs, number of UTs] of
pathloss [dB] for each BS-UT link
"""
# Pre-computed basic pathloss
pl_b = self._scenario.basic_pathloss
## O2I penetration
if self._scenario.o2i_model == 'low':
pl_o2i = self._o2i_low_loss()
elif self._scenario.o2i_model == 'high':
pl_o2i = self._o2i_high_loss()
## Total path loss, including O2I penetration
pl = pl_b + pl_o2i
return pl
def __call__(self):
# LSPs are assumed to follow a log-normal distribution.
# They are generated in the log-domain (where they follow a normal
# distribution), where they are correlated as indicated in TR38901
# specification (Section 7.5, step 4)
s = tf.random.normal(shape=[self._scenario.batch_size,
self._scenario.num_bs, self._scenario.num_ut, 7],
dtype=self._scenario.dtype.real_dtype)
        ## Applying cross-LSP correlation
s = tf.expand_dims(s, axis=4)
s = self._cross_lsp_correlation_matrix_sqrt@s
s = tf.squeeze(s, axis=4)
## Applying spatial correlation
s = tf.expand_dims(tf.transpose(s, [0, 1, 3, 2]), axis=3)
s = tf.matmul(s, self._spatial_lsp_correlation_matrix_sqrt,
transpose_b=True)
s = tf.transpose(tf.squeeze(s, axis=3), [0, 1, 3, 2])
        ## Scaling and shifting LSPs to the desired mean and variance
lsp_log_mean = self._scenario.lsp_log_mean
lsp_log_std = self._scenario.lsp_log_std
lsp_log = lsp_log_std*s + lsp_log_mean
## Mapping to linear domain
lsp = tf.math.pow(tf.constant(10., self._scenario.dtype.real_dtype),
lsp_log)
# Limit the RMS azimuth arrival (ASA) and azimuth departure (ASD)
# spread values to 104 degrees
# Limit the RMS zenith arrival (ZSA) and zenith departure (ZSD)
# spread values to 52 degrees
lsp = LSP( ds = lsp[:,:,:,0],
asd = tf.math.minimum(lsp[:,:,:,1], 104.0),
asa = tf.math.minimum(lsp[:,:,:,2], 104.0),
sf = lsp[:,:,:,3],
k_factor = lsp[:,:,:,4],
zsa = tf.math.minimum(lsp[:,:,:,5], 52.0),
zsd = tf.math.minimum(lsp[:,:,:,6], 52.0)
)
return lsp
def topology_updated_callback(self):
"""
Updates internal quantities. Must be called at every update of the
scenario that changes the state of UTs or their locations.
Input
------
None
Output
------
None
"""
        # Pre-computing these quantities avoids unnecessary calculations at every
# generation of new LSPs
# Compute cross-LSP correlation matrix
self._compute_cross_lsp_correlation_matrix()
# Compute LSP spatial correlation matrix
self._compute_lsp_spatial_correlation_sqrt()
########################################
# Internal utility methods
########################################
def _compute_cross_lsp_correlation_matrix(self):
"""
Compute and store as attribute the square-root of the cross-LSPs
correlation matrices for each BS-UT link, and then the corresponding
matrix square root for filtering.
The resulting tensor is of shape
        [batch size, number of BSs, number of UTs, 7, 7]
7 being the number of LSPs to correlate.
Input
------
None
Output
-------
None
"""
# The following 7 LSPs are correlated:
# DS, ASA, ASD, SF, K, ZSA, ZSD
# We create the correlation matrix initialized to the identity matrix
cross_lsp_corr_mat = tf.eye(7, 7,batch_shape=[self._scenario.batch_size,
self._scenario.num_bs, self._scenario.num_ut],
dtype=self._scenario.dtype.real_dtype)
# Tensors of bool indicating the state of UT-BS links
# Indoor
indoor_bool = tf.tile(tf.expand_dims(self._scenario.indoor, axis=1),
[1, self._scenario.num_bs, 1])
# LoS
los_bool = self._scenario.los
# NLoS (outdoor)
nlos_bool = tf.logical_and(tf.logical_not(self._scenario.los),
tf.logical_not(indoor_bool))
# Expand to allow broadcasting with the BS dimension
indoor_bool = tf.expand_dims(tf.expand_dims(indoor_bool, axis=3),axis=4)
los_bool = tf.expand_dims(tf.expand_dims(los_bool, axis=3),axis=4)
nlos_bool = tf.expand_dims(tf.expand_dims(nlos_bool, axis=3),axis=4)
# Internal function that adds to the correlation matrix ``mat``
# ``cross_lsp_corr_mat`` the parameter ``parameter_name`` at location
# (m,n)
def _add_param(mat, parameter_name, m, n):
# Mask to put the parameters in the right spot of the 7x7
# correlation matrix
mask = tf.scatter_nd([[m,n],[n,m]],
tf.constant([1.0, 1.0], self._scenario.dtype.real_dtype), [7,7])
mask = tf.reshape(mask, [1,1,1,7,7])
# Get the parameter value according to the link scenario
update = self._scenario.get_param(parameter_name)
update = tf.expand_dims(tf.expand_dims(update, axis=3), axis=4)
# Add update
mat = mat + update*mask
return mat
# Fill off-diagonal elements of the correlation matrices
# ASD vs DS
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrASDvsDS', 0, 1)
# ASA vs DS
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrASAvsDS', 0, 2)
# ASA vs SF
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrASAvsSF', 3, 2)
# ASD vs SF
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrASDvsSF', 3, 1)
# DS vs SF
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrDSvsSF', 3, 0)
# ASD vs ASA
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrASDvsASA', 1,2)
# ASD vs K
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrASDvsK', 1, 4)
# ASA vs K
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrASAvsK', 2, 4)
# DS vs K
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrDSvsK', 0, 4)
# SF vs K
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrSFvsK', 3, 4)
# ZSD vs SF
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSDvsSF', 3, 6)
# ZSA vs SF
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSAvsSF', 3, 5)
# ZSD vs K
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSDvsK', 6, 4)
# ZSA vs K
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSAvsK', 5, 4)
# ZSD vs DS
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSDvsDS', 6, 0)
# ZSA vs DS
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSAvsDS', 5, 0)
# ZSD vs ASD
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSDvsASD', 6,1)
# ZSA vs ASD
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSAvsASD', 5,1)
# ZSD vs ASA
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSDvsASA', 6,2)
# ZSA vs ASA
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSAvsASA', 5,2)
# ZSD vs ZSA
cross_lsp_corr_mat = _add_param(cross_lsp_corr_mat, 'corrZSDvsZSA', 5,6)
# Compute and store the square root of the cross-LSP correlation
# matrix
self._cross_lsp_correlation_matrix_sqrt = matrix_sqrt(
cross_lsp_corr_mat)
def _compute_lsp_spatial_correlation_sqrt(self):
"""
Compute the square root of the spatial correlation matrices of LSPs.
The LSPs are correlated accross users according to the distance between
the users. Each LSP is spatially correlated according to a different
spatial correlation matrix.
The links involving different BSs are not correlated.
UTs in different state (LoS, NLoS, O2I) are not assumed to be
correlated.
The correlation of the LSPs X of two UTs in the same state related to
the links of these UTs to a same BS is
.. math::
C(X_1,X_2) = exp(-d/D_X)
where :math:`d` is the distance between the UTs in the X-Y plane (2D
distance) and D_X the correlation distance of LSP X.
        The resulting tensor is of shape
        [batch size, number of BSs, 7, number of UTs, number of UTs]
7 being the number of LSPs.
Input
------
None
Output
-------
None
"""
# Tensors of bool indicating which pair of UTs to correlate.
# Pairs of UTs that are correlated are those that share the same state
# (indoor, LoS, or NLoS).
# Indoor
indoor = tf.tile(tf.expand_dims(self._scenario.indoor, axis=1),
[1, self._scenario.num_bs, 1])
# LoS
los_ut = self._scenario.los
los_pair_bool = tf.logical_and(tf.expand_dims(los_ut, axis=3),
tf.expand_dims(los_ut, axis=2))
# NLoS
nlos_ut = tf.logical_and(tf.logical_not(self._scenario.los),
tf.logical_not(indoor))
nlos_pair_bool = tf.logical_and(tf.expand_dims(nlos_ut, axis=3),
tf.expand_dims(nlos_ut, axis=2))
# O2I
o2i_pair_bool = tf.logical_and(tf.expand_dims(indoor, axis=3),
tf.expand_dims(indoor, axis=2))
# Stacking the correlation matrix
# One correlation matrix per LSP
filtering_matrices = []
distance_scaling_matrices = []
for parameter_name in ('corrDistDS', 'corrDistASD', 'corrDistASA',
'corrDistSF', 'corrDistK', 'corrDistZSA', 'corrDistZSD'):
# Matrix used for filtering and scaling the 2D distances
# For each pair of UTs, the entry is set to 0 if the UTs are in
# different states, -1/(correlation distance) otherwise.
# The correlation distance is different for each LSP.
filtering_matrix = tf.eye(self._scenario.num_ut,
self._scenario.num_ut, batch_shape=[self._scenario.batch_size,
self._scenario.num_bs], dtype=self._scenario.dtype.real_dtype)
distance_scaling_matrix = self._scenario.get_param(parameter_name)
distance_scaling_matrix = tf.tile(tf.expand_dims(
distance_scaling_matrix, axis=3),
[1, 1, 1, self._scenario.num_ut])
distance_scaling_matrix = -1./distance_scaling_matrix
# LoS
filtering_matrix = tf.where(los_pair_bool,
tf.constant(1.0, self._scenario.dtype.real_dtype),
filtering_matrix)
# NLoS
filtering_matrix = tf.where(nlos_pair_bool,
tf.constant(1.0, self._scenario.dtype.real_dtype),
filtering_matrix)
# indoor
filtering_matrix = tf.where(o2i_pair_bool,
tf.constant(1.0, self._scenario.dtype.real_dtype),
filtering_matrix)
# Stacking
filtering_matrices.append(filtering_matrix)
distance_scaling_matrices.append(distance_scaling_matrix)
filtering_matrices = tf.stack(filtering_matrices, axis=2)
distance_scaling_matrices = tf.stack(distance_scaling_matrices, axis=2)
ut_dist_2d = self._scenario.matrix_ut_distance_2d
# Adding a dimension for broadcasting with BS
ut_dist_2d = tf.expand_dims(tf.expand_dims(ut_dist_2d, axis=1), axis=2)
# Correlation matrix
spatial_lsp_correlation = (tf.math.exp(
ut_dist_2d*distance_scaling_matrices)*filtering_matrices)
# Compute and store the square root of the spatial correlation matrix
self._spatial_lsp_correlation_matrix_sqrt = matrix_sqrt(
spatial_lsp_correlation)
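    # Illustrative note (not part of the original class): for two UTs in the same
    # state separated by d = 25 m and an LSP with correlation distance D_X = 50 m,
    # the construction above yields a spatial correlation of
    #   exp(-d / D_X) = exp(-25 / 50) ~= 0.61
    # while pairs of UTs in different states are left uncorrelated (entry 0).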
def _o2i_low_loss(self):
"""
Compute for each BS-UT link the pathloss due to the O2I penetration loss
in dB with the low-loss model.
See section 7.4.3.1 of 38.901 specification.
UTs located outdoor (LoS and NLoS) get O2I pathloss of 0dB.
Input
-----
None
Output
-------
Tensor with shape
[batch size, number of BSs, number of UTs]
containing the O2I penetration low-loss in dB for each BS-UT link
"""
fc = self._scenario.carrier_frequency/1e9 # Carrier frequency (GHz)
batch_size = self._scenario.batch_size
num_ut = self._scenario.num_ut
num_bs = self._scenario.num_bs
# Material penetration losses
# fc must be in GHz
l_glass = 2. + 0.2*fc
l_concrete = 5. + 4.*fc
# Path loss through external wall
pl_tw = 5.0 - 10.*log10(0.3*tf.math.pow(tf.constant(10.,
self._scenario.dtype.real_dtype), -l_glass/10.0) + 0.7*tf.math.pow(
tf.constant(10., self._scenario.dtype.real_dtype),
-l_concrete/10.0))
# Filtering-out the O2I pathloss for UTs located outdoor
indoor_mask = tf.where(self._scenario.indoor, tf.constant(1.0,
self._scenario.dtype.real_dtype), tf.zeros([batch_size, num_ut],
self._scenario.dtype.real_dtype))
indoor_mask = tf.expand_dims(indoor_mask, axis=1)
pl_tw = pl_tw*indoor_mask
# Pathloss due to indoor propagation
# The indoor 2D distance for outdoor UTs is 0
pl_in = 0.5*self._scenario.distance_2d_in
# Random path loss component
# Gaussian distributed with standard deviation 4.4 in dB
pl_rnd = tf.random.normal(shape=[batch_size, num_bs, num_ut],
mean=0.0, stddev=4.4, dtype=self._scenario.dtype.real_dtype)
pl_rnd = pl_rnd*indoor_mask
return pl_tw + pl_in + pl_rnd
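    # Worked example (illustrative): at fc = 3.5 GHz the low-loss model gives
    # L_glass = 2 + 0.2*3.5 = 2.7 dB and L_concrete = 5 + 4*3.5 = 19 dB, so the
    # through-wall term evaluates to
    #   PL_tw = 5 - 10*log10(0.3*10**(-2.7/10) + 0.7*10**(-19/10)) ~= 12.7 dB
    # before the indoor-distance and random components are added.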
def _o2i_high_loss(self):
"""
Compute for each BS-UT link the pathloss due to the O2I penetration loss
in dB with the high-loss model.
See section 7.4.3.1 of 38.901 specification.
UTs located outdoor (LoS and NLoS) get O2I pathloss of 0dB.
Input
-----
None
Output
-------
Tensor with shape
[batch size, number of BSs, number of UTs]
        containing the O2I penetration high-loss in dB for each BS-UT link
"""
fc = self._scenario.carrier_frequency/1e9 # Carrier frequency (GHz)
batch_size = self._scenario.batch_size
num_ut = self._scenario.num_ut
num_bs = self._scenario.num_bs
# Material penetration losses
# fc must be in GHz
l_iirglass = 23. + 0.3*fc
l_concrete = 5. + 4.*fc
# Path loss through external wall
pl_tw = 5.0 - 10.*log10(0.7*tf.math.pow(tf.constant(10.,
self._scenario.dtype.real_dtype), -l_iirglass/10.0)
+ 0.3*tf.math.pow(tf.constant(10.,
self._scenario.dtype.real_dtype), -l_concrete/10.0))
# Filtering-out the O2I pathloss for outdoor UTs
indoor_mask = tf.where(self._scenario.indoor, 1.0,
tf.zeros([batch_size, num_ut], self._scenario.dtype.real_dtype))
indoor_mask = tf.expand_dims(indoor_mask, axis=1)
pl_tw = pl_tw*indoor_mask
# Pathloss due to indoor propagation
# The indoor 2D distance for outdoor UTs is 0
pl_in = 0.5*self._scenario.distance_2d_in
# Random path loss component
# Gaussian distributed with standard deviation 6.5 in dB for the
# high loss model
pl_rnd = tf.random.normal(shape=[batch_size, num_bs, num_ut],
mean=0.0, stddev=6.5,
dtype=self._scenario.dtype.real_dtype)
pl_rnd = pl_rnd*indoor_mask
return pl_tw + pl_in + pl_rnd
|
scripts/irods/test/test_client_hints.py
|
JustinKyleJames/irods
| 333 |
105478
|
from __future__ import print_function
import sys
import shutil
import os
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
import os
import datetime
import socket
from .. import test
from . import settings
from .. import lib
from . import resource_suite
from ..configuration import IrodsConfig
class Test_ClientHints(resource_suite.ResourceBase, unittest.TestCase):
def setUp(self):
super(Test_ClientHints, self).setUp()
def tearDown(self):
super(Test_ClientHints, self).tearDown()
def test_client_hints(self):
self.admin.assert_icommand('iclienthints', 'STDOUT_SINGLELINE', 'plugins')
|
presidio-analyzer/tests/test_us_driver_license_recognizer.py
|
vtols/presidio
| 1,408 |
105495
|
<filename>presidio-analyzer/tests/test_us_driver_license_recognizer.py
import pytest
from presidio_analyzer.predefined_recognizers import UsLicenseRecognizer
from tests import assert_result_within_score_range
@pytest.fixture(scope="module")
def recognizer():
return UsLicenseRecognizer()
@pytest.fixture(scope="module")
def entities():
return ["US_DRIVER_LICENSE"]
@pytest.mark.parametrize(
"text, expected_len, expected_positions, expected_score_ranges",
[
# fmt: off
# WA license tests
(
"AA1B2**9ABA7 A*1234AB*CD9",
2,
((0, 12), (13, 25),),
((0.3, 0.4), (0.3, 0.4),),
),
("3A1B2**9ABA7", 0, (), (),),
# Other states license weak tests
("H12234567", 1, ((0, 9),), ((0.3, 0.4),),),
("C12T345672", 0, (), (),),
        # invalid license that should fail, but doesn't due to context
# ("my driver's license is C12T345672", 0, (), (),),
# Other states license very weak tests
(
"123456789 1234567890 12345679012 123456790123 1234567901234 1234",
5,
((0, 9), (10, 20), (21, 32), (33, 45), (46, 59),),
((0.0, 0.02), (0.0, 0.02), (0.0, 0.02), (0.0, 0.02), (0.0, 0.02),),
),
("ABCDEFG ABCDEFGH ABCDEFGHI", 0, (), (),),
("ABCD ABCDEFGHIJ", 0, (), (),),
# The following fails due to keyphrases not yet supported
# ("my driver license: ABCDEFG", 1, ((19, 25),), ((0.5, 0.91),),),
# fmt: on
],
)
def test_when_driver_licenes_in_text_then_all_us_driver_licenses_found(
text,
expected_len,
expected_positions,
expected_score_ranges,
recognizer,
entities,
max_score,
):
results = recognizer.analyze(text, entities)
assert len(results) == expected_len
for res, (st_pos, fn_pos), (st_score, fn_score) in zip(
results, expected_positions, expected_score_ranges
):
if fn_score == "max":
fn_score = max_score
assert_result_within_score_range(
res, entities[0], st_pos, fn_pos, st_score, fn_score
)
|
util/generate_loading_gif.py
|
Tarsnap/tarsnap-gui
| 270 |
105520
|
#!/usr/bin/env python3
""" Generate a "loading" or "waiting" animated gif. """
import math
import PIL.Image
import PIL.ImageDraw
SIZE = 16
TOTAL_DOTS = 8
VISUAL_DOTS = 4 # how many dots are visible in each frame.
DIAMETER = SIZE / 8.0
SECONDS = 1.25 # how long it takes to do a complete cycle.
OUTPUT = "loading.gif"
def draw_dot(draw, i, color):
""" Draw a dot around a circle with a color. """
# Positions around the big circle.
unit_x = 1 + math.sin(i/TOTAL_DOTS * 2*math.pi)
unit_y = 1 - math.cos(i/TOTAL_DOTS * 2*math.pi)
# Scale to the desired size and circle diameter.
x = round(((SIZE - DIAMETER)/2 - 1) * unit_x)
y = round(((SIZE - DIAMETER)/2 - 1) * unit_y)
# Center the pixels for the library. (Doesn't appear to have
# any effect at 16x16, but might possibly matter if we add
# some kind of filter.)
x += 0.5
y += 0.5
# These are drawn as rectangles in a 16x16 image, but if we
# increase the size then they're circles.
draw.ellipse((x, y, x + DIAMETER, y + DIAMETER), fill=color, width=1)
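# Worked example (illustrative, not used by the script): with SIZE = 16 and
# DIAMETER = 2 the scale factor is (16 - 2)/2 - 1 = 6, so dot i = 2 (a quarter
# turn) lands at x = 12, y = 6 before the 0.5 pixel-centering offset:
#   x = round(6 * (1 + math.sin(2/8 * 2*math.pi)))  # -> 12
#   y = round(6 * (1 - math.cos(2/8 * 2*math.pi)))  # -> 6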
def draw_frame(framenum):
""" Draw a frame of the animation. """
# Create new image and drawing surface.
image = PIL.Image.new('LA', (SIZE, SIZE), (1, 255))
draw = PIL.ImageDraw.Draw(image)
# Draw the dots.
for i in range(VISUAL_DOTS):
pos = ((framenum - i) % TOTAL_DOTS)
# The Qt background is (239,239,239) so this fades from 0
# to 240 (but stops at 180).
gray = round(240/4*i)
draw_dot(draw, pos, (gray, 0))
return image
def main(filename):
""" Generate the animation and save it to the filename. """
# Generate frames.
images = []
for framenum in range(TOTAL_DOTS):
image = draw_frame(framenum)
images.append(image)
# Write gif.
images[0].save(filename, save_all=True, append_images=images[1:],
duration=SECONDS / TOTAL_DOTS * 1000,
loop=0,
transparency=1,
disposal=2)
if __name__ == "__main__":
main(OUTPUT)
|
ivy/doc/examples/udp_test_expect.py
|
b1f6c1c4/cfg-enum
| 113 |
105528
|
<reponame>b1f6c1c4/cfg-enum
import pexpect
import sys
def run(name,opts,res):
child = pexpect.spawn('./{}'.format(name))
child.logfile = sys.stdout
try:
child.expect('>')
child.sendline('foo.send(0,1,2)')
child.expect(r'< foo.recv\(1,2\)')
child.sendline('foo.send(1,0,3)')
child.expect(r'foo.recv\(0,3\)')
return True
except pexpect.EOF:
        print(child.before)
return False
|
quickumls/__init__.py
|
equipe22/QuickUMLS
| 283 |
105550
|
<gh_stars>100-1000
from .core import QuickUMLS
from .client import get_quickumls_client
from .about import *
|
mqbench/custom_quantizer/tensorrt_quantizer.py
|
ModelTC/MQBench
| 179 |
105602
|
import operator
from typing import List
import torch
from torch.fx import GraphModule
import mqbench.nn.qat as qnnqat
from mqbench.utils.logger import logger
from mqbench.utils.registry import register_model_quantizer
from mqbench.prepare_by_platform import BackendType
from mqbench.custom_quantizer import ModelQuantizer
class TRTModelQuantizer(ModelQuantizer):
"""The different points of TRT quantizer are how to deal with add op
and the last layer.
"""
def __init__(self, extra_quantizer_dict, extra_fuse_dict):
super().__init__(extra_quantizer_dict, extra_fuse_dict)
@property
def _merge_add_type(self):
return (torch.nn.Conv2d, torch.nn.Linear)
def _find_act_quants(self, model: GraphModule) -> set:
nodes = list(model.graph.nodes)
modules = dict(model.named_modules())
node_need_to_quantize_output = []
for node in nodes:
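            # Skip nodes excluded by module name, function/method type, or
            # node name, unless they are explicitly re-added via
            # additional_node_name.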
if ((node.op == "call_module" and node.target in self.exclude_module_name) or
((node.op == 'call_function' or node.op == 'call_method') and
node.target in self.exclude_function_type) or
node.name in self.exclude_node_name) and node.name not in self.additional_node_name:
continue
if (node.op == "call_module" and isinstance(modules[node.target], self.module_type_to_quant_input)) or \
((node.op == 'call_function' or node.op == 'call_method') and
node.target in self.function_type_to_quant_input) or node.name in self.additional_node_name:
# Add will be merged with previous conv.
input_node_list = list(filter(lambda x: isinstance(x, torch.fx.node.Node),
self._flatten_args(node.args)))
if node.target is operator.add:
merge_node = self._find_add_merge_node(model, input_node_list, node)
if merge_node:
input_node_list.remove(merge_node)
node_need_to_quantize_output.extend(input_node_list)
else:
for _node in input_node_list:
if self._is_implicit_merge(modules, (node, _node)):
continue
if isinstance(_node, torch.fx.node.Node):
node_need_to_quantize_output.append(_node)
return node_need_to_quantize_output
def _find_add_merge_node(self, model, input_node_list, node):
"""Find the first input node which has only one successor from the last.
This kind of node can be merge with add.
"""
input_node_list.reverse()
modules = dict(model.named_modules())
for input_node in input_node_list:
if input_node.op == 'call_module' and type(modules[input_node.target]) in self._merge_add_type:
succ = 0
for _node in list(model.graph.nodes):
_node_input_list = self._flatten_args(_node.args)
if input_node in _node_input_list:
succ += 1
if succ == 1:
return input_node
return None
@register_model_quantizer(BackendType.Tensorrt_NLP)
class TensorrtNLPQuantizer(ModelQuantizer):
"""
NLP model quantizer for Tensorrt settings.
We should quantize Linear / Embedding weights.
Linear / Matmul / Add layer inputs(activations).
We notice embedding add(word + pos + token_type) is not quantized,
so we find and skiped.
Add in MSA(add mask) should not be quantized either, we skipped it
by implicit_merge.
"""
@property
def implicit_merge_patterns(self) -> list:
# Layers which do not need quantize among them.
# In reversed order!
return [
(operator.add, operator.mul),
# Add in MSA block should not be quantized.
(operator.add, operator.truediv)
]
@property
def function_type_to_quant_input(self) -> list:
return [
operator.add,
# Matmul in MSA
torch.matmul
] + self.additional_function_type
@property
def module_type_to_quant_input(self) -> tuple:
return (
# Linear
torch.nn.qat.modules.linear.Linear,
) + self.additional_module_type
def _find_act_quants(self, model: GraphModule) -> List:
nodes = list(model.graph.nodes)
modules = dict(model.named_modules())
node_need_to_quantize_output = []
for node in nodes:
if ((node.op == "call_module" and node.target in self.exclude_module_name) or
((node.op == "call_function" or node.op == "all_method") and
node.target in self.exclude_function_type) or
node.name in self.exclude_node_name) and node.name not in self.additional_node_name:
logger.info("Exclude skip: {}".format(node.name))
continue
if (node.op == "call_module" and isinstance(modules[node.target], self.module_type_to_quant_input)) or \
((node.op == "call_function" or node.op == "call_method") and
node.target in self.function_type_to_quant_input) or node.name in self.additional_node_name:
input_node_list = self._flatten_args(node.args)
# Means this is not Tensor + Tensor.
if not all([isinstance(_node, torch.fx.node.Node) for _node in input_node_list]):
continue
# Embedding Add and MSA mask Add should be skipped.
if node.op == "call_function" and node.target == operator.add and \
self._is_skiped_add(node, modules, input_node_list):
continue
if node.op == "call_function" and node.target == operator.add:
import pdb
pdb.set_trace()
for _node in input_node_list:
if self._is_implicit_merge(modules, (node, _node)):
logger.info("Implicit merge: {} + {}".format(_node.name, node.name))
continue
node_need_to_quantize_output.append(_node)
return node_need_to_quantize_output
def _is_skiped_add(self, node, modules, input_node_list):
for _node in input_node_list:
if _node.op == "call_module" and isinstance(modules[_node.target], (qnnqat.Embedding, torch.nn.Embedding)):
logger.info("Skip embedding add: {}".format(node.name))
                return True
        return False
|
lib/plugins/1024.py
|
ikstream/Zeus-Scanner
| 841 |
105605
|
<reponame>ikstream/Zeus-Scanner<filename>lib/plugins/1024.py
import re
__product__ = "1024-CMS"
__description__ = (
"1024 is one of a few CMS's leading the way with "
"the implementation of the AJAX technology into "
"all its areas. This includes dynamic administration "
"and user interaction. 1024 offers you to ability to "
"set up your own community forums, download area, news "
"posts, member management and more."
)
def search(html, **kwargs):
html = str(html)
plugin_detection_schema = (
re.compile(r".1024cms.", re.I),
re.compile(r"<.+>powered.by.1024.cms<.+.>", re.I),
re.compile(r"1024.cms", re.I)
)
for plugin in plugin_detection_schema:
if plugin.search(html) is not None:
return True
|
moto/stepfunctions/exceptions.py
|
gtourkas/moto
| 5,460 |
105647
|
<filename>moto/stepfunctions/exceptions.py<gh_stars>1000+
from moto.core.exceptions import AWSError
class ExecutionAlreadyExists(AWSError):
TYPE = "ExecutionAlreadyExists"
STATUS = 400
class ExecutionDoesNotExist(AWSError):
TYPE = "ExecutionDoesNotExist"
STATUS = 400
class InvalidArn(AWSError):
TYPE = "InvalidArn"
STATUS = 400
class InvalidName(AWSError):
TYPE = "InvalidName"
STATUS = 400
class InvalidExecutionInput(AWSError):
TYPE = "InvalidExecutionInput"
STATUS = 400
class StateMachineDoesNotExist(AWSError):
TYPE = "StateMachineDoesNotExist"
STATUS = 400
class InvalidToken(AWSError):
TYPE = "InvalidToken"
STATUS = 400
def __init__(self, message="Invalid token"):
super(InvalidToken, self).__init__("Invalid Token: {}".format(message))
class ResourceNotFound(AWSError):
TYPE = "ResourceNotFound"
STATUS = 400
def __init__(self, arn):
super(ResourceNotFound, self).__init__("Resource not found: '{}'".format(arn))
|
neurolib/models/bold/__init__.py
|
leonidas228/neurolib
| 258 |
105710
|
from .model import BOLDModel
from .timeIntegration import simulateBOLD
|
pyqtgraph/parametertree/parameterTypes/colormap.py
|
hishizuka/pyqtgraph
| 2,762 |
105765
|
<reponame>hishizuka/pyqtgraph
from .basetypes import WidgetParameterItem, SimpleParameter
from ...Qt import QtCore
from ...colormap import ColorMap
from ...widgets.GradientWidget import GradientWidget
class ColorMapParameterItem(WidgetParameterItem):
"""Registered parameter type which displays a :class:`GradientWidget <pyqtgraph.GradientWidget>`"""
def makeWidget(self):
w = GradientWidget(orientation='bottom')
w.sizeHint = lambda: QtCore.QSize(300, 35)
w.sigChanged = w.sigGradientChangeFinished
w.sigChanging = w.sigGradientChanged
w.value = w.colorMap
w.setValue = w.setColorMap
self.hideWidget = False
self.asSubItem = True
return w
class ColorMapParameter(SimpleParameter):
itemClass = ColorMapParameterItem
def _interpretValue(self, v):
if v is not None and not isinstance(v, ColorMap):
raise TypeError("Cannot set colormap parameter from object %r" % v)
return v
|
submission.py
|
gengshan-y/expansion
| 132 |
105775
|
from __future__ import print_function
import sys
import cv2
import pdb
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import time
from utils.io import mkdir_p
from utils.util_flow import write_flow, save_pfm
from utils.flowlib import point_vec, warp_flow
cudnn.benchmark = False
parser = argparse.ArgumentParser(description='VCN+expansion')
parser.add_argument('--dataset', default='2015',
help='KITTI version')
parser.add_argument('--datapath', default='/ssd/kitti_scene/training/',
help='dataset path')
parser.add_argument('--loadmodel', default=None,
help='model path')
parser.add_argument('--outdir', default='output',
help='output dir')
parser.add_argument('--testres', type=float, default=1,
help='resolution')
parser.add_argument('--maxdisp', type=int, default=256,
                    help='maximum disparity. Only affects the coarsest cost volume size')
parser.add_argument('--fac', type=float, default=1,
                    help='controls the shape of the search grid. Only affects the coarse cost volume size')
args = parser.parse_args()
# dataloader
if args.dataset == '2015':
from dataloader import kitti15list as DA
maxw,maxh = [int(args.testres*1280), int(args.testres*384)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == '2015val':
from dataloader import kitti15list_val as DA
maxw,maxh = [int(args.testres*1280), int(args.testres*384)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == '2015vallidar':
from dataloader import kitti15list_val_lidar as DA
maxw,maxh = [int(args.testres*1280), int(args.testres*384)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == '2015test':
from dataloader import kitti15list as DA
maxw,maxh = [int(args.testres*1280), int(args.testres*384)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == 'seq':
from dataloader import seqlist as DA
maxw,maxh = [int(args.testres*1280), int(args.testres*384)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == 'sinteltest':
from dataloader import sintellist as DA
maxw,maxh = [int(args.testres*1024), int(args.testres*448)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
elif args.dataset == 'sintel':
from dataloader import sintellist_val as DA
maxw,maxh = [int(args.testres*1024), int(args.testres*448)]
test_left_img, test_right_img ,_= DA.dataloader(args.datapath)
max_h = int(maxh // 64 * 64)
max_w = int(maxw // 64 * 64)
if max_h < maxh: max_h += 64
if max_w < maxw: max_w += 64
maxh = max_h
maxw = max_w
mean_L = [[0.33,0.33,0.33]]
mean_R = [[0.33,0.33,0.33]]
# construct model, VCN-expansion
from models.VCN_exp import VCN
model = VCN([1, maxw, maxh], md=[int(4*(args.maxdisp/256)),4,4,4,4], fac=args.fac,
exp_unc=('robust' in args.loadmodel)) # expansion uncertainty only in the new model
model = nn.DataParallel(model, device_ids=[0])
model.cuda()
if args.loadmodel is not None:
pretrained_dict = torch.load(args.loadmodel)
mean_L=pretrained_dict['mean_L']
mean_R=pretrained_dict['mean_R']
pretrained_dict['state_dict'] = {k:v for k,v in pretrained_dict['state_dict'].items()}
model.load_state_dict(pretrained_dict['state_dict'],strict=False)
else:
print('dry run')
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
mkdir_p('%s/%s/'% (args.outdir, args.dataset))
def main():
model.eval()
ttime_all = []
for inx in range(len(test_left_img)):
print(test_left_img[inx])
imgL_o = cv2.imread(test_left_img[inx])[:,:,::-1]
imgR_o = cv2.imread(test_right_img[inx])[:,:,::-1]
# for gray input images
if len(imgL_o.shape) == 2:
imgL_o = np.tile(imgL_o[:,:,np.newaxis],(1,1,3))
imgR_o = np.tile(imgR_o[:,:,np.newaxis],(1,1,3))
# resize
maxh = imgL_o.shape[0]*args.testres
maxw = imgL_o.shape[1]*args.testres
max_h = int(maxh // 64 * 64)
max_w = int(maxw // 64 * 64)
if max_h < maxh: max_h += 64
if max_w < maxw: max_w += 64
input_size = imgL_o.shape
imgL = cv2.resize(imgL_o,(max_w, max_h))
imgR = cv2.resize(imgR_o,(max_w, max_h))
# flip channel, subtract mean
imgL = imgL[:,:,::-1].copy() / 255. - np.asarray(mean_L).mean(0)[np.newaxis,np.newaxis,:]
imgR = imgR[:,:,::-1].copy() / 255. - np.asarray(mean_R).mean(0)[np.newaxis,np.newaxis,:]
imgL = np.transpose(imgL, [2,0,1])[np.newaxis]
imgR = np.transpose(imgR, [2,0,1])[np.newaxis]
# modify module according to inputs
from models.VCN_exp import WarpModule, flow_reg
for i in range(len(model.module.reg_modules)):
model.module.reg_modules[i] = flow_reg([1,max_w//(2**(6-i)), max_h//(2**(6-i))],
ent=getattr(model.module, 'flow_reg%d'%2**(6-i)).ent,\
maxdisp=getattr(model.module, 'flow_reg%d'%2**(6-i)).md,\
fac=getattr(model.module, 'flow_reg%d'%2**(6-i)).fac).cuda()
for i in range(len(model.module.warp_modules)):
model.module.warp_modules[i] = WarpModule([1,max_w//(2**(6-i)), max_h//(2**(6-i))]).cuda()
# forward
imgL = Variable(torch.FloatTensor(imgL).cuda())
imgR = Variable(torch.FloatTensor(imgR).cuda())
with torch.no_grad():
imgLR = torch.cat([imgL,imgR],0)
model.eval()
torch.cuda.synchronize()
start_time = time.time()
rts = model(imgLR)
torch.cuda.synchronize()
ttime = (time.time() - start_time); print('time = %.2f' % (ttime*1000) )
ttime_all.append(ttime)
flow, occ, logmid, logexp = rts
# upsampling
occ = cv2.resize(occ.data.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)
logexp = cv2.resize(logexp.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)
logmid = cv2.resize(logmid.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)
flow = torch.squeeze(flow).data.cpu().numpy()
flow = np.concatenate( [cv2.resize(flow[0],(input_size[1],input_size[0]))[:,:,np.newaxis],
cv2.resize(flow[1],(input_size[1],input_size[0]))[:,:,np.newaxis]],-1)
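        # rescale flow vectors from the resized network input (max_w x max_h)
        # back to the original image resolution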
flow[:,:,0] *= imgL_o.shape[1] / max_w
flow[:,:,1] *= imgL_o.shape[0] / max_h
flow = np.concatenate( (flow, np.ones([flow.shape[0],flow.shape[1],1])),-1)
# save predictions
idxname = test_left_img[inx].split('/')[-1]
with open('%s/%s/flo-%s.pfm'% (args.outdir, args.dataset,idxname.split('.')[0]),'w') as f:
save_pfm(f,flow[::-1].astype(np.float32))
flowvis = point_vec(imgL_o, flow)
cv2.imwrite('%s/%s/visflo-%s.jpg'% (args.outdir, args.dataset,idxname),flowvis)
imwarped = warp_flow(imgR_o, flow[:,:,:2])
cv2.imwrite('%s/%s/warp-%s.jpg'% (args.outdir, args.dataset,idxname),imwarped[:,:,::-1])
with open('%s/%s/occ-%s.pfm'% (args.outdir, args.dataset,idxname.split('.')[0]),'w') as f:
save_pfm(f,occ[::-1].astype(np.float32))
with open('%s/%s/exp-%s.pfm'% (args.outdir, args.dataset,idxname.split('.')[0]),'w') as f:
save_pfm(f,logexp[::-1].astype(np.float32))
with open('%s/%s/mid-%s.pfm'% (args.outdir, args.dataset,idxname.split('.')[0]),'w') as f:
save_pfm(f,logmid[::-1].astype(np.float32))
torch.cuda.empty_cache()
print(np.mean(ttime_all))
if __name__ == '__main__':
main()
|
Filters/Extraction/Testing/Python/ExtractTensors.py
|
txwhhny/vtk
| 1,755 |
105790
|
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create tensor ellipsoids
# Create the RenderWindow, Renderer and interactive renderer
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ptLoad = vtk.vtkPointLoad()
ptLoad.SetLoadValue(100.0)
ptLoad.SetSampleDimensions(30,30,30)
ptLoad.ComputeEffectiveStressOn()
ptLoad.SetModelBounds(-10,10,-10,10,-10,10)
extractTensor = vtk.vtkExtractTensorComponents()
extractTensor.SetInputConnection(ptLoad.GetOutputPort())
extractTensor.ScalarIsEffectiveStress()
extractTensor.ScalarIsComponent()
extractTensor.ExtractScalarsOn()
extractTensor.ExtractVectorsOn()
extractTensor.ExtractNormalsOff()
extractTensor.ExtractTCoordsOn()
contour = vtk.vtkContourFilter()
contour.SetInputConnection(extractTensor.GetOutputPort())
contour.SetValue(0,0)
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(contour.GetOutputPort())
probe.SetSourceConnection(ptLoad.GetOutputPort())
su = vtk.vtkLoopSubdivisionFilter()
su.SetInputConnection(probe.GetOutputPort())
su.SetNumberOfSubdivisions(1)
s1Mapper = vtk.vtkPolyDataMapper()
s1Mapper.SetInputConnection(probe.GetOutputPort())
# s1Mapper SetInputConnection [su GetOutputPort]
s1Actor = vtk.vtkActor()
s1Actor.SetMapper(s1Mapper)
#
# plane for context
#
g = vtk.vtkImageDataGeometryFilter()
g.SetInputConnection(ptLoad.GetOutputPort())
g.SetExtent(0,100,0,100,0,0)
g.Update()
#for scalar range
gm = vtk.vtkPolyDataMapper()
gm.SetInputConnection(g.GetOutputPort())
gm.SetScalarRange(g.GetOutput().GetScalarRange())
ga = vtk.vtkActor()
ga.SetMapper(gm)
s1Mapper.SetScalarRange(g.GetOutput().GetScalarRange())
#
# Create outline around data
#
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(ptLoad.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
#
# Create cone indicating application of load
#
coneSrc = vtk.vtkConeSource()
coneSrc.SetRadius(.5)
coneSrc.SetHeight(2)
coneMap = vtk.vtkPolyDataMapper()
coneMap.SetInputConnection(coneSrc.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMap)
coneActor.SetPosition(0,0,11)
coneActor.RotateY(90)
coneActor.GetProperty().SetColor(1,0,0)
camera = vtk.vtkCamera()
camera.SetFocalPoint(0.113766,-1.13665,-1.01919)
camera.SetPosition(-29.4886,-63.1488,26.5807)
camera.SetViewAngle(24.4617)
camera.SetViewUp(0.17138,0.331163,0.927879)
camera.SetClippingRange(1,100)
ren1.AddActor(s1Actor)
ren1.AddActor(outlineActor)
ren1.AddActor(coneActor)
ren1.AddActor(ga)
ren1.SetBackground(1.0,1.0,1.0)
ren1.SetActiveCamera(camera)
renWin.SetSize(300,300)
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
test/test_slimta_logging_socket.py
|
nanojob/python-slimta
| 141 |
105793
|
<gh_stars>100-1000
import unittest
import errno
import logging
import socket
from testfixtures import log_capture
import slimta.logging.socket
from slimta.logging import getSocketLogger
class FakeSocket(object):
def __init__(self, fd, peer=None):
self.fd = fd
self.peer = peer
def fileno(self):
return self.fd
def getpeername(self):
return self.peer
class FakeContext(object):
def session_stats(self):
return {'hits': 13}
class TestSocketLogger(unittest.TestCase):
def setUp(self):
self.log = getSocketLogger('test')
@log_capture()
def test_send(self, l):
sock = FakeSocket(136)
self.log.send(sock, 'test send')
l.check(('test', 'DEBUG', 'fd:136:send data=\'test send\''))
@log_capture()
def test_recv(self, l):
sock = FakeSocket(29193)
self.log.recv(sock, 'test recv')
l.check(('test', 'DEBUG', 'fd:29193:recv data=\'test recv\''))
@log_capture()
def test_accept(self, l):
server = FakeSocket(926)
client = FakeSocket(927, 'testpeer')
self.log.accept(server, client)
self.log.accept(server, client, 'testpeer2')
l.check(('test', 'DEBUG', 'fd:926:accept clientfd=927 peer=\'testpeer\''),
('test', 'DEBUG', 'fd:926:accept clientfd=927 peer=\'testpeer2\''))
@log_capture()
def test_connect(self, l):
sock = FakeSocket(539, 'testpeer')
self.log.connect(sock)
self.log.connect(sock, 'testpeer2')
l.check(('test', 'DEBUG', 'fd:539:connect peer=\'testpeer\''),
('test', 'DEBUG', 'fd:539:connect peer=\'testpeer2\''))
@log_capture()
def test_encrypt(self, l):
sock = FakeSocket(445)
context = FakeContext()
self.log.encrypt(sock, context)
l.check(('test', 'DEBUG', 'fd:445:encrypt hits=13'))
@log_capture()
def test_shutdown(self, l):
sock = FakeSocket(823)
self.log.shutdown(sock, socket.SHUT_RD)
self.log.shutdown(sock, socket.SHUT_WR)
self.log.shutdown(sock, socket.SHUT_RDWR)
l.check(('test', 'DEBUG', 'fd:823:shutdown how=\'read\''),
('test', 'DEBUG', 'fd:823:shutdown how=\'write\''),
('test', 'DEBUG', 'fd:823:shutdown how=\'both\''))
@log_capture()
def test_close(self, l):
sock = FakeSocket(771)
self.log.close(sock)
l.check(('test', 'DEBUG', 'fd:771:close'))
@log_capture()
def test_error(self, l):
sock = FakeSocket(680)
exc = OSError(errno.EPIPE, 'Broken pipe')
self.log.error(sock, exc, 'testaddress')
slimta.logging.socket.socket_error_log_level = logging.WARNING
self.log.error(sock, exc)
l.check(('test', 'ERROR', 'fd:680:error address=\'testaddress\' args=(32, \'Broken pipe\') message=\'[Errno 32] Broken pipe\''),
('test', 'WARNING', 'fd:680:error args=(32, \'Broken pipe\') message=\'[Errno 32] Broken pipe\''))
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
comicolorization_sr/colorization_task/base.py
|
DwangoMediaVillage/Comicolorization
| 122 |
105831
|
<reponame>DwangoMediaVillage/Comicolorization
from abc import ABCMeta, abstractmethod
import typing
import six
from comicolorization_sr.config import Config
from comicolorization_sr.data_process import BaseDataProcess
@six.add_metaclass(ABCMeta)
class BaseColorizationTask(object):
def __init__(self, config, load_model=True):
# type: (Config, any) -> None
self.config = config
self.load_model = load_model
@abstractmethod
def get_input_process(self):
# type: (any) -> BaseDataProcess
pass
@abstractmethod
def get_concat_process(self):
# type: (any) -> BaseDataProcess
pass
@abstractmethod
def get_colorizer(self):
# type: (any) -> typing.Callable[[typing.Any, bool], typing.Any]
pass
|
examples/sas_logical_interconnect_groups.py
|
doziya/hpeOneView
| 107 |
105850
|
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from pprint import pprint
from hpOneView.oneview_client import OneViewClient
from config_loader import try_load_from_file
# This resource is only available on HPE Synergy
config = {
"ip": "<oneview_ip>",
"credentials": {
"userName": "<username>",
"password": "<password>"
}
}
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
# The Interconnect Type URI that is permitted to form the SAS interconnect map must be defined to run this example
permittedInterconnectTypeUri = '/rest/sas-interconnect-types/Synergy12GbSASConnectionModule'
# Create a SAS Logical Interconnect Group
data = {
"name": "Test SAS Logical Interconnect Group",
"state": "Active",
"interconnectMapTemplate": {
"interconnectMapEntryTemplates": [{
"logicalLocation": {
"locationEntries": [
{
"type": "Bay",
"relativeValue": 1
}, {
"type": "Enclosure",
"relativeValue": 1
}
]
},
"enclosureIndex": 1,
"permittedInterconnectTypeUri": permittedInterconnectTypeUri
}, {
"logicalLocation": {
"locationEntries": [
{
"type": "Bay",
"relativeValue": 4
}, {
"type": "Enclosure",
"relativeValue": 1
}
]
},
"enclosureIndex": 1,
"permittedInterconnectTypeUri": permittedInterconnectTypeUri
}]
},
"enclosureType": "SY12000",
"enclosureIndexes": [1],
"interconnectBaySet": 1
}
sas_lig_created = oneview_client.sas_logical_interconnect_groups.create(data)
print("\nSAS Logical Interconnect Group '{name}' created successfully.\n uri = '{uri}'".format(**sas_lig_created))
# Get all SAS Logical Interconnect Groups
print("\nGet all SAS Logical Interconnect Groups")
sas_ligs = oneview_client.sas_logical_interconnect_groups.get_all()
for sas_lig in sas_ligs:
print("\n '{name}' at uri: {uri}".format(**sas_lig))
# Get a SAS Logical Interconnect Group by URI
print("\nGet a SAS Logical Interconnect Group by URI")
sas_lig_found_by_uri = oneview_client.sas_logical_interconnect_groups.get(sas_lig_created['uri'])
pprint(sas_lig_found_by_uri)
# Get a SAS Logical Interconnect Group by name
print("\nGet a SAS Logical Interconnect Group by name")
sas_lig_found_by_name = oneview_client.sas_logical_interconnect_groups.get_by('name', data['name'])
print("\n '{name}' at uri: {uri}".format(**sas_lig_found_by_name[0]))
# Get SAS Interconnect Group by scope_uris
if oneview_client.api_version >= 600:
sas_lig_by_scope_uris = oneview_client.sas_logical_interconnect_groups.get_all(
scope_uris="\"'/rest/scopes/3bb0c754-fd38-45af-be8a-4d4419de06e9'\"")
if len(sas_lig_by_scope_uris) > 0:
print("found %d SAS Interconnect Groups" % (len(sas_lig_by_scope_uris)))
i = 0
while i < len(sas_lig_by_scope_uris):
print("Found SAS Interconnect Group by scope_uris: '%s'.\n uri = '%s'" % (sas_lig_by_scope_uris[i]['name'], sas_lig_by_scope_uris[i]['uri']))
i += 1
pprint(sas_lig_by_scope_uris)
else:
print("No SAS Interconnect Group found.")
# Update the SAS Logical Interconnect Group
print("\nUpdate the SAS Logical Interconnect Group")
resource_to_update = sas_lig_created.copy()
resource_to_update['name'] = 'Test SAS Logical Interconnect Group - Renamed'
sas_lig_updated = oneview_client.sas_logical_interconnect_groups.update(resource_to_update)
pprint(sas_lig_updated)
# Delete the SAS Logical Interconnect Group
oneview_client.sas_logical_interconnect_groups.delete(sas_lig_updated)
print("\nSAS Logical Interconnect Group deleted successfully")
|
src/build_hops_dist.py
|
talebia/compute.rhino3d
| 182 |
105863
|
# Build yak packages for publishing
import os
import shutil
import sys
src_dir = os.path.dirname(os.path.realpath(__file__))
dist_dir = os.path.join(src_dir, 'dist')
# clear output dir
if os.path.exists(dist_dir):
shutil.rmtree(dist_dir)
# build hops (inc. self-contained rhino.compute.exe)
os.chdir(src_dir)
build_cmd = 'dotnet publish .\\hops.sln'
build_cmd += ' -c Release'
build_cmd += ' -p:PublishTrimmed=true'
build_cmd += ' --self-contained true'
build_cmd += ' -r win-x64'
rv = os.system(build_cmd)
if (rv != 0): sys.exit(rv)
# build yak package
os.chdir(dist_dir)
os.system('"C:\\Program Files\\Rhino 7\\System\\Yak.exe" build')
# make V8 version as well
for file in os.listdir('.'):
if file.endswith('.yak'):
v8name = file.replace('-rh7', '-rh8')
shutil.copy(file, v8name)
|
tensornets/capsulenets.py
|
mehrdad-shokri/tensornets
| 1,057 |
105864
|
"""Collection of CapsuleNet variants
The reference paper:
- Dynamic Routing Between Capsules
- <NAME>, <NAME>, <NAME>
- https://arxiv.org/abs/1710.09829
The reference implementations:
1. TensorFlow CapsNet
- https://github.com/naturomics/CapsNet-Tensorflow
2. Keras CapsNet
- https://github.com/XifengGuo/CapsNet-Keras
"""
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
from .layers import batch_norm
from .layers import conv2d
from .layers import convrelu as conv
from .ops import *
from .utils import ops_to_outputs
from .utils import set_args
from .utils import var_scope
def __args__(is_training):
return [([batch_norm], {'scale': True, 'is_training': is_training,
'epsilon': 1e-5, 'scope': 'bn'}),
([conv2d], {'padding': 'VALID', 'activation_fn': None,
'biases_initializer': None, 'scope': 'conv'})]
@ops_to_outputs
def squash(x, epsilon=1e-9, name=None):
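    # Squash nonlinearity from the CapsNet paper:
    # v = (||s||^2 / (1 + ||s||^2)) * s / ||s||; epsilon avoids division by zero.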
norm = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)
scale = norm / (1. + norm) / tf.sqrt(norm + epsilon)
return tf.multiply(x, scale, name=name)
@var_scope('primary')
def primary(x, filters, length, kernel_size, stride, scope=None):
x = conv(x, filters * length, kernel_size, stride=stride, scope='conv')
pixels = np.prod(x.shape[1:-1].as_list())
x = reshape(x, (-1, pixels * filters, length), name='out')
return x
@var_scope('digit')
def digit(x, filters, length, iters=3, scope=None):
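    # Dynamic routing: prediction vectors (uhat) are weighted by coupling
    # coefficients (softmax of the logits b), squashed into the output v, and
    # b is updated from the agreement between uhat and v.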
filters0 = int(x.shape[1]) if tf_later_than('2') else x.shape[1].value
length0 = int(x.shape[2]) if tf_later_than('2') else x.shape[2].value
# fully-connected weights between capsules: [1152, 8, 10 * 16]
w = tf.get_variable('weights', shape=(filters0, length0, filters * length),
dtype=tf.float32)
# coupling logits: [1152, 10]
b = tf.zeros((filters0, filters))
# prediction vectors: [None, 1152, 10, 16]
uhat = tf.scan(lambda a, b: tf.matmul(b, w), tf.expand_dims(x, 2),
initializer=tf.zeros([filters0, 1, filters * length]))
uhat = reshape(uhat, (-1, filters0, filters, length), name='predvec')
for r in range(iters):
with tf.variable_scope("iter%d" % r):
# coupling coefficients: [1152, 10]
c = softmax(b, name='softmax')
# activity vector: [None, 10, 16]
v = squash(tf.reduce_sum(uhat * tf.expand_dims(c, -1), axis=1),
name='out')
# agreement: [None, 1152, 10]
a = reduce_sum(tf.multiply(uhat, tf.expand_dims(v, 1)), axis=-1,
name='agreement')
# updates coupling logits
b = b + reduce_sum(a, axis=0, name='delta')
return v
@var_scope('capsulenet')
@set_args(__args__)
def capsulenet_mnist(x, is_training=False, classes=10, scope=None, reuse=None):
x = conv(x, 256, 9, stride=1, scope='conv1')
x = primary(x, 32, 8, 9, stride=2, scope='primary')
x = digit(x, 10, 16, scope='digit')
return x
# Simple alias.
CapsuleNet = capsulenet_mnist
|
pennylane/numpy/__init__.py
|
therooler/pennylane
| 539 |
105865
|
<gh_stars>100-1000
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Overview
--------
The PennyLane NumPy subpackage provides a differentiable wrapper around NumPy, that enables
backpropagation through standard NumPy code.
This version of NumPy **must** be used when using PennyLane with the :doc:`Autograd interface
</introduction/interfaces/numpy>`:
>>> from pennylane import numpy as np
.. note::
    If using other interfaces, such as :doc:`TensorFlow </introduction/interfaces/tf>`, :doc:`PyTorch
</introduction/interfaces/torch>`, or :doc:`JAX </introduction/interfaces/jax>`, then the
PennyLane-provided NumPy should not be used; instead, simply use the standard NumPy import.
This package is a wrapper around ``autograd.numpy``; for details on all available functions,
please refer to the `Autograd
docs <https://github.com/HIPS/autograd/blob/master/docs/tutorial.md>`__.
PennyLane additionally extends Autograd with the following classes,
errors, and functions:
.. autosummary::
:toctree: api
:nosignatures:
:template: autosummary/class_no_inherited.rst
~wrap_arrays
~extract_tensors
~tensor_wrapper
~tensor
~NonDifferentiableError
Caveats
-------
This package is a wrapper around ``autograd.numpy``, and therefore comes with several caveats
inherited from Autograd:
**Do not use:**
- Assignment to arrays, such as ``A[0, 0] = x``.
..
- Implicit casting of lists to arrays, for example ``A = np.sum([x, y])``.
Make sure to explicitly cast to a NumPy array first, i.e.,
``A = np.sum(np.array([x, y]))`` instead.
..
- ``A.dot(B)`` notation. Use ``np.dot(A, B)`` or ``A @ B`` instead.
..
- In-place operations such as ``a += b``. Use ``a = a + b`` instead.
..
- Some ``isinstance`` checks, like ``isinstance(x, np.ndarray)`` or ``isinstance(x, tuple)``,
without first doing ``from autograd.builtins import isinstance, tuple``.
For more details, please consult the `Autograd
docs <https://github.com/HIPS/autograd/blob/master/docs/tutorial.md>`__.
"""
# pylint: disable=wrong-import-position,wildcard-import,undefined-variable
from autograd import numpy as _np
from autograd.numpy import *
from .wrapper import wrap_arrays, extract_tensors, tensor_wrapper
wrap_arrays(_np.__dict__, globals())
# Delete the unwrapped fft, linalg, random modules
# so that we can re-import our wrapped versions.
del fft
del linalg
del random
from . import fft
from . import linalg
from . import random
from .tensor import tensor, NonDifferentiableError, asarray as _asarray
asarray = tensor_wrapper(_asarray)
|
InvenTree/company/migrations/0006_supplierpricebreak_currency.py
|
ArakniD/InvenTree
| 656 |
105866
|
<gh_stars>100-1000
# Generated by Django 2.2.4 on 2019-09-02 23:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('common', '0003_auto_20190902_2310'),
('company', '0005_auto_20190525_2356'),
]
operations = [
migrations.AddField(
model_name='supplierpricebreak',
name='currency',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='common.Currency'),
),
]
|
paddlex/ppcls/utils/metrics.py
|
xiaolao/PaddleX
| 3,655 |
105873
|
<reponame>xiaolao/PaddleX
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn.metrics import hamming_loss
from sklearn.metrics import accuracy_score as accuracy_metric
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import binarize
import numpy as np
__all__ = [
"multi_hot_encode", "hamming_distance", "accuracy_score",
"precision_recall_fscore", "mean_average_precision"
]
def multi_hot_encode(logits, threshold=0.5):
"""
    Encode logits to multi-hot, elementwise, for multilabel classification
"""
return binarize(logits, threshold)
def hamming_distance(output, target):
"""
    Soft, label-based metric for multilabel classification
Returns:
The smaller the return value is, the better model is.
"""
return hamming_loss(target, output)
def accuracy_score(output, target, base="sample"):
"""
Hard metric for multilabel classification
Args:
output:
target:
base: ["sample", "label"], default="sample"
if "sample", return metric score based sample,
if "label", return metric score based label.
Returns:
accuracy:
"""
assert base in ["sample", "label"], 'must be one of ["sample", "label"]'
if base == "sample":
accuracy = accuracy_metric(target, output)
elif base == "label":
mcm = multilabel_confusion_matrix(target, output)
tns = mcm[:, 0, 0]
fns = mcm[:, 1, 0]
tps = mcm[:, 1, 1]
fps = mcm[:, 0, 1]
accuracy = (sum(tps) + sum(tns)) / (
sum(tps) + sum(tns) + sum(fns) + sum(fps))
return accuracy
def precision_recall_fscore(output, target):
"""
    Label-based metric for multilabel classification
Returns:
precisions:
recalls:
fscores:
"""
precisions, recalls, fscores, _ = precision_recall_fscore_support(target,
output)
return precisions, recalls, fscores
def mean_average_precision(logits, target):
"""
Calculate average precision
Args:
logits: probability from network before sigmoid or softmax
target: ground truth, 0 or 1
"""
if not (isinstance(logits, np.ndarray) and isinstance(target, np.ndarray)):
raise TypeError("logits and target should be np.ndarray.")
aps = []
for i in range(target.shape[1]):
ap = average_precision_score(target[:, i], logits[:, i])
aps.append(ap)
return np.mean(aps)
|
third_party/blink/tools/blinkpy/w3c/pr_cleanup_tool.py
|
zealoussnow/chromium
| 14,668 |
105880
|
<gh_stars>1000+
"""Cleans up PRs that correspond to abandoned CLs in Gerrit."""
import argparse
import logging
from blinkpy.common.system.log_utils import configure_logging
from blinkpy.w3c.wpt_github import WPTGitHub
from blinkpy.w3c.gerrit import GerritAPI, GerritError
from blinkpy.w3c.common import (read_credentials)
_log = logging.getLogger(__name__)
class PrCleanupTool(object):
def __init__(self, host):
self.host = host
self.wpt_github = None
self.gerrit = None
def main(self, argv=None):
"""Closes all PRs that are abandoned in Gerrit."""
options = self.parse_args(argv)
log_level = logging.DEBUG if options.verbose else logging.INFO
configure_logging(logging_level=log_level, include_time=True)
credentials = read_credentials(self.host, options.credentials_json)
gh_user = credentials.get('GH_USER')
gh_token = credentials.get('GH_TOKEN')
if not gh_user or not gh_token:
_log.error('You have not set your GitHub credentials. This '
'script may fail with a network error when making '
'an API request to GitHub.')
_log.error('See https://chromium.googlesource.com/chromium/src'
'/+/master/docs/testing/web_platform_tests.md'
'#GitHub-credentials for instructions on how to set '
'your credentials up.')
return False
gr_user = credentials['GERRIT_USER']
gr_token = credentials['GERRIT_TOKEN']
if not gr_user or not gr_token:
_log.warning('You have not set your Gerrit credentials. This '
'script may fail with a network error when making '
'an API request to Gerrit.')
self.wpt_github = self.wpt_github or WPTGitHub(self.host, gh_user,
gh_token)
self.gerrit = self.gerrit or GerritAPI(self.host, gr_user, gr_token)
pull_requests = self.retrieve_all_prs()
for pull_request in pull_requests:
if pull_request.state != 'open':
continue
change_id = self.wpt_github.extract_metadata(
'Change-Id: ', pull_request.body)
if not change_id:
continue
try:
cl = self.gerrit.query_cl(change_id)
except GerritError as e:
_log.error('Could not query change_id %s: %s', change_id,
str(e))
continue
cl_status = cl.status
if cl_status == 'ABANDONED':
comment = 'Close this PR because the Chromium CL has been abandoned.'
self.log_affected_pr_details(pull_request, comment)
self.close_pr_and_delete_branch(pull_request.number, comment)
elif cl_status == 'MERGED' and (not cl.is_exportable()):
comment = 'Close this PR because the Chromium CL does not have exportable changes.'
self.log_affected_pr_details(pull_request, comment)
self.close_pr_and_delete_branch(pull_request.number, comment)
return True
def parse_args(self, argv):
parser = argparse.ArgumentParser()
parser.description = __doc__
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='log extra details that may be helpful when debugging')
parser.add_argument(
'--credentials-json',
help='A JSON file with GitHub credentials, '
'generally not necessary on developer machines')
return parser.parse_args(argv)
def retrieve_all_prs(self):
"""Retrieves last 1000 PRs."""
return self.wpt_github.all_pull_requests()
def close_pr_and_delete_branch(self, pull_request_number, comment):
"""Closes a PR with a comment and delete the corresponding branch."""
self.wpt_github.add_comment(pull_request_number, comment)
self.wpt_github.update_pr(pull_request_number, state='closed')
branch = self.wpt_github.get_pr_branch(pull_request_number)
self.wpt_github.delete_remote_branch(branch)
def log_affected_pr_details(self, pull_request, comment):
"""Logs details of an affected PR."""
_log.info(comment)
_log.info('https://github.com/web-platform-tests/wpt/pull/%s',
pull_request.number)
_log.info(
self.wpt_github.extract_metadata('Reviewed-on: ',
pull_request.body))
|
tools/bin/ext/figleaf/annotate_cover.py
|
YangHao666666/hawq
| 450 |
105919
|
<reponame>YangHao666666/hawq<gh_stars>100-1000
import figleaf
import os
import re
from annotate import read_exclude_patterns, filter_files, logger
def report_as_cover(coverage, exclude_patterns=[], ):
### now, output.
keys = coverage.keys()
info_dict = {}
for k in filter_files(keys):
try:
pyfile = open(k, 'rU')
lines = figleaf.get_lines(pyfile)
except IOError:
logger.warning('CANNOT OPEN: %s' % k)
continue
except KeyboardInterrupt:
raise
except Exception, e:
logger.error('ERROR: file %s, exception %s' % (pyfile, str(e)))
continue
# ok, got all the info. now annotate file ==> html.
covered = coverage[k]
pyfile = open(k)
(n_covered, n_lines, output) = make_cover_lines(lines, covered, pyfile)
try:
pcnt = n_covered * 100. / n_lines
except ZeroDivisionError:
pcnt = 100
info_dict[k] = (n_lines, n_covered, pcnt)
outfile = make_cover_filename(k)
try:
outfp = open(outfile, 'w')
outfp.write("\n".join(output))
outfp.write("\n")
outfp.close()
except IOError:
logger.warning('cannot open filename %s' % (outfile,))
continue
logger.info('reported on %s' % (outfile,))
### print a summary, too.
info_dict_items = info_dict.items()
def sort_by_pcnt(a, b):
a = a[1][2]
b = b[1][2]
return -cmp(a,b)
info_dict_items.sort(sort_by_pcnt)
logger.info('reported on %d file(s) total\n' % len(info_dict))
return len(info_dict)
def make_cover_lines(line_info, coverage_info, fp):
n_covered = n_lines = 0
output = []
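    # Prefix each output line: '+' = executed, '-' = executable but not
    # covered, '0' = not an executable line.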
for i, line in enumerate(fp):
is_covered = False
is_line = False
i += 1
if i in coverage_info:
is_covered = True
prefix = '+'
n_covered += 1
n_lines += 1
elif i in line_info:
prefix = '-'
is_line = True
n_lines += 1
else:
prefix = '0'
line = line.rstrip()
output.append(prefix + ' ' + line)
return (n_covered, n_lines, output)
def make_cover_filename(orig):
return orig + '.cover'
def main():
import sys
import logging
from optparse import OptionParser
###
option_parser = OptionParser()
option_parser.add_option('-x', '--exclude-patterns', action="store",
dest="exclude_patterns_file",
help="file containing regexp patterns to exclude")
option_parser.add_option('-q', '--quiet', action='store_true',
dest='quiet',
help="file containig regexp patterns of files to exclude from report")
option_parser.add_option('-D', '--debug', action='store_true',
dest='debug',
help='Show all debugging messages')
(options, args) = option_parser.parse_args()
if options.quiet:
logging.disable(logging.DEBUG)
if options.debug:
logger.setLevel(logging.DEBUG)
### load
if not args:
args = ['.figleaf']
coverage = {}
for filename in args:
logger.debug("loading coverage info from '%s'\n" % (filename,))
d = figleaf.read_coverage(filename)
coverage = figleaf.combine_coverage(coverage, d)
if not coverage:
logger.warning('EXITING -- no coverage info!\n')
sys.exit(-1)
exclude = read_exclude_patterns(options.exclude_patterns_file)
report_as_cover(coverage, exclude)
|
project/auth/views.py
|
infrascloudy/ajax_helpdesk
| 296 |
105921
|
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, redirect, request, current_app, g, flash, url_for
from flask_login import login_required, logout_user
from flask_babel import gettext as _
from .models import User
from ..extensions import db
from .forms import SettingsForm
auth = Blueprint('auth', __name__, url_prefix='/auth/', template_folder="templates")
@auth.route('login')
def login():
next_url = request.args.get('next') or request.referrer or None
return render_template('auth/index.html', next=next_url)
@auth.route('loggedin')
def loggedin():
return redirect(request.args.get('next') or url_for('frontend.index'))
@auth.route('profile')
@login_required
def profile():
return render_template('auth/profile.html')
@auth.route('set_lang')
@login_required
def set_lang():
if request.args.get('lang') in current_app.config['LANGUAGES']:
user = User.query.get_or_404(g.user.id)
user.ui_lang = request.args.get('lang')
db.session.add(user)
db.session.commit()
return redirect('/')
@auth.route('settings', methods=['GET', 'POST'])
@login_required
def settings():
form = SettingsForm(request.form, g.user)
form.ui_lang.choices = current_app.config['LANGUAGES'].items()
if form.validate_on_submit():
form.populate_obj(g.user)
db.session.add(g.user)
db.session.commit()
flash(_("Settings saved"))
return render_template('auth/settings.html', languages=current_app.config['LANGUAGES'], form=form)
@auth.route('logout')
def logout():
logout_user()
return redirect('/')
|
learnpy_ecourse/class3/ex4_ip_address_valid.py
|
fallenfuzz/pynet
| 528 |
105939
|
#!/usr/bin/env python
'''
Disclaimer - This is a solution to the below problem given the content we have
discussed in class. It is not necessarily the best solution to the problem.
In other words, I only use things we have covered up to this point in the class.
Well, with some exceptions (I use try / except below).
Python for Network Engineers
https://pynet.twb-tech.com
Learning Python
Class#3
V. Create a script that checks the validity of an IP address. The IP address
should be supplied on the command line.
A. Check that the IP address contains 4 octets.
B. The first octet must be between 1 - 223.
C. The first octet cannot be 127.
D. The IP address cannot be in the 169.254.X.X address space.
E. The last three octets must range between 0 - 255.
For output, print the IP and whether it is valid or not.
'''
import sys
if len(sys.argv) != 2:
# Exit the script
sys.exit("Usage: ./ex4_ip_address_valid.py <ip_address>")
ip_addr = sys.argv.pop()
valid_ip = True
# Make sure IP has four octets
octets = ip_addr.split('.')
if len(octets) != 4:
sys.exit("\n\nInvalid IP address: %s\n" % ip_addr)
# convert octet from string to int
for i, octet in enumerate(octets):
# I haven't told you about exception handling yet (soon)
# You could do without this, the script will just crash
# on certain invalid input (for example, '1.1.1.')
try:
octets[i] = int(octet)
except ValueError:
# couldn't convert octet to an integer
sys.exit("\n\nInvalid IP address: %s\n" % ip_addr)
# map variables to elements of octets list
first_octet, second_octet, third_octet, fourth_octet = octets
# Check first_octet meets conditions
if first_octet < 1:
valid_ip = False
elif first_octet > 223:
valid_ip = False
elif first_octet == 127:
valid_ip = False
# Check 169.254.X.X condition
if first_octet == 169 and second_octet == 254:
valid_ip = False
# Check 2nd - 4th octets
for octet in (second_octet, third_octet, fourth_octet):
if (octet < 0) or (octet > 255):
valid_ip = False
if valid_ip:
print "\n\nThe IP address is valid: %s\n" % ip_addr
else:
sys.exit("\n\nInvalid IP address: %s\n" % ip_addr)
|
querybook/server/lib/notify/utils.py
|
shivammmmm/querybook
| 1,144 |
105944
|
import jinja2
from lib.notify.all_notifiers import get_notifier_class, DEFAULT_NOTIFIER
from logic import user as user_logic
from app.db import with_session
@with_session
def notify_user(user, template_name, template_params, notifier_name=None, session=None):
if notifier_name is None:
notification_preference = user_logic.get_user_settings(
user.id, "notification_preference", session=session
)
notifier_name = (
notification_preference.value
if notification_preference is not None
else DEFAULT_NOTIFIER
)
if notifier_name is None:
return
notifier = get_notifier_class(notifier_name)
markdown_message = render_message(template_name, template_params)
notifier.notify(user=user, message=markdown_message)
def render_message(template_name, context):
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader("./querybook/notification_templates/")
)
template = jinja_env.get_template(f"{template_name}.md")
return template.render(context)
|
fountain/resources/builtin.py
|
tzano/fountain
| 108 |
105966
|
# constants
PERCENTAGE_BUILTIN_SLOTS = 0.20
# Time
FOUNTAIN_MONTH = 'FOUNTAIN:MONTH'
FOUNTAIN_WEEKDAY = 'FOUNTAIN:WEEKDAY'
FOUNTAIN_HOLIDAYS = 'FOUNTAIN:HOLIDAYS'
FOUNTAIN_MONTH_DAY = 'FOUNTAIN:MONTH_DAY'
FOUNTAIN_TIME = 'FOUNTAIN:TIME'
FOUNTAIN_NUMBER = 'FOUNTAIN:NUMBER'
FOUNTAIN_DATE = 'FOUNTAIN:DATE'
# Location
FOUNTAIN_CITY = 'FOUNTAIN:CITY'
FOUNTAIN_COUNTRY = 'FOUNTAIN:COUNTRY'
FOUNTAIN_EDU_ORGANIZATION = 'FOUNTAIN:EDU_ORGANIZATION'
FOUNTAIN_ADMIN_ORGANIZATION = 'FOUNTAIN:ADMIN_ORGANIZATION'
FOUNTAIN_NONPROFIT_ORGANIZATION = 'FOUNTAIN:NONPROFIT_ORGANIZATION'
FOUNTAIN_ROOM = 'FOUNTAIN:ROOM'
# Demographics
FOUNTAIN_FAMOUSPEOPLE = 'FOUNTAIN:FAMOUSPEOPLE'
FOUNTAIN_MALE_FIRSTNAME = 'FOUNTAIN:MALE_FIRSTNAME'
FOUNTAIN_FEMALE_FIRSTNAME = 'FOUNTAIN:FEMALE_FIRSTNAME'
FOUNTAIN_NAME = 'FOUNTAIN:FEMALE_FIRSTNAME'
FOUNTAIN_LANGUAGE = 'FOUNTAIN:LANGUAGE'
FOUNTAIN_PROFESSION = 'FOUNTAIN:PROFESSION'
# Culture
FOUNTAIN_BOOK = 'FOUNTAIN:BOOK'
FOUNTAIN_ARTIST = 'FOUNTAIN:ARTIST'
FOUNTAIN_AUTHOR = 'FOUNTAIN:AUTHOR'
# Media
FOUNTAIN_TV_CHANNEL = 'FOUNTAIN:TV_CHANNEL'
FOUNTAIN_TV_SHOW = 'FOUNTAIN:TV_SHOW'
FOUNTAIN_RADIO_CHANNEL = 'FOUNTAIN:RADIO_CHANNEL'
FOUNTAIN_MOVIE = 'FOUNTAIN:MOVIE'
FOUNTAIN_MOVIE_SERIE = 'FOUNTAIN:MOVIE_SERIE'
FOUNTAIN_MUSIC_ALBUM = 'FOUNTAIN:MUSIC_ALBUM'
FOUNTAIN_MUSIC_EVENT = 'FOUNTAIN:MUSIC_EVENT'
# Food
FOUNTAIN_FRUIT = 'FOUNTAIN:FRUITS'
FOUNTAIN_DRINK = 'FOUNTAIN:DRINK'
FOUNTAIN_MEAL = 'FOUNTAIN:MEAL'
# EVENTS
FOUNTAIN_EVENT_TYPE = 'FOUNTAIN:EVENT_TYPE'
FOUNTAIN_GENRE = 'FOUNTAIN:GENRE'
# Tech
FOUNTAIN_MOBILE_APP = 'FOUNTAIN:MOBILE_APP'
FOUNTAIN_GAME = 'FOUNTAIN:GAME'
FOUNTAIN_SOCIAL_PLATFORM = 'FOUNTAIN:SOCIAL_PLATFORM'
# MISC
FOUNTAIN_COLOR = 'FOUNTAIN:COLOR'
FOUNTAIN_DEVICE = 'FOUNTAIN:DEVICE'
FOUNTAIN_SPORT = 'FOUNTAIN:SPORT'
FOUNTAIN_BUILTIN = {FOUNTAIN_FEMALE_FIRSTNAME, FOUNTAIN_MALE_FIRSTNAME, FOUNTAIN_FAMOUSPEOPLE, FOUNTAIN_CITY, FOUNTAIN_MONTH, FOUNTAIN_WEEKDAY, FOUNTAIN_HOLIDAYS, FOUNTAIN_MONTH_DAY}
RESOURCES = {
FOUNTAIN_MONTH: "month.csv",
FOUNTAIN_WEEKDAY: "weekday.csv",
FOUNTAIN_HOLIDAYS: "holidays.csv",
FOUNTAIN_MONTH_DAY: "month_day.csv",
FOUNTAIN_CITY: "city.csv",
FOUNTAIN_FAMOUSPEOPLE: "famous_people.csv",
FOUNTAIN_FEMALE_FIRSTNAME: "female_firstnames.csv",
FOUNTAIN_MALE_FIRSTNAME: "male_firstnames.csv"
}
|
flan/utils.py
|
purn3ndu/FLAN
| 206 |
106002
|
# Copyright 2021 The FLAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for FLAN."""
import abc
import re
from typing import Optional
import numpy as np
from flan import templates
def is_classification(flan_pattern_name: str):
"""Returns if the task is a classification task."""
# ReCoRD task has variable length options, so it is not called options in
# the input pattern. But it is classification.
if flan_pattern_name == 'record':
return True
input_patterns = [p[0] for p in templates.PATTERNS[flan_pattern_name]]
return np.any(['{options_}' in pattern for pattern in input_patterns])
class SeqioTaskName(metaclass=abc.ABCMeta):
"""Abstract class for seqio task name."""
@abc.abstractclassmethod
def get(cls, *args):
"""Returns task name."""
raise NotImplementedError
@abc.abstractclassmethod
def parse(cls, task_name: str):
"""Returns task name."""
raise NotImplementedError
@abc.abstractclassmethod
def match(cls, task_name: str) -> Optional[re.Match]:
"""Returns the match object if `task_name` matches the name pattern."""
raise NotImplementedError
class ZeroshotEvalTaskName(SeqioTaskName):
"""Task name for zeroshot eval."""
@classmethod
def get(cls, t_name: str, template_id: int) -> str:
return f'{t_name}_type_{template_id}'
@classmethod
def parse(cls, task_name):
match = cls.match(task_name)
return match[1], int(match[2])
@classmethod
def match(cls, task_name) -> Optional[re.Match]:
return re.fullmatch(r'^(.+)_type_(\d+)$', task_name)
class ZeroshotScoreEvalTaskName(SeqioTaskName):
"""Task name for zeroshot scoring eval."""
@classmethod
def get(cls, t_name: str, template_id: int) -> str:
return f'{t_name}_type_{template_id}_scoring_eval'
@classmethod
def parse(cls, task_name):
match = cls.match(task_name)
return match[1], int(match[2])
@classmethod
def match(cls, task_name) -> Optional[re.Match]:
return re.fullmatch(r'^(.+)_type_(\d+)_scoring_eval$', task_name)
class ZeroshotScoreEvalNoOptionTaskName(SeqioTaskName):
"""Task name for zeroshot scoring eval without options."""
@classmethod
def get(cls, t_name: str, template_id: int) -> str:
return f'{t_name}_type_{template_id}_score_eval_no_options'
@classmethod
def parse(cls, task_name):
match = cls.match(task_name)
return match[1], int(match[2])
@classmethod
def match(cls, task_name) -> Optional[re.Match]:
return re.fullmatch(r'^(.+)_type_(\d+)_score_eval_no_options$', task_name)
class ZeroshotScoreFLANNoOptionTaskName(SeqioTaskName):
"""Task name for zeroshot scoring eval without options."""
@classmethod
def get(cls, t_name: str, template_id: int) -> str:
return f'{t_name}_type_{template_id}_score_flan_no_options'
@classmethod
def parse(cls, task_name):
match = cls.match(task_name)
return match[1], int(match[2])
@classmethod
def match(cls, task_name) -> Optional[re.Match]:
return re.fullmatch(r'^(.+)_type_(\d+)_score_flan_no_options$', task_name)
class AllPromptsTaskName(SeqioTaskName):
"""Task name for the training job realized from all prompts."""
@classmethod
def get(cls, t_name: str) -> str:
return f'{t_name}_all_prompts'
@classmethod
def parse(cls, task_name):
match = cls.match(task_name)
return match[1]
@classmethod
def match(cls, task_name) -> Optional[re.Match]:
return re.fullmatch(r'^(.+)_all_prompts', task_name)
class ZeroshotTemplatedTaskName(SeqioTaskName):
"""Zeroshot task name with number of realized templates."""
@classmethod
def get(cls, t_name: str, num_templates: int) -> str:
return f'{t_name}_{num_templates}templates'
@classmethod
def parse(cls, task_name):
match = cls.match(task_name)
return match[1], int(match[2])
@classmethod
def match(cls, task_name) -> Optional[re.Match]:
return re.fullmatch(r'^(.+)_(\d+)templates$', task_name)
class XshotTemplatedTaskName(SeqioTaskName):
"""Zeroshot task name with number of realized templates."""
@classmethod
def get(cls, t_name: str, num_templates: int, num_shot: str) -> str:
return f'{t_name}_{num_templates}templates_{num_shot}_shot'
@classmethod
def parse(cls, task_name):
match = cls.match(task_name)
return match[1], int(match[2]), match[3]
@classmethod
def match(cls, task_name) -> Optional[re.Match]:
return re.fullmatch(r'^(.+)_(\d+)templates_([a-z]+)_shot$', task_name)
def remove_input_patterns_options(input_pattern: str) -> str:
"""Remove options from the input pattern."""
no_options_pattern = input_pattern.replace('{options_}', '')
no_options_pattern = no_options_pattern.replace('{options_str}', '').strip()
return no_options_pattern
def t_name_to_flan_pattern_name(t_name: str) -> str:
"""Converts `t_name` to flan `PATTERN` key.
Some seqio tasks use the same flan patterns.
Args:
t_name: Task config name.
Returns:
a key for `PATTERNS`.
"""
if 'para_crawl' in t_name:
return 'para_crawl'
elif 'wmt16_translate' in t_name:
return 'wmt16_translate'
elif t_name in {'arc_challenge', 'arc_easy'}:
return 'arc'
elif t_name in {'anli_r1', 'anli_r2', 'anli_r3'}:
return 'anli'
elif t_name in {'mnli_matched', 'mnli_mismatched'}:
return 'mnli'
return t_name
def get_eval_dir_basename(task: str, split: str) -> str:
"""Returns the basename for eval directory.
Args:
task: a seqio eval task name.
split: split name.
"""
return f'eval_{task}_{split}'
|
m2-modified/ims/common/agentless-system-crawler/crawler/plugins/systems/cpu_host_crawler.py
|
CCI-MOC/ABMI
| 108 |
106060
|
import logging
import psutil
from icrawl_plugin import IHostCrawler
from utils.features import CpuFeature
logger = logging.getLogger('crawlutils')
class CpuHostCrawler(IHostCrawler):
def get_feature(self):
return 'cpu'
def crawl(self, **kwargs):
logger.debug('Crawling %s' % (self.get_feature()))
for (idx, cpu) in enumerate(psutil.cpu_times_percent(percpu=True)):
feature_attributes = CpuFeature(
cpu.idle,
cpu.nice,
cpu.user,
cpu.iowait,
cpu.system,
cpu.irq,
cpu.steal,
100 - int(cpu.idle),
)
feature_key = '{0}-{1}'.format('cpu', idx)
yield (feature_key, feature_attributes, 'cpu')
|
np_ml/adaboost/adaboost.py
|
wwwy-binary/NP_ML
| 237 |
106096
|
<reponame>wwwy-binary/NP_ML<filename>np_ml/adaboost/adaboost.py
import numpy as np
# x > v or x < v
# y = 1 or -1
class TrivialClassification:
def __init__(self):
self.sign = None
self.thres = 0
def __str__(self):
return self.sign + " than " + str(self.thres)
def fit(self, x, y, w=None):
if w is None:
w = np.ones(len(y)) / len(y)
data = zip(x, y, w)
data = sorted(data, key=lambda s: s[0])
[x, y, w] = zip(*data)
y = np.array(y)
w = np.array(w)
correct = np.zeros((2, len(y))) # 0 row for x < v, 1 row for x >= v
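        # For each candidate threshold x[i], accumulate the weighted accuracy
        # of both stump orientations: row 0 predicts +1 when x < threshold,
        # row 1 predicts +1 when x >= threshold.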
for i in range(len(y)):
w_front = w[:i]
w_back = w[i:]
correct[0, i] += np.sum(w_front[y[:i] == 1]) + np.sum(w_back[y[i:] == -1])
correct[1, i] += np.sum(w_front[y[:i] == -1]) + np.sum(w_back[y[i:] == 1])
idx = np.argmax(correct, axis=1)
if correct[0, int(idx[0])] > correct[1, int(idx[1])]:
self.sign = "smaller"
self.thres = x[idx[0]]
else:
self.sign = "equal to or bigger"
self.thres = x[idx[1]]
def predict(self, x):
if self.sign == "smaller":
return (x < self.thres)*2-1
else:
return (x >= self.thres)*2-1
    def score(self, x, y, w=None):  # weighted error rate
if w is None:
w = np.ones(len(y)) / len(y)
return 1 - np.sum(w[self.predict(x) == y])
class AdaBoost:
def __init__(self, weak_learner, epsilon=0.01):
self.weak_learner_class = weak_learner
self.weak_learners = []
self.alphas = []
        self.epsilon = epsilon
@staticmethod
def calcAlpha(e):
return 0.5*np.log((1-e)/e)
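    # e.g. a weak learner with weighted error e = 0.1 gets alpha = 0.5*ln(0.9/0.1) ≈ 1.0986,
    # so lower-error learners receive larger votes in the final ensemble.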
def fit(self, x, y, detailed=False):
"""if use detailed, need weak learner has __str__ property"""
w = np.ones(len(y)) / len(y)
score = 1
epoch = 0
while score > self.epsilon:
epoch += 1
wl = self.weak_learner_class()
wl.fit(x, y, w)
alpha = AdaBoost.calcAlpha(wl.score(x, y, w))
self.alphas.append(alpha)
self.weak_learners.append(wl)
            # standard AdaBoost reweighting uses the current weak learner's predictions
            w = w*np.exp(-alpha*y*wl.predict(x))
w = w/np.sum(w)
score = self.score(x, y)
if detailed:
print("Epoch: {}".format(epoch))
print("Weak learner: {}".format(wl))
print("alpha: {}".format(alpha))
print("accuracy: {}".format(1-score))
print()
def predict(self, x):
ans = np.zeros(x.shape[0])
for i in range(len(self.alphas)):
ans += self.weak_learners[i].predict(x)*self.alphas[i]
return (ans > 0)*2-1
def score(self, x, y):
return 1 - np.sum(self.predict(x) == y)/len(y)
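# Illustrative usage (a sketch, not part of the original module): the classic 1-D toy set
# from the AdaBoost literature; exhaustive decision stumps reach zero training error after
# three rounds, so the fit loop terminates quickly.
if __name__ == '__main__':
    x = np.arange(10)
    y = np.array([1, 1, 1, -1, -1, -1, 1, 1, 1, -1])
    clf = AdaBoost(TrivialClassification)
    clf.fit(x, y, detailed=True)
    print("final training error: {}".format(clf.score(x, y)))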
|
dataviva/apps/ask/views.py
|
joelvisroman/dataviva-site
| 126 |
106113
|
from sqlalchemy import and_, or_, func
from datetime import datetime
from flask import Blueprint, request, make_response, render_template, flash, g, session, redirect, url_for, jsonify, abort, current_app
from flask.ext.babel import gettext
from dataviva import db, lm, view_cache
# from config import SITE_MIRROR
from dataviva.apps.user.models import User
from dataviva.apps.ask.models import Question, Reply, Status, Vote, TYPE_QUESTION, TYPE_REPLY, Flag
from dataviva.apps.ask.forms import AskForm, ReplyForm, SearchForm
from dataviva.utils.cached_query import cached_query, api_cache_key
import urllib2, urllib
mod = Blueprint('ask', __name__, url_prefix='/<lang_code>/ask')
RESULTS_PER_PAGE = 10
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', g.locale)
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.route('/questions/', methods=['GET', 'POST'], defaults={'page': 1})
def question_list(page):
# get URL parameters for results per page and ordering options
order = request.args.get('order', 'votes') # options = 'votes' or 'newest'
type = request.args.get('type', 'all') # options = 'all' or 'question' or 'comment' or 'contact'
offset = request.args.get('offset', 0)
search_term = request.args.get('q', None)
if search_term:
search_term = search_term.encode('utf-8')
limit = 25
lang = request.args.get('lang', None) or g.locale
# lets find the questions to load in the page
# only the approved questions
approved = Status.query.filter_by(name='Approved').first()
questions = Question.query.filter_by(status = approved)
# if the user has submitted a search, filter by that term
if search_term:
like_str = "%{0}%".format(search_term)
questions = questions.filter(or_(Question.question.like(like_str),Question.body.like(like_str),Question.status_notes.like(like_str)))
if type == "question":
questions = questions.filter_by(type_id='1')
elif type == "comment":
questions = questions.filter_by(type_id='2')
elif type == "contact":
questions = questions.filter_by(type_id='3')
# if we are ordering the questions by newest get them ordered chronologically
if order == "newest":
if g.locale == "pt":
questions = questions.order_by(Question.timestamp.desc(),Question.language.desc())
else:
questions = questions.order_by(Question.timestamp.desc(),Question.language)
questions = questions.order_by(Question.timestamp.desc())
questions = questions.limit(limit).offset(offset)
questions = [q.serialize() for q in questions.all()]
# otherwise we are ordering the questions by votes
else:
questions = questions.limit(limit).offset(offset)
ids = [q.id for q in questions]
# raise Exception(ids)
votes_subq = db.session.query(Vote, func.count('*').label('vote_count')).group_by(Vote.type_id).subquery()
if lang == "pt":
questions = db.session.query(Question, votes_subq.c.vote_count) \
.outerjoin(votes_subq, and_(Question.id==votes_subq.c.type_id, votes_subq.c.type==TYPE_QUESTION)) \
.filter(Question.status == approved) \
.filter(Question.id.in_(ids)) \
.filter(Question.language==lang) \
.order_by(votes_subq.c.vote_count.desc(),Question.language.desc())
else:
questions = db.session.query(Question, votes_subq.c.vote_count) \
.outerjoin(votes_subq, and_(Question.id==votes_subq.c.type_id, votes_subq.c.type==TYPE_QUESTION)) \
.filter(Question.status == approved) \
.filter(Question.id.in_(ids)) \
.filter(Question.language==lang) \
.order_by(votes_subq.c.vote_count.desc(),Question.language)
# .limit(limit).offset(offset)
questions = [q[0].serialize() for q in questions]
ret = jsonify({"activities":questions})
ret.headers.add('Last-Modified', datetime.now())
ret.headers.add('Expires', '-1')
ret.headers.add('Cache-Control', 'must-revalidate, private')
return ret
@mod.route('/question/<slug>/vote/')
@mod.route('/question/<slug>/vote/<user>/')
def question_vote(slug, user=None):
q = Question.query.filter_by(slug=slug).first_or_404()
    # if user and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
    #     g.user = User.query.get(user)
    if g.user is None or not g.user.is_authenticated:
        return jsonify({"error": gettext("You need to be logged in to vote.")})
    # elif user is None and g.user is None:
    #     abort(404)
# if user is None:
# try:
# opener = urllib2.urlopen("{0}ask/question/{1}/vote/{2}/".format(SITE_MIRROR,slug,g.user.id),None,5)
# except:
# return jsonify({"error": gettext("The server is not responding. Please try again later.")})
vote = q.votes.filter_by(user=g.user).first()
if vote:
db.session.delete(vote)
db.session.commit()
return jsonify({"success": -1})
else:
new_vote = Vote(user=g.user, type=TYPE_QUESTION, type_id=q.id)
db.session.add(new_vote)
db.session.commit()
return jsonify({"success": 1})
@mod.route('/reply/<int:id>/vote/')
@mod.route('/reply/<int:id>/vote/<user>/')
def reply_vote(id, user=None):
reply = Reply.query.get_or_404(id)
# if user and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
# g.user = User.query.get(user)
if g.user is None or not g.user.is_authenticated:
return jsonify({"error": gettext("You need to be logged in to vote.")})
# elif user is None and g.user is None:
# abort(404)
# if user is None:
# try:
# opener = urllib2.urlopen("{0}ask/reply/{1}/vote/{2}/".format(SITE_MIRROR,id,g.user.id),None,5)
# except:
# return jsonify({"error": gettext("The server is not responding. Please try again later.")})
vote = reply.votes.filter_by(user=g.user).first()
if vote:
db.session.delete(vote)
db.session.commit()
return jsonify({"success": -1})
else:
new_vote = Vote(user=g.user, type=TYPE_REPLY, type_id=reply.id)
db.session.add(new_vote)
db.session.commit()
return jsonify({"success": 1})
@mod.route('/reply/<int:id>/flag/')
@mod.route('/reply/<int:id>/flag/<user>/')
def reply_flag(id, user=None):
reply = Reply.query.get_or_404(id)
# if user and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
# g.user = User.query.get(user)
if g.user is None or not g.user.is_authenticated:
return jsonify({"error": gettext("You need to be logged in to flag replies.")})
# elif user is None and g.user is None:
# abort(404)
# if user is None:
# try:
# opener = urllib2.urlopen("{0}ask/reply/{1}/flag/{2}/".format(SITE_MIRROR,id,g.user.id),None,5)
# except:
# return jsonify({"error": gettext("The server is not responding. Please try again later.")})
flag = reply.flags.filter_by(user=g.user).first()
if flag:
db.session.delete(flag)
db.session.commit()
return jsonify({"success": -1})
else:
new_flag = Flag(user=g.user, reply_id=reply.id)
db.session.add(new_flag)
db.session.commit()
return jsonify({"success": 1})
|
virtual/lib/python3.6/site-packages/pylint/test/functional/yield_from_iterable_py33.py
|
drewheathens/The-Moringa-Tribune
| 463 |
106145
|
<filename>virtual/lib/python3.6/site-packages/pylint/test/functional/yield_from_iterable_py33.py
"""
Check that `yield from`-statement takes an iterable.
"""
# pylint: disable=missing-docstring
def to_ten():
yield from 10 # [not-an-iterable]
|
insights/tests/client/collection_rules/test_get_conf_update.py
|
mglantz/insights-core
| 121 |
106152
|
# -*- coding: UTF-8 -*-
from .helpers import insights_upload_conf
from mock.mock import patch
from pytest import raises
collection_rules = {"version": "1.2.3"}
collection_rules_file = "/tmp/collection-rules"
@patch("insights.client.collection_rules.InsightsUploadConf.get_conf_file")
@patch("insights.client.collection_rules.InsightsUploadConf.get_collection_rules", return_value=None)
def test_load_from_file(get_collection_rules, get_conf_file):
"""
Falls back to file if collection rules are not downloaded.
"""
upload_conf = insights_upload_conf()
result = upload_conf.get_conf_update()
get_collection_rules.assert_called_once_with()
get_conf_file.assert_called_once_with()
assert result is get_conf_file.return_value
@patch("insights.client.collection_rules.InsightsUploadConf.get_conf_file")
@patch("insights.client.collection_rules.InsightsUploadConf.get_collection_rules", return_value={"some": "value"})
def test_no_version_error(get_collection_rules, get_conf_file):
"""
Error is raised if there is no version in the collection rules loaded from URL.
"""
upload_conf = insights_upload_conf()
with raises(ValueError):
upload_conf.get_conf_update()
get_collection_rules.assert_called_once_with()
get_conf_file.assert_not_called()
@patch("insights.client.collection_rules.constants.collection_rules_file", collection_rules_file)
@patch("insights.client.collection_rules.InsightsUploadConf.get_conf_file")
@patch("insights.client.collection_rules.InsightsUploadConf.get_collection_rules", return_value=collection_rules)
def test_load_from_url(get_collection_rules, get_conf_file):
"""
Return collection rules loaded from URL with added file path.
"""
upload_conf = insights_upload_conf()
actual_result = upload_conf.get_conf_update()
get_collection_rules.assert_called_once_with()
get_conf_file.assert_not_called()
expected_result = collection_rules.copy()
expected_result["file"] = collection_rules_file
assert actual_result == expected_result
|
Codeforces/95 Beta Division 2/Problem A/A.py
|
VastoLorde95/Competitive-Programming
| 170 |
106175
|
<filename>Codeforces/95 Beta Division 2/Problem A/A.py
s = raw_input()
if s.upper() == s or s[1:].upper() == s[1:]: print s.swapcase()
else: print s
|
fips-files/generators/Shader.py
|
infancy/oryol
| 1,707 |
106176
|
'''
Code generator for shader libraries.
'''
Version = 49
import os, platform, json
import genutil as util
from util import glslcompiler, shdc
from mod import log
import zlib # only for crc32
if platform.system() == 'Windows' :
from util import hlslcompiler
if platform.system() == 'Darwin' :
from util import metalcompiler
slVersions = {
'GLSL': ['glsl330'],
'GLES': ['glsl100', 'glsles3'],
'MSL': ['metal'],
'HLSL': ['hlsl']
}
oryolSlangTypes = {
'glsl100': 'Oryol::ShaderLang::GLSL100',
'glsl330': 'Oryol::ShaderLang::GLSL330',
'glsles3': 'Oryol::ShaderLang::GLSLES3',
'hlsl': 'Oryol::ShaderLang::HLSL5',
'metal': 'Oryol::ShaderLang::Metal'
}
def isGLSL(sl):
return sl in ['glsl100', 'glsl330', 'glsles3']
def isHLSL(sl):
return sl == 'hlsl'
def isMetal(sl):
return sl == 'metal'
validVsInNames = [
'position', 'normal', 'texcoord0', 'texcoord1', 'texcoord2', 'texcoord3',
'tangent', 'binormal', 'weights', 'indices', 'color0', 'color1',
'instance0', 'instance1', 'instance2', 'instance3'
]
validInOutTypes = [ 'float', 'vec2', 'vec3', 'vec4' ]
validUniformTypes = [ 'mat4', 'mat2', 'vec4', 'vec3', 'vec2', 'float' ]
# size of uniform array types must currently be multiple of 16,
# because of std140 padding rules
validUniformArrayTypes = [ 'mat4', 'mat2', 'vec4' ]
uniformCType = {
'float': 'float',
'vec2': 'glm::vec2',
'vec3': 'glm::vec3',
'vec4': 'glm::vec4',
'mat2': 'glm::mat2',
'mat3': 'glm::mat3',
'mat4': 'glm::mat4',
}
uniformCSize = {
'float': 4,
'vec2': 8,
'vec3': 12,
'vec4': 16,
'mat2': 16,
'mat3': 36,
'mat4': 64,
}
attrOryolType = {
'float': 'Oryol::VertexFormat::Float',
'vec2': 'Oryol::VertexFormat::Float2',
'vec3': 'Oryol::VertexFormat::Float3',
'vec4': 'Oryol::VertexFormat::Float4'
}
attrOryolName = {
'position': 'Oryol::VertexAttr::Position',
'normal': 'Oryol::VertexAttr::Normal',
'texcoord0': 'Oryol::VertexAttr::TexCoord0',
'texcoord1': 'Oryol::VertexAttr::TexCoord1',
'texcoord2': 'Oryol::VertexAttr::TexCoord2',
'texcoord3': 'Oryol::VertexAttr::TexCoord3',
'tangent': 'Oryol::VertexAttr::Tangent',
'binormal': 'Oryol::VertexAttr::Binormal',
'weights': 'Oryol::VertexAttr::Weights',
'indices': 'Oryol::VertexAttr::Indices',
'color0': 'Oryol::VertexAttr::Color0',
'color1': 'Oryol::VertexAttr::Color1',
'instance0': 'Oryol::VertexAttr::Instance0',
'instance1': 'Oryol::VertexAttr::Instance1',
'instance2': 'Oryol::VertexAttr::Instance2',
'instance3': 'Oryol::VertexAttr::Instance3'
}
validTextureTypes = [
'sampler2D', 'samplerCube', 'sampler3D', 'sampler2DArray'
]
texOryolType = {
'sampler2D': 'Oryol::TextureType::Texture2D',
'samplerCube': 'Oryol::TextureType::TextureCube',
'sampler3D': 'Oryol::TextureType::Texture3D',
'sampler2DArray': 'Oryol::TextureType::TextureArray',
}
#-------------------------------------------------------------------------------
class Line :
def __init__(self, content, path='', lineNumber=0) :
self.content = content
self.include = None # name of an included block
self.path = path
self.lineNumber = lineNumber
#-------------------------------------------------------------------------------
class Snippet :
def __init__(self) :
self.name = None
self.lines = []
#-------------------------------------------------------------------------------
class Block(Snippet) :
def __init__(self, name) :
Snippet.__init__(self)
self.name = name
def getTag(self) :
return 'block'
#-------------------------------------------------------------------------------
class Shader(Snippet) :
def __init__(self, name) :
Snippet.__init__(self)
self.name = name
self.slReflection = {} # reflection by shader language
self.generatedSource = None
#-------------------------------------------------------------------------------
class VertexShader(Shader) :
def __init__(self, name) :
Shader.__init__(self, name)
def getTag(self) :
return 'vs'
#-------------------------------------------------------------------------------
class FragmentShader(Shader) :
def __init__(self, name) :
Shader.__init__(self, name)
def getTag(self) :
return 'fs'
#-------------------------------------------------------------------------------
class Program() :
def __init__(self, name, vs, fs, filePath, lineNumber) :
self.name = name
self.vs = vs
self.fs = fs
self.filePath = filePath
self.lineNumber = lineNumber
def getTag(self) :
return 'program'
#-------------------------------------------------------------------------------
class Parser :
def __init__(self, shaderLib) :
self.shaderLib = shaderLib
self.fileName = None
self.lineNumber = 0
self.current = None
self.stack = []
self.inComment = False
def stripComments(self, line) :
'''
Remove comments from a single line, can carry
over to next or from previous line.
'''
done = False
while not done :
# if currently in comment, look for end-of-comment
if self.inComment :
endIndex = line.find('*/')
if endIndex == -1 :
# entire line is comment
if '/*' in line or '//' in line :
util.fmtError('comment in comment!')
else :
return ''
else :
comment = line[:endIndex+2]
if '/*' in comment or '//' in comment :
util.fmtError('comment in comment!')
else :
line = line[endIndex+2:]
self.inComment = False
# clip off winged comment (if exists)
wingedIndex = line.find('//')
if wingedIndex != -1 :
line = line[:wingedIndex]
# look for start of comment
startIndex = line.find('/*')
if startIndex != -1 :
# ...and for the matching end...
endIndex = line.find('*/', startIndex)
if endIndex != -1 :
line = line[:startIndex] + line[endIndex+2:]
else :
# comment carries over to next line
self.inComment = True
line = line[:startIndex]
done = True
else :
# no comment until end of line, done
                    done = True
line = line.strip(' \t\n\r')
return line
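    # Illustrative behaviour of stripComments (follows directly from the logic above):
    #   'foo(); // trailing'  -> 'foo();'
    #   '/* a */ bar'         -> 'bar'
    #   an unmatched '/*' sets self.inComment so following lines are skipped until '*/'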
def push(self, obj) :
self.stack.append(self.current)
self.current = obj
def pop(self) :
        self.current = self.stack.pop()
def onBlock(self, args) :
if len(args) != 1 :
util.fmtError("@block must have 1 arg (name)")
if self.current is not None :
util.fmtError("@block must be at top level (missing @end in '{}'?)".format(self.current.name))
name = args[0]
if name in self.shaderLib.blocks :
util.fmtError("@block '{}' already defined".format(name))
block = Block(name)
self.shaderLib.blocks[name] = block
self.push(block)
def onVertexShader(self, args) :
if len(args) != 1:
util.fmtError("@vs must have 1 arg (name)")
if self.current is not None :
util.fmtError("cannot nest @vs (missing @end in '{}'?)".format(self.current.name))
name = args[0]
if name in self.shaderLib.vertexShaders :
util.fmtError("@vs {} already defined".format(name))
vs = VertexShader(name)
self.shaderLib.shaders.append(vs)
self.shaderLib.vertexShaders[name] = vs
self.push(vs)
def onFragmentShader(self, args) :
if len(args) != 1:
util.fmtError("@fs must have 1 arg (name)")
if self.current is not None :
util.fmtError("cannot nest @fs (missing @end in '{}'?)".format(self.current.name))
name = args[0]
if name in self.shaderLib.fragmentShaders :
util.fmtError("@fs {} already defined!".format(name))
fs = FragmentShader(name)
self.shaderLib.shaders.append(fs)
self.shaderLib.fragmentShaders[name] = fs
self.push(fs)
def onProgram(self, args) :
if len(args) != 3:
util.fmtError("@program must have 3 args (name vs fs)")
if self.current is not None :
util.fmtError("cannot nest @program (missing @end tag in '{}'?)".format(self.current.name))
name = args[0]
vs = args[1]
fs = args[2]
prog = Program(name, vs, fs, self.fileName, self.lineNumber)
self.shaderLib.programs[name] = prog
def onInclude(self, args) :
if len(args) != 1:
util.fmtError("@include must have 1 arg (name of included block)")
if not self.current or not self.current.getTag() in ['vs', 'fs'] :
util.fmtError("@include must come after @vs or @fs!")
if self.current:
l = Line(None, self.fileName, self.lineNumber)
l.include = args[0]
self.current.lines.append(l)
def onEnd(self, args) :
if not self.current or not self.current.getTag() in ['block', 'vs', 'fs'] :
util.fmtError("@end must come after @block, @vs or @fs!")
if len(args) != 0:
util.fmtError("@end must not have arguments")
if self.current.getTag() in ['block', 'vs', 'fs'] and len(self.current.lines) == 0 :
util.fmtError("no source code lines in @block, @vs or @fs section")
self.pop()
def parseLine(self, line) :
line = self.stripComments(line)
if line != '':
tagStartIndex = line.find('@')
if tagStartIndex != -1 :
if tagStartIndex > 0 :
util.fmtError("only whitespace allowed in front of tag")
if line.find(';') != -1 :
util.fmtError("no semicolons allowed in tag lines")
tagAndArgs = line[tagStartIndex+1 :].split()
tag = tagAndArgs[0]
args = tagAndArgs[1:]
if tag == 'block':
self.onBlock(args)
elif tag == 'vs':
self.onVertexShader(args)
elif tag == 'fs':
self.onFragmentShader(args)
elif tag == 'include':
self.onInclude(args)
elif tag == 'program':
self.onProgram(args)
elif tag == 'end':
self.onEnd(args)
else :
util.fmtError("unrecognized @ tag '{}'".format(tag))
elif self.current is not None:
self.current.lines.append(Line(line, self.fileName, self.lineNumber))
def parseSource(self, fileName) :
f = open(fileName, 'r')
self.fileName = fileName
self.lineNumber = 0
for line in f :
util.setErrorLocation(self.fileName, self.lineNumber)
self.parseLine(line)
self.lineNumber += 1
f.close()
if self.current is not None :
util.fmtError('missing @end at end of file')
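# Illustrative shader-source layout accepted by the Parser above (tag names and argument
# counts follow the on* handlers; the GLSL bodies are placeholders):
#   @block lighting
#   ...shared GLSL code...
#   @end
#   @vs myVS
#   @include lighting
#   ...vertex shader GLSL...
#   @end
#   @fs myFS
#   ...fragment shader GLSL...
#   @end
#   @program MyShader myVS myFS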
#-------------------------------------------------------------------------------
class ShaderLibrary :
'''
This represents the entire shader lib.
'''
def __init__(self, inputs) :
self.sources = inputs
self.blocks = {}
self.shaders = []
self.vertexShaders = {}
self.fragmentShaders = {}
self.programs = {}
self.current = None
def parseSources(self) :
parser = Parser(self)
for source in self.sources :
parser.parseSource(source)
def validate(self, slangs) :
'''
Runs additional validation check after programs are resolved and before
shader code is generated:
- check whether each vs and fs is part of a program
- check vertex shader inputs for valid types and names
- check whether vertex shader output matches fragment shader input
'''
for shd in self.shaders:
for prog in self.programs.values():
prog_shd = prog.vs if shd.getTag()=='vs' else prog.fs
if shd.name == prog_shd:
break
else:
util.setErrorLocation(shd.lines[0].path, shd.lines[0].lineNumber)
util.fmtError("vertex shader '{}' is not part of a program".format(shd.name), False)
fatalError = True
for slang in slangs:
for vs in self.vertexShaders.values():
refl = vs.slReflection[slang]
util.setErrorLocation(vs.lines[0].path, vs.lines[0].lineNumber)
vs_inputs = refl['inputs']
for vs_input in vs_inputs:
if vs_input['name'] not in validVsInNames:
util.fmtError("invalid vertex shader input name '{}', must be ({})".format(vs_input['name'], ','.join(validVsInNames)))
if vs_input['type'] not in validInOutTypes:
util.fmtError("invalid vertex shader input type '{}', must be ({})".format(vs_input['type'], ','.join(validInOutTypes)))
for ub in refl['uniform_blocks']:
for m in ub['members']:
validTypes = validUniformTypes if m['num']==1 else validUniformArrayTypes
if m['type'] not in validTypes:
util.fmtError("invalid uniform block member type '{}', must be ({})".format(m['type'], ','.join(validTypes)))
for fs in self.fragmentShaders.values():
refl = fs.slReflection[slang]
util.setErrorLocation(fs.lines[0].path, fs.lines[0].lineNumber)
for ub in refl['uniform_blocks']:
for m in ub['members']:
validTypes = validUniformTypes if m['num']==1 else validUniformArrayTypes
if m['type'] not in validTypes:
util.fmtError("invalid uniform block member type '{}', must be ({})".format(m['type'], ','.join(validTypes)))
for prog in self.programs.values():
vs = self.vertexShaders[prog.vs]
fs = self.fragmentShaders[prog.fs]
vs_outputs = vs.slReflection[slang]['outputs']
fs_inputs = fs.slReflection[slang]['inputs']
vs_fs_error = False
if len(vs_outputs) == len(fs_inputs):
for vs_out in vs_outputs:
in_out_match = False
for fs_in in fs_inputs:
if (vs_out['name'] == fs_in['name']) and (vs_out['type'] == fs_in['type']):
in_out_match = True
break
if not in_out_match:
vs_fs_error = True
                else:
                    # number of inputs/outputs don't match
                    vs_fs_error = True
                if vs_fs_error:
                    util.setErrorLocation(vs.lines[0].path, vs.lines[0].lineNumber)
                    util.fmtError("outputs of vs '{}' don't match inputs of fs '{}' (unused items might have been removed)".format(vs.name, fs.name))
def generateShaderSources(self):
for shd in self.shaders:
lines = []
for l in shd.lines:
# @include statement?
if l.include:
if l.include not in self.blocks:
                        util.setErrorLocation(l.path, l.lineNumber)
                        util.fmtError("included block '{}' doesn't exist".format(l.include))
for lb in self.blocks[l.include].lines:
lines.append(lb)
else:
lines.append(l)
shd.generatedSource = lines
def loadReflection(self, shd, base_path, slangs):
for sl in slangs:
refl_path = '{}.{}.json'.format(base_path, sl)
with open(refl_path, 'r') as f:
shd.slReflection[sl] = json.load(f)
def compileShader(self, input, shd, base_path, slangs, args):
shd_type = shd.getTag()
shd_base_path = base_path + '_' + shd.name
glslcompiler.compile(shd.generatedSource, shd_type, shd_base_path, slangs[0], args)
shdc.compile(input, shd_base_path, slangs)
self.loadReflection(shd, shd_base_path, slangs)
if 'metal' in slangs:
c_name = '{}_{}_metallib'.format(shd.name, shd_type)
metalcompiler.compile(shd.generatedSource, shd_base_path, c_name, args)
if 'hlsl' in slangs:
c_name = '{}_{}_hlsl5'.format(shd.name, shd_type)
hlslcompiler.compile(shd.generatedSource, shd_base_path, shd_type, c_name, args)
def compile(self, input, out_hdr, slangs, args) :
log.info('## shader code gen: {}'.format(input))
base_path = os.path.splitext(out_hdr)[0]
for shd in self.shaders:
self.compileShader(input, shd, base_path, slangs, args)
#-------------------------------------------------------------------------------
def writeHeaderTop(f, shdLib) :
f.write('#pragma once\n')
f.write('//-----------------------------------------------------------------------------\n')
f.write('/* #version:{}#\n'.format(Version))
f.write(' machine generated, do not edit!\n')
f.write('*/\n')
f.write('#include "Gfx/GfxTypes.h"\n')
f.write('#include "glm/vec2.hpp"\n')
f.write('#include "glm/vec3.hpp"\n')
f.write('#include "glm/vec4.hpp"\n')
f.write('#include "glm/mat2x2.hpp"\n')
f.write('#include "glm/mat3x3.hpp"\n')
f.write('#include "glm/mat4x4.hpp"\n')
f.write('#include "Resource/Id.h"\n')
#-------------------------------------------------------------------------------
def writeHeaderBottom(f, shdLib) :
f.write('\n')
#-------------------------------------------------------------------------------
def getUniformBlockTypeHash(ub_refl):
hashString = ''
for member in ub_refl['members']:
hashString += member['type']
hashString += str(member['num'])
return zlib.crc32(hashString.encode('ascii')) & 0xFFFFFFFF
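# Illustrative: members [('mat4', 1), ('vec4', 4)] hash the string 'mat41vec44', so the
# layout hash changes whenever member types, order or array sizes change.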
#-------------------------------------------------------------------------------
def roundup(val, round_to):
return (val + (round_to - 1)) & ~(round_to - 1)
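# e.g. roundup(17, 16) == 32 and roundup(16, 16) == 16; the bit trick assumes round_to
# is a power of two, which holds for the 16-byte std140 alignment used here.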
#-------------------------------------------------------------------------------
def writeProgramHeader(f, shdLib, prog, slang) :
f.write('namespace ' + prog.name + ' {\n')
for stage in ['VS', 'FS']:
shd = shdLib.vertexShaders[prog.vs] if stage == 'VS' else shdLib.fragmentShaders[prog.fs]
refl = shd.slReflection[slang]
for ub in refl['uniform_blocks']:
cur_offset = 0
f.write(' #pragma pack(push,1)\n')
f.write(' struct {} {{\n'.format(ub['type']))
f.write(' static const int _bindSlotIndex = {};\n'.format(ub['slot']))
f.write(' static const Oryol::ShaderStage::Code _bindShaderStage = Oryol::ShaderStage::{};\n'.format(stage))
f.write(' static const uint32_t _layoutHash = {};\n'.format(getUniformBlockTypeHash(ub)))
for m in ub['members']:
next_offset = m['offset']
if next_offset > cur_offset:
f.write(' uint8_t _pad_{}[{}];\n'.format(cur_offset, next_offset-cur_offset))
cur_offset = next_offset
if m['num'] == 1:
f.write(' {} {};\n'.format(uniformCType[m['type']], m['name']))
else:
f.write(' {} {}[{}];\n'.format(uniformCType[m['type']], m['name'], m['num']))
cur_offset += uniformCSize[m['type']] * m['num']
# on GL, add padding bytes until struct size is multiple of vec4 size
if 'glsl' in slang:
round16 = roundup(cur_offset, 16)
if cur_offset != round16:
f.write(' uint8_t _pad_{}[{}];\n'.format(cur_offset, round16-cur_offset))
f.write(' };\n')
f.write(' #pragma pack(pop)\n')
for tex in refl['textures']:
f.write(' static const int {} = {};\n'.format(tex['name'], tex['slot']))
f.write(' extern Oryol::ShaderSetup Setup();\n')
f.write('}\n')
#-------------------------------------------------------------------------------
def generateHeader(absHeaderPath, shdLib, slangs) :
f = open(absHeaderPath, 'w')
writeHeaderTop(f, shdLib)
for prog in shdLib.programs.values() :
writeProgramHeader(f, shdLib, prog, slangs[0])
writeHeaderBottom(f, shdLib)
f.close()
#-------------------------------------------------------------------------------
def writeSourceTop(f, absSourcePath, shdLib, slang) :
path, hdrFileAndExt = os.path.split(absSourcePath)
hdrFile, ext = os.path.splitext(hdrFileAndExt)
f.write('//-----------------------------------------------------------------------------\n')
f.write('// #version:{}# machine generated, do not edit!\n'.format(Version))
f.write('//-----------------------------------------------------------------------------\n')
f.write('#include "Pre.h"\n')
f.write('#include "' + hdrFile + '.h"\n')
f.write('\n')
if slang == 'hlsl':
f.write('typedef unsigned char BYTE;\n')
#-------------------------------------------------------------------------------
def writeSourceBottom(f, shdLib) :
f.write('\n')
#-------------------------------------------------------------------------------
def writeShaderSource(f, absPath, shdLib, shd, slVersion) :
base_path = os.path.splitext(absPath)[0] + '_' + shd.name
if isGLSL(slVersion):
# GLSL source code is directly inlined for runtime-compilation
f.write('static const char* {}_{}_src = \n'.format(shd.name, slVersion))
glsl_src_path = '{}.{}'.format(base_path, slVersion)
with open(glsl_src_path, 'r') as rf:
lines = rf.read().splitlines()
for line in lines:
f.write('"{}\\n"\n'.format(line))
f.write(';\n')
elif isHLSL(slVersion):
# for HLSL, the actual shader code has been compiled into a header by FXC
# also write the generated shader source into a C comment as
# human-readable version
f.write('/*\n')
hlsl_src_path = base_path + '.hlsl'
hlsl_bin_path = base_path + '.hlsl.h'
with open(hlsl_src_path, 'r') as rf:
lines = rf.read().splitlines()
for line in lines:
line = line.replace('/*', '__').replace('*/', '__')
f.write('"{}\\n"\n'.format(line))
f.write('*/\n')
f.write('#include "{}"\n'.format(hlsl_bin_path))
elif isMetal(slVersion):
# for Metal, the shader has been compiled into a binary shader
# library file, which needs to be embedded into the C header
f.write('/*\n')
metal_src_path = base_path + '.metal'
metal_bin_path = base_path + '.metallib.h'
with open(metal_src_path, 'r') as rf:
lines = rf.read().splitlines()
for line in lines:
line = line.replace('/*', '__').replace('*/', '__')
f.write('"{}\\n"\n'.format(line))
f.write('*/\n')
f.write('#include "{}"\n'.format(metal_bin_path))
else :
util.fmtError("Invalid shader language id")
#-------------------------------------------------------------------------------
def writeInputVertexLayout(f, vs, slang) :
# writes a C++ VertexLayout definition into the generated source
# code, this is used to match mesh vertex layouts with
# vertex shader input signatures (e.g. required in D3D11),
# return the C++ name of the vertex layout
layoutName = '{}_input'.format(vs.name)
f.write(' Oryol::VertexLayout {};\n'.format(layoutName))
for inp in vs.slReflection[slang]['inputs'] :
f.write(' {}.Add({}, {});\n'.format(layoutName, attrOryolName[inp['name']], attrOryolType[inp['type']]))
return layoutName
#-------------------------------------------------------------------------------
def writeProgramSource(f, shdLib, prog, slangs) :
# write the Setup() function
f.write('Oryol::ShaderSetup ' + prog.name + '::Setup() {\n')
f.write(' Oryol::ShaderSetup setup("' + prog.name + '");\n')
vs = shdLib.vertexShaders[prog.vs]
fs = shdLib.fragmentShaders[prog.fs]
vsInputLayout = writeInputVertexLayout(f, vs, slangs[0])
f.write(' setup.SetInputLayout({});\n'.format(vsInputLayout))
vsName = vs.name
fsName = fs.name
for slang in slangs:
slangType = oryolSlangTypes[slang]
vsSource = '{}_{}_src'.format(vsName, slang)
fsSource = '{}_{}_src'.format(fsName, slang)
if isGLSL(slang):
f.write(' setup.SetProgramFromSources({}, {}, {});\n'.format(
                slangType, vsSource, fsSource))
elif isHLSL(slang):
vs_c_name = '{}_vs_hlsl5'.format(vs.name)
fs_c_name = '{}_fs_hlsl5'.format(fs.name)
f.write(' setup.SetProgramFromByteCode({}, {}, sizeof({}), {}, sizeof({}));\n'.format(
slangType, vs_c_name, vs_c_name, fs_c_name, fs_c_name))
elif isMetal(slang):
vs_c_name = '{}_vs_metallib'.format(vs.name)
fs_c_name = '{}_fs_metallib'.format(fs.name)
f.write(' setup.SetProgramFromByteCode({}, {}, sizeof({}), {}, sizeof({}), "main0", "main0");\n'.format(
slangType, vs_c_name, vs_c_name, fs_c_name, fs_c_name))
# add uniform layouts to setup object
for stage in ['VS', 'FS']:
shd = shdLib.vertexShaders[prog.vs] if stage == 'VS' else shdLib.fragmentShaders[prog.fs]
refl = shd.slReflection[slang]
# add uniform block layouts
for ub in refl['uniform_blocks']:
ub_size = ub['size']
if 'glsl' in slang:
ub_size = roundup(ub_size, 16)
f.write(' setup.AddUniformBlock("{}", "{}", {}, {}, {}::_bindShaderStage, {}::_bindSlotIndex);\n'.format(
ub['type'], ub['name'], getUniformBlockTypeHash(ub), ub_size, ub['type'], ub['type']))
# add textures layouts to setup objects
for tex in refl['textures']:
f.write(' setup.AddTexture("{}", {}, Oryol::ShaderStage::{}, {});\n'.format(tex['name'], texOryolType[tex['type']], stage, tex['slot']))
f.write(' return setup;\n')
f.write('}\n')
#-------------------------------------------------------------------------------
def generateSource(absSourcePath, shdLib, slangs) :
f = open(absSourcePath, 'w')
writeSourceTop(f, absSourcePath, shdLib, slangs[0])
for slang in slangs :
for vs in shdLib.vertexShaders.values() :
writeShaderSource(f, absSourcePath, shdLib, vs, slang)
for fs in shdLib.fragmentShaders.values() :
writeShaderSource(f, absSourcePath, shdLib, fs, slang)
for prog in shdLib.programs.values() :
writeProgramSource(f, shdLib, prog, slangs)
writeSourceBottom(f, shdLib)
f.close()
#-------------------------------------------------------------------------------
def generate(input, out_src, out_hdr, args) :
if util.isDirty(Version, [input], [out_src, out_hdr]) :
slangs = slVersions[args['slang']]
shaderLibrary = ShaderLibrary([input])
shaderLibrary.parseSources()
shaderLibrary.generateShaderSources()
shaderLibrary.compile(input, out_hdr, slangs, args)
shaderLibrary.validate(slangs)
generateSource(out_src, shaderLibrary, slangs)
generateHeader(out_hdr, shaderLibrary, slangs)
|
deep_qa/layers/backend/collapse_to_batch.py
|
richarajpal/deep_qa
| 459 |
106263
|
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class CollapseToBatch(MaskedLayer):
"""
Reshapes a higher order tensor, taking the first ``num_to_collapse`` dimensions after the batch
dimension and folding them into the batch dimension. For example, a tensor of shape (2, 4, 5,
3), collapsed with ``num_to_collapse = 2``, would become a tensor of shape (40, 3). We perform
identical computation on the input mask, if there is one.
This is essentially what Keras' ``TimeDistributed`` layer does (and then undoes) to apply a
layer to a higher-order tensor, and that's the intended use for this layer. However,
``TimeDistributed`` cannot handle distributing across dimensions with unknown lengths at graph
compilation time. This layer works even in that case. So, if your actual tensor shape at
graph compilation time looks like (None, None, None, 3), or (None, 4, None, 3), you can still
use this layer (and :class:`~deep_qa.layers.backend.expand_from_batch.ExpandFromBatch`) to get
the same result as ``TimeDistributed``. If your shapes are fully known at graph compilation
time, just use ``TimeDistributed``, as it's a nicer API for the same functionality.
Inputs:
- tensor with ``ndim >= 3``
Output:
- tensor with ``ndim = input_ndim - num_to_collapse``, with the removed dimensions folded
into the first (batch-size) dimension
Parameters
----------
num_to_collapse: int
The number of dimensions to fold into the batch size.
"""
def __init__(self, num_to_collapse: int, **kwargs):
self.num_to_collapse = num_to_collapse
super(CollapseToBatch, self).__init__(**kwargs)
@overrides
def call(self, inputs, mask=None):
return self.__collapse_tensor(inputs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if mask is None:
return None
return self.__collapse_tensor(mask)
@overrides
def compute_output_shape(self, input_shape):
return (None,) + input_shape[1 + self.num_to_collapse:]
@overrides
def get_config(self):
base_config = super(CollapseToBatch, self).get_config()
config = {'num_to_collapse': self.num_to_collapse}
config.update(base_config)
return config
def __collapse_tensor(self, tensor):
# If we were to call K.int_shape(inputs), we would get something back that has None in it
# (other than the batch dimension), because the shape is not fully known at graph
# compilation time. We can't do a reshape with more than one unknown dimension, which is
# why we're doing this whole layer in the first place instead of just using
# TimeDistributed. tf.reshape will let us pass in a tensor that has the shape, instead of
# just some ints. So we can use tf.shape(tensor) to get the actual runtime shape of the
# tensor _as a tensor_, which we then pass to tf.reshape().
new_shape = K.concatenate([[-1], K.shape(tensor)[1 + self.num_to_collapse:]], 0)
return K.reshape(tensor, new_shape)
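# Illustrative sketch (not from the original code base): with num_to_collapse=2, an input
# of shape (2, 4, 5, 3) comes out with shape (40, 3), matching the docstring example;
# ExpandFromBatch can then restore the folded dimensions.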
|
thumt/utils/utils.py
|
Demon-JieHao/Modeling-Structure-for-Transformer-Network
| 145 |
106323
|
# coding=utf-8
# Copyright 2018 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def session_run(monitored_session, args):
# Call raw TF session directly
return monitored_session._tf_sess().run(args)
|
torch2trt_dynamic/converters/LayerNorm.py
|
jinfagang/torch2trt_dynamic
| 155 |
106356
|
<filename>torch2trt_dynamic/converters/LayerNorm.py<gh_stars>100-1000
import numpy as np
import tensorrt as trt
from ..torch2trt_dynamic import (tensor_trt_get_shape_trt, tensorrt_converter,
torch_dim_to_trt_axes, trt_)
@tensorrt_converter('torch.nn.LayerNorm.forward')
def convert_LayerNorm(ctx):
module = ctx.method_args[0]
input = ctx.method_args[1]
normalized_shape = module.normalized_shape
weight = module.weight
bias = module.bias
eps = module.eps
output = ctx.method_return
eps_np = np.array([eps], dtype=np.float32)
keep_dims = True
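    # The network layers built below implement y = (x - mean(x)) / sqrt(var(x) + eps),
    # followed by the optional elementwise weight and bias, i.e. standard LayerNorm over
    # the normalized_shape dimensions.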
input_trt = trt_(ctx.network, input)
if len(input.shape) == 3:
input_shape_trt = tensor_trt_get_shape_trt(ctx.network, input_trt)
new_input_shape_trt = ctx.network.add_concatenation(
[trt_(ctx.network, 1), input_shape_trt]).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
layer.set_input(1, new_input_shape_trt)
input_trt = layer.get_output(0)
reduce_axes = torch_dim_to_trt_axes(
tuple(
range(
len(input_trt.shape) - len(normalized_shape),
len(input_trt.shape))))
mean_trt = ctx.network.add_reduce(input_trt, trt.ReduceOperation.AVG,
reduce_axes, keep_dims).get_output(0)
# compute variance over spatial (include eps, to reduce layer count)
delta_trt = ctx.network.add_elementwise(
input_trt, mean_trt, trt.ElementWiseOperation.SUB).get_output(0)
var_trt = ctx.network.add_scale(delta_trt, trt.ScaleMode.UNIFORM,
np.zeros_like(eps_np),
np.ones_like(eps_np),
2 * np.ones_like(eps_np)).get_output(0)
var_trt = ctx.network.add_reduce(var_trt, trt.ReduceOperation.AVG,
reduce_axes, keep_dims).get_output(0)
# compute sqrt(var + eps)
var_trt = ctx.network.add_scale(var_trt, trt.ScaleMode.UNIFORM, eps_np,
np.ones_like(eps_np),
0.5 * np.ones_like(eps_np)).get_output(0)
# compute final result
result_trt = ctx.network.add_elementwise(
delta_trt, var_trt, trt.ElementWiseOperation.DIV).get_output(0)
if len(input.shape) == 3:
layer = ctx.network.add_shuffle(result_trt)
layer.set_input(1, input_shape_trt)
result_trt = layer.get_output(0)
if weight is not None:
assert weight.ndim <= input.ndim
while weight.ndim < input.ndim:
weight = weight.unsqueeze(0)
weight_trt = trt_(ctx.network, weight)
layer = ctx.network.add_elementwise(result_trt, weight_trt,
trt.ElementWiseOperation.PROD)
result_trt = layer.get_output(0)
if bias is not None:
assert bias.ndim <= input.ndim
while bias.ndim < input.ndim:
bias = bias.unsqueeze(0)
bias_trt = trt_(ctx.network, bias)
layer = ctx.network.add_elementwise(result_trt, bias_trt,
trt.ElementWiseOperation.SUM)
result_trt = layer.get_output(0)
output._trt = result_trt
|
petl/test/transform/test_maps.py
|
arturponinski/petl
| 435 |
106369
|
<reponame>arturponinski/petl
from __future__ import absolute_import, print_function, division
from collections import OrderedDict
from petl.test.failonerror import assert_failonerror
from petl.test.helpers import ieq
from petl.transform.maps import fieldmap, rowmap, rowmapmany
from functools import partial
def test_fieldmap():
table = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, 'female', 17, 1.78, 74.4),
(4, 'male', 21, 1.33, 45.2),
(5, '-', 25, 1.65, 51.9))
mappings = OrderedDict()
mappings['subject_id'] = 'id'
mappings['gender'] = 'sex', {'male': 'M', 'female': 'F'}
mappings['age_months'] = 'age', lambda v: v * 12
mappings['bmi'] = lambda rec: rec['weight'] / rec['height'] ** 2
actual = fieldmap(table, mappings)
expect = (('subject_id', 'gender', 'age_months', 'bmi'),
(1, 'M', 16 * 12, 62.0 / 1.45 ** 2),
(2, 'F', 19 * 12, 55.4 / 1.34 ** 2),
(3, 'F', 17 * 12, 74.4 / 1.78 ** 2),
(4, 'M', 21 * 12, 45.2 / 1.33 ** 2),
(5, '-', 25 * 12, 51.9 / 1.65 ** 2))
ieq(expect, actual)
    ieq(expect, actual)  # can iterate twice?
# do it with suffix
actual = fieldmap(table)
actual['subject_id'] = 'id'
actual['gender'] = 'sex', {'male': 'M', 'female': 'F'}
actual['age_months'] = 'age', lambda v: v * 12
actual['bmi'] = '{weight} / {height}**2'
ieq(expect, actual)
# test short rows
table2 = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, 'female', 17, 1.78, 74.4),
(4, 'male', 21, 1.33, 45.2),
(5, '-', 25, 1.65))
expect = (('subject_id', 'gender', 'age_months', 'bmi'),
(1, 'M', 16 * 12, 62.0 / 1.45 ** 2),
(2, 'F', 19 * 12, 55.4 / 1.34 ** 2),
(3, 'F', 17 * 12, 74.4 / 1.78 ** 2),
(4, 'M', 21 * 12, 45.2 / 1.33 ** 2),
(5, '-', 25 * 12, None))
actual = fieldmap(table2, mappings)
ieq(expect, actual)
def test_fieldmap_record_access():
table = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, 'female', 17, 1.78, 74.4),
(4, 'male', 21, 1.33, 45.2),
(5, '-', 25, 1.65, 51.9))
mappings = OrderedDict()
mappings['subject_id'] = 'id'
mappings['gender'] = 'sex', {'male': 'M', 'female': 'F'}
mappings['age_months'] = 'age', lambda v: v * 12
mappings['bmi'] = lambda rec: rec.weight / rec.height ** 2
actual = fieldmap(table, mappings)
expect = (('subject_id', 'gender', 'age_months', 'bmi'),
(1, 'M', 16 * 12, 62.0 / 1.45 ** 2),
(2, 'F', 19 * 12, 55.4 / 1.34 ** 2),
(3, 'F', 17 * 12, 74.4 / 1.78 ** 2),
(4, 'M', 21 * 12, 45.2 / 1.33 ** 2),
(5, '-', 25 * 12, 51.9 / 1.65 ** 2))
ieq(expect, actual)
    ieq(expect, actual)  # can iterate twice?
def test_fieldmap_empty():
table = (('foo', 'bar'),)
expect = (('foo', 'baz'),)
mappings = OrderedDict()
mappings['foo'] = 'foo'
mappings['baz'] = 'bar', lambda v: v * 2
actual = fieldmap(table, mappings)
ieq(expect, actual)
def test_fieldmap_failonerror():
input_ = (('foo',), ('A',), (1,))
mapper_ = {'bar': ('foo', lambda v: v.lower())}
expect_ = (('bar',), ('a',), (None,))
assert_failonerror(
input_fn=partial(fieldmap, input_, mapper_),
expected_output=expect_)
def test_rowmap():
table = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, 'female', 17, 1.78, 74.4),
(4, 'male', 21, 1.33, 45.2),
(5, '-', 25, 1.65, 51.9))
def rowmapper(row):
transmf = {'male': 'M', 'female': 'F'}
return [row[0],
transmf[row[1]] if row[1] in transmf else row[1],
row[2] * 12,
row[4] / row[3] ** 2]
actual = rowmap(table, rowmapper, header=['subject_id', 'gender',
'age_months', 'bmi'])
expect = (('subject_id', 'gender', 'age_months', 'bmi'),
(1, 'M', 16 * 12, 62.0 / 1.45 ** 2),
(2, 'F', 19 * 12, 55.4 / 1.34 ** 2),
(3, 'F', 17 * 12, 74.4 / 1.78 ** 2),
(4, 'M', 21 * 12, 45.2 / 1.33 ** 2),
(5, '-', 25 * 12, 51.9 / 1.65 ** 2))
ieq(expect, actual)
    ieq(expect, actual)  # can iterate twice?
# test short rows
table2 = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, 'female', 17, 1.78, 74.4),
(4, 'male', 21, 1.33, 45.2),
(5, '-', 25, 1.65))
expect = (('subject_id', 'gender', 'age_months', 'bmi'),
(1, 'M', 16 * 12, 62.0 / 1.45 ** 2),
(2, 'F', 19 * 12, 55.4 / 1.34 ** 2),
(3, 'F', 17 * 12, 74.4 / 1.78 ** 2),
(4, 'M', 21 * 12, 45.2 / 1.33 ** 2))
actual = rowmap(table2, rowmapper, header=['subject_id', 'gender',
'age_months', 'bmi'])
ieq(expect, actual)
def test_rowmap_empty():
table = (('id', 'sex', 'age', 'height', 'weight'),)
def rowmapper(row):
transmf = {'male': 'M', 'female': 'F'}
return [row[0],
transmf[row[1]] if row[1] in transmf else row[1],
row[2] * 12,
row[4] / row[3] ** 2]
actual = rowmap(table, rowmapper, header=['subject_id', 'gender',
'age_months', 'bmi'])
expect = (('subject_id', 'gender', 'age_months', 'bmi'),)
ieq(expect, actual)
def test_rowmap_failonerror():
input_ = (('foo',), ('A',), (1,), ('B',))
mapper = lambda r: [r[0].lower()]
# exceptions in rowmappers do not generate an output row
expect_ = (('foo',), ('a',), ('b',))
assert_failonerror(
input_fn=partial(rowmap, input_, mapper, header=('foo',)),
expected_output=expect_)
def test_recordmap():
table = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, 'female', 17, 1.78, 74.4),
(4, 'male', 21, 1.33, 45.2),
(5, '-', 25, 1.65, 51.9))
def recmapper(rec):
transmf = {'male': 'M', 'female': 'F'}
return [rec['id'],
transmf[rec['sex']] if rec['sex'] in transmf else rec['sex'],
rec['age'] * 12,
rec['weight'] / rec['height'] ** 2]
actual = rowmap(table, recmapper, header=['subject_id', 'gender',
'age_months', 'bmi'])
expect = (('subject_id', 'gender', 'age_months', 'bmi'),
(1, 'M', 16 * 12, 62.0 / 1.45 ** 2),
(2, 'F', 19 * 12, 55.4 / 1.34 ** 2),
(3, 'F', 17 * 12, 74.4 / 1.78 ** 2),
(4, 'M', 21 * 12, 45.2 / 1.33 ** 2),
(5, '-', 25 * 12, 51.9 / 1.65 ** 2))
ieq(expect, actual)
    ieq(expect, actual)  # can iterate twice?
# test short rows
table2 = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, 'female', 17, 1.78, 74.4),
(4, 'male', 21, 1.33, 45.2),
(5, '-', 25, 1.65))
expect = (('subject_id', 'gender', 'age_months', 'bmi'),
(1, 'M', 16 * 12, 62.0 / 1.45 ** 2),
(2, 'F', 19 * 12, 55.4 / 1.34 ** 2),
(3, 'F', 17 * 12, 74.4 / 1.78 ** 2),
(4, 'M', 21 * 12, 45.2 / 1.33 ** 2))
actual = rowmap(table2, recmapper, header=['subject_id', 'gender',
'age_months', 'bmi'])
ieq(expect, actual)
def test_rowmapmany():
table = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, '-', 17, 1.78, 74.4),
(4, 'male', 21, 1.33))
def rowgenerator(row):
transmf = {'male': 'M', 'female': 'F'}
yield [row[0], 'gender',
transmf[row[1]] if row[1] in transmf else row[1]]
yield [row[0], 'age_months', row[2] * 12]
yield [row[0], 'bmi', row[4] / row[3] ** 2]
actual = rowmapmany(table, rowgenerator, header=['subject_id', 'variable',
'value'])
expect = (('subject_id', 'variable', 'value'),
(1, 'gender', 'M'),
(1, 'age_months', 16 * 12),
(1, 'bmi', 62.0 / 1.45 ** 2),
(2, 'gender', 'F'),
(2, 'age_months', 19 * 12),
(2, 'bmi', 55.4 / 1.34 ** 2),
(3, 'gender', '-'),
(3, 'age_months', 17 * 12),
(3, 'bmi', 74.4 / 1.78 ** 2),
(4, 'gender', 'M'),
(4, 'age_months', 21 * 12))
ieq(expect, actual)
    ieq(expect, actual)  # can iterate twice?
def test_rowmapmany_failonerror():
input_ = (('foo',), ('A',), (1,), ('B',))
mapper = lambda r: [r[0].lower()]
expect_ = (('foo',), ('a',), ('b',),)
assert_failonerror(
input_fn=partial(rowmapmany, input_, mapper, header=('foo',)),
expected_output=expect_)
def test_recordmapmany():
table = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, '-', 17, 1.78, 74.4),
(4, 'male', 21, 1.33))
def rowgenerator(rec):
transmf = {'male': 'M', 'female': 'F'}
yield [rec['id'], 'gender',
transmf[rec['sex']] if rec['sex'] in transmf else rec['sex']]
yield [rec['id'], 'age_months', rec['age'] * 12]
yield [rec['id'], 'bmi', rec['weight'] / rec['height'] ** 2]
actual = rowmapmany(table, rowgenerator, header=['subject_id', 'variable',
'value'])
expect = (('subject_id', 'variable', 'value'),
(1, 'gender', 'M'),
(1, 'age_months', 16 * 12),
(1, 'bmi', 62.0 / 1.45 ** 2),
(2, 'gender', 'F'),
(2, 'age_months', 19 * 12),
(2, 'bmi', 55.4 / 1.34 ** 2),
(3, 'gender', '-'),
(3, 'age_months', 17 * 12),
(3, 'bmi', 74.4 / 1.78 ** 2),
(4, 'gender', 'M'),
(4, 'age_months', 21 * 12))
ieq(expect, actual)
    ieq(expect, actual)  # can iterate twice?
|
game_of_life/05_mixed_sorting.py
|
nicetone/Python
| 28,321 |
106407
|
<reponame>nicetone/Python<gh_stars>1000+
# Mixed sorting
"""
Given a list of integers nums, sort the array such that:
All even numbers are sorted in increasing order
All odd numbers are sorted in decreasing order
The relative positions of the even and odd numbers remain the same
Example 1
Input
nums = [8, 13, 11, 90, -5, 4]
Output
[4, 13, 11, 8, -5, 90]
Explanation
The even numbers are sorted in increasing order, the odd numbers are sorted in
decreasing number, and the relative positions were
[even, odd, odd, even, odd, even] and remain the same after sorting.
"""
# solution
import unittest
def mixed_sorting(nums):
positions = []
odd = []
even = []
sorted_list = []
for i in nums:
if i%2 == 0:
even.append(i)
positions.append("E")
else:
odd.append(i)
positions.append("O")
even.sort()
odd.sort()
odd.reverse()
j,k = 0,0
    # Walk the original positions, taking the next even or odd value in turn.
    for i in range(len(nums)):
        if positions[i] == "E":
            sorted_list.append(even[j])
            j += 1
        else:
            sorted_list.append(odd[k])
            k += 1
return sorted_list
# DO NOT TOUCH THE BELOW CODE
class TestMixedSorting(unittest.TestCase):
def test_1(self):
self.assertEqual(mixed_sorting(
[8, 13, 11, 90, -5, 4]), [4, 13, 11, 8, -5, 90])
def test_2(self):
self.assertEqual(mixed_sorting([1, 2, 3, 6, 5, 4]), [5, 2, 3, 4, 1, 6])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
examples/plot_allpsd.py
|
butala/spectrum
| 261 |
106429
|
<filename>examples/plot_allpsd.py
"""
Spectral analysis of a two frequencies signal
==================================================
"""
###########################################################
# Context
# ----------
##############################################
# Example
# --------
#
# In the following example, we use most of the methods available to
# analyse an input signal made of the addition of two sinus and an
# additive gaussian noise
import numpy
import spectrum
from spectrum import tools
from numpy.testing import assert_array_almost_equal
import pylab
data = spectrum.marple_data
from pylab import *
nn = numpy.arange(200)
xx = cos(0.257*pi*nn) + sin(0.2*pi*nn) + 0.01*randn(size(nn));
def create_all_psd():
f = pylab.linspace(0, 1, 4096)
pylab.figure(figsize=(12,8))
# MA model
p = spectrum.pma(xx, 64,128); p(); p.plot()
"""
#ARMA 15 order
a, b, rho = spectrum.arma_estimate(data, 15,15, 30)
psd = spectrum.arma2psd(A=a,B=b, rho=rho)
newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq
pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='ARMA 15,15')
"""
# <NAME>
p = spectrum.pyule(xx, 7 , NFFT=4096, scale_by_freq=False); p.plot()
# equivalent to
# plot([x for x in p.frequencies()] , 10*log10(p.psd)); grid(True)
#burg method
p = spectrum.pburg(xx, 7, scale_by_freq=False); p.plot()
#pcovar
p = spectrum.pcovar(xx, 7, scale_by_freq=False); p.plot()
#pmodcovar
p = spectrum.pmodcovar(xx, 7, scale_by_freq=False); p.plot()
# correlogram
p = spectrum.pcorrelogram(xx, lag=60, NFFT=512, scale_by_freq=False); p.plot()
# minvar
p = spectrum.pminvar(xx, 7, NFFT=256, scale_by_freq=False); p.plot()
# pmusic
p = spectrum.pmusic(xx, 10,4, scale_by_freq=False); p.plot()
# pmusic
p = spectrum.pev(xx, 10, 4, scale_by_freq=False); p.plot()
# periodogram
p = spectrum.Periodogram(xx, scale_by_freq=False); p.plot()
#
legend( ["MA 32", "pyule 7", "pburg 7", "pcovar", "pmodcovar", "correlogram",
"minvar", "pmusic", "pev", "periodgram"])
pylab.ylim([-80,80])
create_all_psd()
|
devtools/qcexport/qcexport.py
|
MolSSI/dqm_server
| 113 |
106440
|
<filename>devtools/qcexport/qcexport.py
'''Import/Export of QCArchive data
'''
from dataclasses import dataclass
import typing
from qcexport_extra import extra_children_map
from sqlalchemy.orm import make_transient, Load
from sqlalchemy import inspect
from qcfractal.storage_sockets.models import (
AccessLogORM,
BaseResultORM,
CollectionORM,
DatasetORM,
GridOptimizationProcedureORM,
MoleculeORM,
KeywordsORM,
KVStoreORM,
OptimizationProcedureORM,
QueueManagerLogORM,
QueueManagerORM,
ResultORM,
ServerStatsLogORM,
ServiceQueueORM,
QueueManagerORM,
TaskQueueORM,
TorsionDriveProcedureORM,
Trajectory,
VersionsORM,
WavefunctionStoreORM,
)
from qcfractal.storage_sockets.models.collections_models import DatasetEntryORM
from qcfractal.storage_sockets.models.results_models import GridOptimizationAssociation, TorsionInitMol
_all_orm = [
AccessLogORM,
BaseResultORM,
CollectionORM,
DatasetORM,
DatasetEntryORM,
GridOptimizationProcedureORM,
GridOptimizationAssociation,
MoleculeORM,
KeywordsORM,
KVStoreORM,
OptimizationProcedureORM,
QueueManagerLogORM,
QueueManagerORM,
ResultORM,
ServerStatsLogORM,
ServiceQueueORM,
QueueManagerORM,
TaskQueueORM,
TorsionDriveProcedureORM,
TorsionInitMol,
Trajectory,
VersionsORM,
WavefunctionStoreORM,
]
# Maps table names to sqlalchemy ORM objects
_table_orm_map = {orm.__tablename__: orm for orm in _all_orm}
class RowKeyValues:
'''Generates and stores information about primary and foreign keys of a table
'''
@dataclass(order=True)
class PKInfo:
'''Holds information about a row's primary key.
Holds the column names and the values of the primary key columns.
These are lists in order to handle composite primary keys
'''
table: str
columns: list
values: list
@dataclass(order=True)
class FKInfo:
'''Holds information about a row's foreign key.
For a single foreign key, holds the source and destination/foreign table names and columns. Also
holds the value in the source row.
'''
src_table: str
src_column: str
dest_table: str
dest_column: str
value: 'typing.Any'
def __init__(self, orm_obj):
'''Generates primary and foreign key info given an ORM object'''
self.orm_type = type(orm_obj)
insp = inspect(self.orm_type)
###########################################################
# First, get which columns are primary and foreign keys
###########################################################
# Handle if this is a derived class (polymorphic?)
# This seems poorly documented. But get the table name of the
# base class (if there is one)
base_class = insp.inherits.entity if insp.inherits else None
base_table = base_class.__tablename__ if base_class else None
# Get the columns comprising the primary key
primary_key_columns = [x.name for x in insp.primary_key]
# Now foreign keys. Loop over all the columns.
# Each column has a set() (which may be empty) stored in foreign_keys
foreign_key_info = []
for col in insp.columns:
for fk in sorted(list(col.foreign_keys)):
# Remove foreign keys to base class
# The purpose of this function is to get foreign keys that we need to
# load. But if it is part of the base class, then no need to do that
if not (base_table and fk.column.table.name == base_table):
new_fk = self.FKInfo(col.table.name, col.name, fk.column.table.name, fk.column.name, None)
foreign_key_info.append(new_fk)
# Not sure if order is always preserved, but sort just in case
# so that things are always consistent
primary_key_columns = sorted(primary_key_columns)
foreign_key_info = sorted(foreign_key_info)
# Now store in this class
self.primary_key = self.PKInfo(self.orm_type.__tablename__, primary_key_columns, None)
self.foreign_keys = foreign_key_info
#######################################################
# Obtain values for the primary and foreign key columns
#######################################################
self.primary_key.values = [getattr(orm_obj, column) for column in self.primary_key.columns]
for fk in self.foreign_keys:
fk.value = getattr(orm_obj, fk.src_column)
def is_composite_primary(self):
'''Returns True if this represents a composite primary key'''
return len(self.primary_key.columns) > 1
def as_lookup_key(self):
'''Return a unique string representing the primary key
This is used as a key to a dictionary to store already-copied data.
'''
return repr(self.orm_type) + repr(self.primary_key)
def remove_primary_key(self, orm_obj):
'''Remove primary key values that are integers and not part of
a composite primary key'''
if type(orm_obj) != self.orm_type:
raise RuntimeError("Removing primary keys of type f{type(orm_obj)} but I can only handle {self.orm_type}")
# Don't touch composite primary
if self.is_composite_primary():
return
for pk, old_value in zip(self.primary_key.columns, self.primary_key.values):
if isinstance(old_value, int):
setattr(orm_obj, pk, None)
def _add_children(orm_obj, session_dest, session_src, new_pk_map, options, row_key_info, indent=''):
'''Given an ORM object, adds the dependent data (through foreign keys)
Finds all the foreign keys for the object, and adds the dependent data to the DB.
It then fixes the values of the foreign keys in the ORM object to match the newly-inserted data.
Parameters
----------
orm_obj
An ORM object to add the children of
session_dest
SQLAlchemy session to write data to
session_src
SQLAlchemy session to read data from
new_pk_map : dict
Where to store the mapping of old to new data
options : dict
Various options to be passed into the internal functions
row_key_info : RowKeyValues
Information about the row's primary and foreign keys
indent : str
Prefix to add to all printed output lines
'''
for fk_info in row_key_info.foreign_keys:
# Data in that column may be empty/null
if fk_info.value is None:
continue
print(indent + "+ Handling child: ")
print(
indent +
f" - {fk_info.src_table}.{fk_info.src_column}:{fk_info.value} -> {fk_info.dest_table}.{fk_info.dest_column}"
)
# We need to load from the db (from the foreign/destination table) given the column and value
# in the foreign key info
fk_query = {fk_info.dest_column: fk_info.value}
# Copy the foreign info. This should only return one record
# NOTE: This requires going to the source db for info. It is possible that
# we can check new_pk_map here using the info from the foreign key to see if it
# was already done. However, the hit rate would generally be low, and might be error
# prone, especially with esoteric cases.
new_info = _general_copy(table_name=fk_info.dest_table,
session_dest=session_dest,
session_src=session_src,
new_pk_map=new_pk_map,
options=options,
filter_by=fk_query,
single=True,
indent=indent + ' ')
# Now set the foreign keys to point to the new id
setattr(orm_obj, fk_info.src_column, new_info[fk_info.dest_column])
def _add_tasks_and_services(base_result_id, session_dest, session_src, new_pk_map, options, indent):
'''Adds entries in the task_queue and service_queue given something deriving from base_result
Should only be called after adding the result or procedure.
Parameters
----------
base_result_id
ID of the base_result (result, procedure, ...)
session_dest
SQLAlchemy session to write data to
session_src
SQLAlchemy session to read data from
new_pk_map : dict
Where to store the mapping of old to new data
options : dict
Various options to be passed into the internal functions
indent : str
Prefix to add to all printed output lines
'''
print(indent + f"$ Adding task & service queue entries for base_result_id = {base_result_id}")
# Add anything from the task queue corresponding to the given base result id
# (if calculation is completed, task is deleted)
_general_copy(table_name='task_queue',
session_dest=session_dest,
session_src=session_src,
new_pk_map=new_pk_map,
options=options,
filter_by={'base_result_id': base_result_id},
indent=indent + ' ')
# Do the same for the services queue
_general_copy(table_name='service_queue',
session_dest=session_dest,
session_src=session_src,
new_pk_map=new_pk_map,
options=options,
filter_by={'procedure_id': base_result_id},
indent=indent + ' ')
def _general_copy(table_name,
session_dest,
session_src,
new_pk_map,
options,
filter_by=None,
filter_in=None,
order_by=None,
limit=None,
single=False,
indent=''):
'''
Given queries, copies all results of the query from session_src to session_dest
Adds data to session_dest, keeping a map of newly-added info and fixing foreign keys
to match newly-inserted data.
Called recursively to add dependent data through foreign keys.
Parameters
----------
table_name : str
Name of the table to copy data from/to
session_dest
SQLAlchemy session to write data to
session_src
SQLAlchemy session to read data from
new_pk_map : dict
Where to store the mapping of old to new data
options : dict
Various options to be passed into the internal functions
filter_by : dict
Filters (column: value) to add to the query. ie, {'id': 123}
filter_in : dict
Filters (column: list(values)) to add to the query using 'in'. ie, {'id': [123,456]}
order_by: dict
How to order the results of the query. ie {'id': 'desc'}
limit : int
Limit the number of records returned
single : bool
If true, expect only one returned record. If not, raise an exception
indent : str
Prefix to add to all printed output lines
'''
orm_type = _table_orm_map[table_name]
# Build the query based on filtering, etc
query = session_src.query(orm_type)
if filter_by is not None:
query = query.filter_by(**filter_by)
if filter_in is not None:
for key, values in filter_in.items():
query = query.filter(getattr(orm_type, key).in_(values))
if order_by:
for column, order in order_by.items():
# Gets, for example, Trajectory.opt_id.desc
# opt_id = column, desc = bound function
o = getattr(orm_type, column)
o = getattr(o, order)
query = query.order_by(o())
if limit is not None:
if single and limit != 1:
raise RuntimeError(f'Limit = {limit} but single return is specified')
query = query.limit(limit)
elif single:
limit = 1
# Disable all relationship loading
query = query.options(Load(orm_type).noload('*'))
data = query.all()
return_info = []
# We have to expunge and make transient everything first
# If not, sqlalchemy tries to be smart. After you add the entries found
# through foreign keys, the rest of the objects in the data list may change.
# But then you will have parts of objects in session_src and parts in session_dest
for d in data:
session_src.expunge(d)
make_transient(d)
for d in data:
# Obtain primary/foreign key columns and values
src_rck = RowKeyValues(d)
# The type of the object may not be the same as we queried (due to polymorphic types)
real_orm_type = type(d)
real_table_name = real_orm_type.__tablename__
# real_orm_type should never be BaseResultORM
assert real_orm_type != BaseResultORM
print(indent +
f'* Copying {table_name} {str(src_rck.primary_key.columns)} = {str(src_rck.primary_key.values)}')
if real_orm_type != orm_type:
print(indent + f'& But actually using table {real_table_name}')
############################################################
############################################################
## TODO - If working with an existing db, do lookups here ##
## (this is for future capability of importing ##
## into an existing db) ##
############################################################
############################################################
src_lookup_key = src_rck.as_lookup_key()
if src_lookup_key in new_pk_map:
print(indent + ' - Already copied previously')
return_info.append(new_pk_map[src_lookup_key])
continue
# Save src information for later. When adding extra children, the old ids and other fields may be needed
src_info = d.to_dict()
# Loop through foreign keys and recursively add those
_add_children(d, session_dest, session_src, new_pk_map, options, src_rck, indent + ' ')
# Remove the primary key. We will generate a new one on adding
src_rck.remove_primary_key(d)
# Truncate KV store entries by default
# (but can be overridden)
if table_name == 'kv_store':
truncate_kv_store = options.get('truncate_kv_store', True)
if truncate_kv_store:
d.value = str(d.value)[:2000]
# Now add it to the session
# and obtain the key info
session_dest.add(d)
session_dest.commit()
dest_rck = RowKeyValues(d)
print(indent + f'! adding {real_table_name} {str(src_rck.primary_key.values)} = {str(dest_rck.primary_key.values)}')
# Store the info for the entire row
# (exception: kvstore)
dest_info = d.to_dict()
# Don't store kvstore data in the dictionary (not needed)
if table_name == 'kv_store':
dest_info.pop('value')
# We can't just use primary key, since foreign keys may
# reference non-primary-keys of other tables (as long as they are unique)
new_pk_map[src_lookup_key] = dest_info
return_info.append(dest_info)
########################################################################
# Now handle children that are not specified by foreign keys
# This includes decoupled data like datasets, as well as when foreign
# keys are specified in json
#
# We do that here after adding. Some of these have foreign keys
# to this object, so we need the new id (retrieved through new_pk_map)
########################################################################
if real_orm_type in extra_children_map:
# The function called in extra_children_map may modify the object.
# We let the called function do that, then merge it back into the db
extra_children_map[real_orm_type](d, src_info, session_dest, session_src, new_pk_map, options, indent + ' ')
session_dest.commit()
########################################################################
# Now add tasks/services if this is a result/procedure
########################################################################
if issubclass(real_orm_type, BaseResultORM):
_add_tasks_and_services(src_info['id'], session_dest, session_src, new_pk_map, options, indent + ' ')
# If the caller specified single=True, should only be one record
if single:
if len(return_info) != 1:
raise RuntimeError(f'Wanted single record but got {len(return_info)} instead')
return return_info[0]
else:
return return_info
def general_copy(table_name,
storage_dest,
storage_src,
new_pk_map=None,
options={},
filter_by={},
order_by=None,
limit=None,
indent=''):
''' Copies data from the source db to the destination db
Given queries, copies all results of the query from session_src to session_dest
Handles copying of data required by foreign keys as well.
Parameters
----------
table_name : str
Name of the table to copy data from/to
storage_dest
Storage object to write data to
storage_src
Storage object to read data from
new_pk_map : dict
Where to store the mapping of old to new data
options : dict
Various options to be passed into the internal functions
filter_by : dict
Filters (column: value) to add to the query. ie, {'id': 123}
order_by: dict
How to order the results of the query. ie {'id': 'desc'}
limit : int
Limit the number of records returned
indent : str
Prefix to add to all printed output lines
'''
if new_pk_map is None:
new_pk_map = dict()
with storage_src.session_scope() as session_src:
with storage_dest.session_scope() as session_dest:
_general_copy(table_name,
session_dest,
session_src,
new_pk_map=new_pk_map,
options=options,
filter_by=filter_by,
order_by=order_by,
limit=limit,
indent=indent)
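def _example_copy_results(storage_src, storage_dest):
    '''Hedged usage sketch (added for illustration; the table name, filter and
    options below are assumptions, not taken from the original code).
    Copies the 100 most recent completed rows of a 'result' table, plus everything
    they reference through foreign keys, from one storage backend to another.
    '''
    new_pk_map = {}
    general_copy(table_name='result',
                 storage_dest=storage_dest,
                 storage_src=storage_src,
                 new_pk_map=new_pk_map,
                 options={'truncate_kv_store': True},
                 filter_by={'status': 'COMPLETE'},
                 order_by={'id': 'desc'},
                 limit=100)
    # new_pk_map now maps each copied source row to the corresponding destination row
    return new_pk_map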
|
core/clients/python/api_bindings/cb2_api/tests/lists_and_gets.py
|
aledbf/digitalrebar
| 103 |
106457
|
<reponame>aledbf/digitalrebar
# Copyright 2014, Dell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Test generic list/get on available endpoints
from cb2_api.api import cb2_Api
from cb2_api.objects import *
class main():
session = cb2_Api("192.168.124.10", "3000", "rebar", "rebar1")
list_methods ={'get_nodes',
'get_deployments',
'get_deployment_roles',
#'get_interfaces', #NOT CURRENTLY AVAILABLE
'get_users',
'get_node_roles',
'get_barclamps',
'get_groups',
#'get_dhcpdatabases', #NOT CURRENTLY AVAILABLE
'get_attributes',
'get_jigs',
}
for method in list_methods :
print "--- Checking list method " + method +" --- "
ls = getattr(session, method)()
if len(ls) > 0 :
for each in ls :
print " -- checking object of type " + str(each.__class__) + " generic get"
remote = session.get(each)
# This is not pretty, but otherwise we get intermittent errors (not sure what triggers the timestamp to be updated)
if str(each.__class__.__name__) == 'User':
each.updated_at = each.updated_at[:-4]
remote.updated_at = remote.updated_at[:-4]
if remote.__dict__ == each.__dict__:
remote.pretty_print()
print " PASS"
else:
print " FAIL"
print " local object instance :"
each.pretty_print()
print " remote object instance :"
remote.pretty_print()
raise AssertionError
else:
print "empty list back, doesn't look right"
raise AssertionError
print "That's all folks"
|
census_data_downloader/core/__init__.py
|
JoeGermuska/census-data-downloader
| 170 |
106487
|
<reponame>JoeGermuska/census-data-downloader<filename>census_data_downloader/core/__init__.py
import collections
# https://www.census.gov/data/developers/data-sets/acs-1year/notes-on-acs-estimate-and-annotation-values.html
ESTIMATE_MAP = collections.OrderedDict({
"-999999999": "too few samples",
"-888888888": "not applicable",
"-666666666": "estimate can't be calculated",
"*": "significantly different from most current year",
"C": "controlled so don't use tests",
# "+": "",
# "-": "falls in the lowest interval"
})
MOE_MAP = collections.OrderedDict({
"N": "too few samples",
"(X)": "not applicable",
"-": "too few samples or ratio of medians cannot be calculated",
"*****": "estimate is controlled",
"***": "falls in lowest interval or highest interval",
"**": "too few samples to calculate standard error",
"-555555555": "estimate is controlled",
"-333333333": "falls in lowest interval or highest interval",
"-222222222": "too few samples to calculate standard error",
"N/A": "significantly different from most current year"
})
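# Hedged usage sketch (added for illustration; pandas and the column names are
# assumptions, not part of this package): the maps above can be used to swap the
# Census sentinel codes for readable annotations, e.g.
#
#   import pandas as pd
#   df = pd.read_csv("acs_table.csv", dtype=str)
#   df["estimate_annotation"] = df["estimate"].replace(ESTIMATE_MAP)
#   df["moe_annotation"] = df["moe"].replace(MOE_MAP)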
|
orchestra/todos/auth.py
|
code-review-doctor/orchestra
| 444 |
106525
|
<reponame>code-review-doctor/orchestra
from rest_framework import permissions
from orchestra.models import Worker
from orchestra.models import Todo
from orchestra.models import TodoQA
class IsAssociatedWithTodosProject(permissions.BasePermission):
"""
Ensures that a user's worker is associated with the todo's project.
"""
def has_object_permission(self, request, view, obj):
worker = Worker.objects.get(user=request.user)
if isinstance(obj, Todo):
project = obj.project
elif isinstance(obj, TodoQA):
project = obj.todo.project
else:
project = None
return (
project and
(worker.is_project_admin() or
worker.assignments.filter(task__project=project).exists()))
class IsAssociatedWithProject(permissions.BasePermission):
"""
Ensures that a user's worker is associated with the request's
`project`.
"""
def has_permission(self, request, view):
"""
We pass project_id in the payload when the request is POST, PUT or PATCH.
It can also be passed via query param, not only in GET requests but also
in the requests listed above (when applying a filter).
"""
worker = Worker.objects.get(user=request.user)
if worker.is_project_admin():
return True
todo_id = request.data.get('todo')
if todo_id is None:
todo_id = view.kwargs.get('pk')
project_id = request.data.get(
'project') or request.data.get('project__id')
if project_id is None:
project_id = request.query_params.get(
'project') or request.query_params.get('project__id')
if project_id is None and todo_id is not None:
project_id = Todo.objects.get(id=todo_id).project.id
return worker.assignments.filter(task__project__id=project_id).exists()
class IsAssociatedWithTask(permissions.BasePermission):
"""
Ensures that a user's worker is associated with the request's
`task`.
"""
def has_permission(self, request, view):
worker = Worker.objects.get(user=request.user)
if worker.is_project_admin():
return True
if request.method == 'GET':
task_id = request.query_params.get('task')
return worker.assignments.filter(task=task_id).exists()
return False
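# Hedged usage sketch (added for illustration; the view below is an assumption,
# not part of orchestra): these classes are standard DRF permissions and are
# attached to a view via permission_classes, e.g.
#
#   from rest_framework import generics
#
#   class TodoDetail(generics.RetrieveUpdateAPIView):
#       permission_classes = (IsAssociatedWithTodosProject,)
#       queryset = Todo.objects.all()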
|
mead/hash_config.py
|
sagnik/baseline
| 241 |
106613
|
import argparse
from baseline.utils import read_config_stream
from mead.utils import hash_config, convert_path
def main():
parser = argparse.ArgumentParser(description="Get the mead hash of a config.")
parser.add_argument('config', help='JSON/YML Configuration for an experiment: local file or remote URL', type=convert_path, default="$MEAD_CONFIG")
args = parser.parse_args()
config = read_config_stream(args.config)
print(hash_config(config))
if __name__ == "__main__":
main()
|
tests/test_00_exports.py
|
sharuzzaman/PGPy
| 248 |
106630
|
""" check the export list to ensure only the public API is exported by pgpy.__init__
"""
import pytest
import importlib
import inspect
modules = ['pgpy.constants',
'pgpy.decorators',
'pgpy.errors',
'pgpy.pgp',
'pgpy.symenc',
'pgpy.types',
'pgpy.packet.fields',
'pgpy.packet.packets',
'pgpy.packet.types',
'pgpy.packet.subpackets.signature',
'pgpy.packet.subpackets.types',
'pgpy.packet.subpackets.userattribute']
def get_module_objs(module):
# return a set of strings that represent the names of objects defined in that module
return { n for n, o in inspect.getmembers(module, lambda m: inspect.getmodule(m) is module) } | ({'FlagEnum',} if module is importlib.import_module('pgpy.types') else set()) # dirty workaround until six fixes metaclass stuff to support EnumMeta in Python >= 3.6
def get_module_all(module):
return set(getattr(module, '__all__', set()))
def test_pgpy_all():
import pgpy
# just check that everything in pgpy.__all__ is actually there
assert set(pgpy.__all__) <= { n for n, _ in inspect.getmembers(pgpy) }
@pytest.mark.parametrize('modname', modules)
def test_exports(modname):
module = importlib.import_module(modname)
assert get_module_all(module) == get_module_objs(module)
|
examples/demo_cifar.py
|
rohanraja/cgt_distributed
| 698 |
106636
|
from example_utils import fmt_row, fetch_dataset
import cPickle, numpy as np
import cgt
from cgt import nn
import argparse, time
def rmsprop_updates(cost, params, stepsize=0.001, rho=0.9, epsilon=1e-6):
grads = cgt.grad(cost, params)
updates = []
for p, g in zip(params, grads):
acc = cgt.shared(p.op.get_value() * 0.)
acc_new = rho * acc + (1 - rho) * cgt.square(g)
gradient_scaling = cgt.sqrt(acc_new + epsilon)
g = g / gradient_scaling
updates.append((acc, acc_new))
updates.append((p, p - stepsize * g))
return updates
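# Added note (not in the original file): rmsprop_updates implements the standard
# RMSProp rule for each parameter p with gradient g,
#     acc_new = rho * acc + (1 - rho) * g**2
#     p_new   = p - stepsize * g / sqrt(acc_new + epsilon)
# where acc is a per-parameter running average of squared gradients kept in a
# shared variable.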
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--profile",action="store_true")
parser.add_argument("--unittest",action="store_true")
parser.add_argument("--epochs",type=int,default=10)
parser.add_argument("--devtype",choices=["cpu","gpu"],default="cpu")
args = parser.parse_args()
cgt.update_config(default_device=cgt.core.Device(devtype=args.devtype), backend="native")
batchsize = 64
Xshape = (batchsize, 3, 32, 32)
X = cgt.tensor4("X", fixed_shape = Xshape)
y = cgt.vector("y", fixed_shape = (batchsize,), dtype='i4')
conv1 = nn.SpatialConvolution(3, 32, kernelshape=(5,5), pad=(2,2),
weight_init=nn.IIDGaussian(std=1e-4))(X)
relu1 = nn.rectify(conv1)
pool1 = nn.max_pool_2d(relu1, kernelshape=(3,3), stride=(2,2))
conv2 = nn.SpatialConvolution(32, 32, kernelshape=(5,5), pad=(2,2),
weight_init=nn.IIDGaussian(std=0.01))(pool1)
relu2 = nn.rectify(conv2)
pool2 = nn.max_pool_2d(relu2, kernelshape=(3,3), stride=(2,2))
conv3 = nn.SpatialConvolution(32, 64, kernelshape=(5,5), pad=(2,2),
weight_init=nn.IIDGaussian(std=0.01))(pool2)
pool3 = nn.max_pool_2d(conv3, kernelshape=(3,3), stride=(2,2))
relu3 = nn.rectify(pool3)
d0,d1,d2,d3 = relu3.shape
flatlayer = relu3.reshape([d0,d1*d2*d3])
nfeats = cgt.infer_shape(flatlayer)[1]
ip1 = nn.Affine(nfeats, 10)(flatlayer)
logprobs = nn.logsoftmax(ip1)
loss = -logprobs[cgt.arange(batchsize), y].mean()
params = nn.get_parameters(loss)
updates = rmsprop_updates(loss, params, stepsize=1e-3)
train = cgt.function(inputs=[X, y], outputs=[loss], updates=updates)
if args.profile: cgt.profiler.start()
data = fetch_dataset("http://rll.berkeley.edu/cgt-data/cifar10.npz")
Xtrain = data["X_train"]
ytrain = data["y_train"]
print fmt_row(10, ["Epoch","Train NLL","Train Err","Test NLL","Test Err","Epoch Time"])
for i_epoch in xrange(args.epochs):
for start in xrange(0, Xtrain.shape[0], batchsize):
tstart = time.time()
end = start+batchsize
print train(Xtrain[start:end], ytrain[start:end]), time.time()-tstart
if start > batchsize*5: break
# elapsed = time.time() - tstart
# trainerr, trainloss = computeloss(Xtrain[:len(Xtest)], ytrain[:len(Xtest)])
# testerr, testloss = computeloss(Xtest, ytest)
# print fmt_row(10, [i_epoch, trainloss, trainerr, testloss, testerr, elapsed])
if args.profile:
cgt.profiler.print_stats()
return
if args.unittest:
break
if __name__ == "__main__":
main()
|
terraform/stacks/iam/lambdas/python/cloud_sniper_iam/cloud_sniper_iam.py
|
houey/cloud-sniper
| 160 |
106641
|
<reponame>houey/cloud-sniper<gh_stars>100-1000
import boto3
import datetime
import os
import logging
import json
import requests
ROLE_SPOKE = os.environ['ROLE_SPOKE_CLOUD_SNIPER']
WEBHOOK_URL = os.environ['WEBHOOK_URL_CLOUD_SNIPER']
HUB_ACCOUNT_ID = os.environ['HUB_ACCOUNT_CLOUD_SNIPER']
HUB_ACCOUNT_NAME = os.environ['HUB_ACCOUNT_NAME_CLOUD_SNIPER']
BUCKET_NAME = os.environ['BUCKET_NAME']
IAM_PATH = os.environ['IAM_PATH']
log = logging.getLogger()
log.setLevel(logging.INFO)
# your accounts mapping
account_ids = {
"name": "id",
}
def assume_role(account_id, boto_type):
log.info("Assuming role: " + str(ROLE_SPOKE) + " account: " + str(account_id))
try:
sts = sts_connection.assume_role(
RoleArn="arn:aws:iam::" + account_id + ":role/" + ROLE_SPOKE,
RoleSessionName="cross_acct_lambda"
)
ACCESS_KEY = sts['Credentials']['AccessKeyId']
SECRET_KEY = sts['Credentials']['SecretAccessKey']
SESSION_TOKEN = sts['Credentials']['SessionToken']
if boto_type == "iam":
client = boto3.client(
'iam',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKEN,
)
elif boto_type == "iam-resource":
client = boto3.resource(
'iam',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKEN,
)
return client
except Exception as e:
log.info("Role could not be assumed " + str(e))
def iam_user_password_last_used():
log.info("[IAM] Console password last use ...")
if account_name != HUB_ACCOUNT_NAME:
iam_resource_users_all = assume_iam_resource.users.all()
else:
iam_resource_users_all = iam_resource.users.all()
try:
for iam_user in iam_resource_users_all:
user_name = str(iam_user.LoginProfile()).split('\'')
if iam_user.password_last_used:
log.info("User has console access: " + str(iam_user))
date_last_used = iam_user.password_last_used
days_unused = (datetime.datetime.now() - date_last_used.replace(tzinfo=None)).days
if days_unused >= 1:
last_login_console_delete.append(str(user_name[1]))
break
else:
log.info('User has only programmatic access')
last_login_console_delete.append(str(user_name[1]))
except Exception as e:
log.info("No user resources in the collection" + str(e))
def iam_list_access_keys():
log.info("[IAM] Static key last use ...")
last_date = 'LastUsedDate'
try:
if account_name != HUB_ACCOUNT_NAME:
iam_resource_users_all = assume_iam_resource.users.all()
else:
iam_resource_users_all = iam_resource.users.all()
for user in iam_resource_users_all:
if account_name != HUB_ACCOUNT_NAME:
metadata = assume_iam.list_access_keys(UserName=user.user_name)
else:
metadata = iam.list_access_keys(UserName=user.user_name)
if metadata['AccessKeyMetadata']:
for key in user.access_keys.all():
AccessId = key.access_key_id
Status = key.status
if account_name != HUB_ACCOUNT_NAME:
last_used = assume_iam.get_access_key_last_used(AccessKeyId=AccessId)
else:
last_used = iam.get_access_key_last_used(AccessKeyId=AccessId)
if Status == "Active":
if last_date in last_used['AccessKeyLastUsed']:
date_last_used = last_used['AccessKeyLastUsed'][last_date]
days_unused = (datetime.datetime.now() - date_last_used.replace(tzinfo=None)).days
if days_unused >= 90:
access_keys_last_delete.append(user.user_name)
else:
access_keys_last_keep.append(user.user_name)
else:
# Key is Active but never used
access_keys_last_delete.append(user.user_name)
else:
# Keys is inactive
access_keys_last_delete.append(user.user_name)
else:
# No keys for this user
access_keys_last_delete.append(user.user_name)
except Exception as e:
log.info("No user resources in the collection", str(e))
def iam_users_to_nuke():
log.info("IAM users to nuke ...")
global account_name
remove_access_keys = [item for item in access_keys_last_delete if item not in access_keys_last_keep]
for console in last_login_console_delete:
for static in remove_access_keys:
if console == static:
iam_users_to_clean.append(console)
def message_to_slack():
users = ""
for u in set(iam_users_to_clean):
users += u + " "
try:
log.info("Sending message to Slack ...")
if users != "":
data = {
'text': '***************************************************************\n'
+ '* [' + account_name + '] IAM users have passwords and active access keys that have not been used within 90 days:* \n\n'
+ '*IAM USERS:* ' + '`' + users + '`' + '\n'
+ '***************************************************************',
'username': 'CLOUD SNIPER BUDDY',
'icon_emoji': ':robot_face:'
}
response = requests.post(WEBHOOK_URL, data=json.dumps(data), headers={'Content-Type': 'application/json'})
log.info('Sending message to Slack. Response: ' + str(response.text) + ' Response Code: ' + str(response.status_code))
else:
log.info(str(account_name) + ": No IAM user has passwords and active access keys that have not been used within 90 days")
except Exception as e:
log.info("Message could not be send to Slack: " + str(e))
def put_to_s3():
log.info("Sending findings to S3 ...")
dataset = {
'cloud.account.name': str(account_name),
'cloud.account.users': str(iam_users_to_clean),
'cloud.provider': 'aws'
}
NOW = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
s3_resource = boto3.resource('s3')
bucket_name = BUCKET_NAME
iam_path = IAM_PATH
bucket = s3_resource.Bucket(name=bucket_name)
if iam_path.startswith("/"):
iam_path = iam_path[1:]
if iam_path.endswith("/"):
iam_path = iam_path[:-1]
try:
(bucket.Object(key=f"{iam_path}/iam_{NOW}.json")
.put(Body=bytes(json.dumps(dataset).encode('UTF-8'))))
except Exception as e:
log.info("Could not put the object to S3" + str(e))
def cloud_sniper_iam(event, context):
global assume_iam_resource
global assume_iam
global account_id
global account_name
global iam_users_to_clean
global iam
global iam_resource
global access_keys_last_delete
global access_keys_last_keep
global last_login_console_delete
global sts_connection
s = boto3.session.Session(region_name=os.environ['AWS_REGION'])
iam_resource = s.resource('iam')
iam = s.client('iam')
sts_connection = boto3.client('sts')
assume_iam_resource = ""
assume_iam = ""
account_id = ""
account_name = ""
last_login_console_delete = []
access_keys_last_delete = []
access_keys_last_keep = []
iam_users_to_clean = []
for k, v in account_ids.items():
account_name = k
account_id = v
del iam_users_to_clean[:]
del last_login_console_delete[:]
del access_keys_last_delete[:]
del access_keys_last_keep[:]
if account_name != HUB_ACCOUNT_NAME:
assume_iam_resource = assume_role(account_id, 'iam-resource')
assume_iam = assume_role(account_id, 'iam')
try:
iam_user_password_last_used()
iam_list_access_keys()
iam_users_to_nuke()
message_to_slack()
put_to_s3()
except Exception as e:
log.error('IAM report failed ' + str(e))
|
packages/core/minos-microservice-common/minos/common/testing/__init__.py
|
minos-framework/minos-python
| 247 |
106664
|
<reponame>minos-framework/minos-python<filename>packages/core/minos-microservice-common/minos/common/testing/__init__.py
from .database import (
MockedDatabaseClient,
MockedDatabaseOperation,
MockedLockDatabaseOperationFactory,
MockedManagementDatabaseOperationFactory,
)
from .testcases import (
DatabaseMinosTestCase,
MinosTestCase,
)
|
bibliopixel/commands/run.py
|
rec/leds
| 253 |
106667
|
"""
Run specified project from file or URL
"""
from .. main import project_flags
from .. project import project_runner
from .. util import signal_handler
def run(args):
for i in signal_handler.run(args.pid_filename, project_runner.stop):
project_runner.run(args)
def add_arguments(parser):
parser.set_defaults(run=run)
project_flags.add_arguments(parser)
parser.add_argument(
'name', nargs='*',
help='Path to project files - can be a URL or file system location')
|
src/compas/base.py
|
XingxinHE/compas
| 235 |
106673
|
"""
********************************************************************************
base
********************************************************************************
.. deprecated:: 1.5
Use `compas.data` instead
.. currentmodule:: compas.base
Classes
=======
.. autosummary::
:toctree: generated/
:nosignatures:
Base
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import warnings
from compas.data import Data
Base = Data
__all__ = [
'Base',
]
warnings.warn(
"The base module is deprecated. Use the data module instead",
DeprecationWarning,
stacklevel=2
)
|
pypattyrn/behavioral/chain.py
|
defianceblack/PyPattyrn
| 1,499 |
106692
|
<reponame>defianceblack/PyPattyrn
from abc import ABCMeta, abstractmethod
class ChainLink(object, metaclass=ABCMeta):
"""
Abstract ChainLink object as part of the Chain of Responsibility pattern.
- External Usage documentation: U{https://github.com/tylerlaberge/PyPattyrn#chain-of-responsibility-pattern}
- External Chain of Responsibility Pattern documentation: U{https://en.wikipedia.org/wiki/Chain-of-responsibility_pattern}
"""
def __init__(self):
"""
Initialize a new ChainLink instance.
"""
self.successor = None
def set_successor(self, successor):
"""
Set a chain link to call if this chain link fails.
@param successor: The chain link to call if this chain link fails.
@type successor: ChainLink
"""
self.successor = successor
def successor_handle(self, request):
"""
Have this chain link's successor handle a request.
@param request: The request to handle.
"""
return self.successor.handle(request)
@abstractmethod
def handle(self, request):
"""
Handle a request.
@param request: The request to handle.
"""
pass
class Chain(object, metaclass=ABCMeta):
"""
Abstract Chain class as part of the Chain of Responsibility pattern.
- External Usage documentation: U{https://github.com/tylerlaberge/PyPattyrn#chain-of-responsibility-pattern}
- External Chain of Responsibility Pattern documentation: U{https://en.wikipedia.org/wiki/Chain-of-responsibility_pattern}
"""
def __init__(self, chainlink):
"""
Initialize a new Chain instance.
@param chainlink: The starting chain link.
"""
self.chainlink = chainlink
def handle(self, request):
"""
Handle a request.
@param request: The request to handle.
"""
try:
return self.chainlink.handle(request)
except AttributeError:
return self.fail()
@abstractmethod
def fail(self):
"""
The method to call when the chain could not handle a request.
"""
pass
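# Hedged usage sketch (added for illustration; the concrete classes below are
# assumptions, not part of pypattyrn): a minimal chain that handles the requests
# it recognizes and defers the rest to its successor.
#
#   class LowerCaseLink(ChainLink):
#       def handle(self, request):
#           if request.islower():
#               return 'lower: ' + request
#           return self.successor_handle(request)
#
#   class UpperCaseLink(ChainLink):
#       def handle(self, request):
#           if request.isupper():
#               return 'upper: ' + request
#           return self.successor_handle(request)
#
#   class CaseChain(Chain):
#       def fail(self):
#           return 'unhandled request'
#
#   lower, upper = LowerCaseLink(), UpperCaseLink()
#   lower.set_successor(upper)
#   chain = CaseChain(lower)
#   chain.handle('hello')   # 'lower: hello'
#   chain.handle('HELLO')   # 'upper: HELLO'
#   chain.handle('Hello')   # 'unhandled request' (no successor left, so Chain.fail() runs)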
|
atlas/foundations_sdk/src/foundations/job_parameters/__init__.py
|
DeepLearnI/atlas
| 296 |
106716
|
<reponame>DeepLearnI/atlas
def load_parameters(log_parameters=True):
try:
parameters = _parsed_json(_raw_json_from_parameters_file())
if log_parameters:
log_params(parameters)
return parameters
except FileNotFoundError:
return {}
def flatten_parameter_dictionary(param_dictionary):
flattened_output = {}
for key, value in param_dictionary.items():
if _is_scalar_value(value):
flattened_output[key] = value
elif isinstance(value, dict):
flattened_output.update(_flatten_dict_value(key, value))
else:
flattened_output.update(_flatten_list_value(key, value))
return flattened_output
def log_param(key, value):
from foundations.utils import log_warning_if_not_running_in_job
log_warning_if_not_running_in_job(_log_param_in_running_job, key, value)
def _log_param_in_running_job(key, value):
from foundations_contrib.global_state import current_foundations_job, redis_connection
project_name = current_foundations_job().project_name
job_id = current_foundations_job().job_id
_insert_parameter_name_into_projects_params_set(redis_connection, project_name, key)
_insert_input_parameter_name_into_projects_input_params_set(redis_connection, project_name, key)
_insert_parameter_value_into_job_run_data(redis_connection, job_id, key, value)
_insert_input_parameter_name_into_job_input_parameter_data(redis_connection, job_id, key)
def log_params(parameters):
for key, value in flatten_parameter_dictionary(parameters).items():
log_param(key, value)
def _insert_parameter_name_into_projects_params_set(redis_connection, project_name, key):
_insert_parameter_name_into_specified_projects_params_set('job_parameter_names', redis_connection, project_name, key)
def _insert_parameter_value_into_job_run_data(redis_connection, job_id, key, value):
import json
job_params_key = f'jobs:{job_id}:parameters'
serialized_job_params = redis_connection.get(job_params_key)
job_params = _deserialized_job_params(json.loads, serialized_job_params)
job_params[key] = value
redis_connection.set(job_params_key, json.dumps(job_params))
def _insert_input_parameter_name_into_projects_input_params_set(redis_connection, project_name, key):
_insert_parameter_name_into_specified_projects_params_set('input_parameter_names', redis_connection, project_name, key)
def _insert_parameter_name_into_specified_projects_params_set(set_name, redis_connection, project_name, key):
redis_connection.sadd(f'projects:{project_name}:{set_name}', key)
def _insert_input_parameter_name_into_job_input_parameter_data(redis_connection, job_id, key):
from foundations_internal.foundations_serializer import dumps, loads
job_params_key = f'jobs:{job_id}:input_parameters'
serialized_job_params = redis_connection.get(job_params_key)
job_params = _deserialized_job_params(loads, serialized_job_params, default_type=list)
job_params.append({'argument': {'name': key, 'value': {'type': 'dynamic', 'name': key}}, 'stage_uuid': 'stageless'})
redis_connection.set(job_params_key, dumps(job_params))
def _deserialized_job_params(deserialize_callback, serialized_job_params, default_type=dict):
if serialized_job_params is None:
return default_type()
else:
return deserialize_callback(serialized_job_params)
def _is_scalar_value(value):
return isinstance(value, str) or isinstance(value, int) or isinstance(value, float) or value is None
def _flatten_dict_value(param_key, param_value):
if not param_value:
return {param_key: None}
return flatten_parameter_dictionary({'{}_{}'.format(param_key, nested_key): nested_value for nested_key, nested_value in param_value.items()})
def _flatten_list_value(param_key, param_value):
if not param_value:
return {param_key: None}
list_of_keys = _list_of_keys(param_key, len(param_value))
return flatten_parameter_dictionary({key: value for key, value in zip(list_of_keys, param_value)})
def _list_of_keys(key, length_of_list_value):
return ['{}_{}'.format(key, list_index) for list_index in range(length_of_list_value)]
def _raw_json_from_parameters_file():
with open('foundations_job_parameters.json', 'r') as parameters_file:
return parameters_file.read()
def _parsed_json(file_contents):
import json
if file_contents == '':
return {}
else:
return json.loads(file_contents)
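# Hedged usage sketch (added for illustration, not part of the original module):
# flatten_parameter_dictionary joins nested keys with underscores and indexes
# list items, e.g.
#
#   flatten_parameter_dictionary({'model': {'depth': 4, 'widths': [64, 128]}, 'lr': 0.1})
#   # => {'model_depth': 4, 'model_widths_0': 64, 'model_widths_1': 128, 'lr': 0.1}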
|
lib/discord/constants/general.py
|
goztrk/django-htk
| 206 |
106735
|
<reponame>goztrk/django-htk<filename>lib/discord/constants/general.py
DISCORD_WEBHOOK_URL = 'https://discord.com/api/webhooks/{webhook_id}/{webhook_token}'
DISCORD_WEBHOOK_RELAY_PARAMS = [
'webhook_id',
'webhook_token',
'content',
]
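# Hedged usage sketch (added for illustration; the ids below are placeholders):
#
#   url = DISCORD_WEBHOOK_URL.format(webhook_id='1234567890', webhook_token='abcdef')
#   # -> 'https://discord.com/api/webhooks/1234567890/abcdef'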
|
third_party/com_fasterxml_jackson_module.bzl
|
wix/exodus
| 186 |
106762
|
load("//:import_external.bzl", import_external = "safe_wix_scala_maven_import_external")
def dependencies():
import_external(
name = "com_fasterxml_jackson_module_jackson_module_paranamer",
artifact = "com.fasterxml.jackson.module:jackson-module-paranamer:2.9.6",
artifact_sha256 = "dfd66598c0094d9a7ef0b6e6bb3140031fc833f6cf2e415da27bc9357cdfe63b",
srcjar_sha256 = "375052d977a4647b49a8512a2e269f3296c455544f080a94bc8855dbfd24ad75",
deps = [
"@com_fasterxml_jackson_core_jackson_databind",
"@com_thoughtworks_paranamer_paranamer"
],
)
import_external(
name = "com_fasterxml_jackson_module_jackson_module_scala_2_12",
artifact = "com.fasterxml.jackson.module:jackson-module-scala_2.12:2.9.6",
artifact_sha256 = "c775854c1da6fc4602d5850b65513d18cb9d955b3c0f64551dd58ccb24a85aba",
srcjar_sha256 = "5446419113a48ceb4fa802cd785edfc06531ab32763d2a2f7906293d1e445957",
deps = [
"@com_fasterxml_jackson_core_jackson_annotations",
"@com_fasterxml_jackson_core_jackson_core",
"@com_fasterxml_jackson_core_jackson_databind",
"@com_fasterxml_jackson_module_jackson_module_paranamer",
"@org_scala_lang_scala_library",
"@org_scala_lang_scala_reflect"
],
)
|
Packs/CounterTack/Integrations/CounterTack/CounterTack.py
|
diCagri/content
| 799 |
106766
|
<reponame>diCagri/content
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
import os
import os.path
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# remove proxy if not set to true in params
if not demisto.params().get('proxy'):
del os.environ['HTTP_PROXY']
del os.environ['HTTPS_PROXY']
del os.environ['http_proxy']
del os.environ['https_proxy']
''' GLOBALS/PARAMS '''
USERNAME = demisto.params().get('credentials').get('identifier')
PASSWORD = demisto.params().get('credentials').get('password')
SERVER_URL = demisto.params().get('server')[:-1] if demisto.params().get('server').endswith('/') else \
demisto.params().get('server')
FETCH_TIME = demisto.params().get('fetch_time', '3 days').strip()
FETCH_NOTIFICATIONS = demisto.params().get('fetch_notifications')
FETCH_BEHAVIORS = demisto.params().get('fetch_behviors')
# Should we use SSL
USE_SSL = not demisto.params().get('unsecure', False)
# Service base URL
BASE_PATH = '{}/api/v2/'.format(SERVER_URL)
# Headers to be sent in requests
DEFAULT_HEADERS = {
'Content-Type': 'application/json'
}
def http_request(method, suffix_url, headers=DEFAULT_HEADERS, body=None):
"""
returns the http request
"""
url = BASE_PATH + suffix_url
response = requests.request(
method,
url,
auth=(USERNAME, PASSWORD),
headers=headers,
verify=USE_SSL,
data=body
)
# handle request failure
if response.status_code not in {200}:
message = parse_error_response(response)
return_error('Error in API call to CounterTack with status code {}\n{}'.format(response.status_code, message))
try:
response = response.json()
except Exception:
return_error(response.content)
return response
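# Hedged usage sketch (added for illustration; the endpoint id and tag are
# placeholders): every command wrapper below funnels through http_request, e.g.
#
#   endpoints = http_request('GET', 'endpoints')
#   http_request('POST', 'endpoints/' + endpoint_id + '/tags', body=json.dumps(['compromised']))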
def parse_error_response(response):
try:
res = response.json()
msg = res.get('message')
if res.get('details') is not None and res.get('details')[0].get('message') is not None:
msg = msg + "\n" + json.dumps(res.get('details')[0])
except Exception:
return response.text
return msg
"""
ENDPOINTS
"""
def get_endpoints_request():
"""
This request returns a collection of endpoints.
"""
suffix_url = 'endpoints'
response = http_request('GET', suffix_url)
return response
def get_endpoints():
"""
Returns the information on existing endpoints
"""
data = []
endpoint_standards = []
endpoints = get_endpoints_request()
for endpoint in endpoints:
data.append({
'Id': endpoint.get('id'),
'Name': endpoint.get('name'),
'OS': endpoint.get('product_name'),
'IP': endpoint.get('ips'),
'Status': endpoint.get('status'),
'Threat': endpoint.get('threat')
})
endpoint_standards.append({
'Id': endpoint.get('id'),
'IPAddress': endpoint.get('ips'),
'Domain': endpoint.get('domain'),
'MACAddress': endpoint.get('mac'),
'OS': endpoint.get('product_name'),
'OSVersion': endpoint.get('driver_version'),
'Model': endpoint.get('current_profile'),
'Memory': endpoint.get('memory'),
'Processors': endpoint.get('num_cpus')
})
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(endpoints,
keyTransform=underscoreToCamelCase),
'Endpoint': endpoint_standards
}
headers = ['OS', 'Name', 'Threat', 'Status', 'Id', 'IP']
entry = {
'Type': entryTypes['note'],
'Contents': endpoints,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'CounterTack Endpoints', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def get_endpoint_request(endpoint_id):
"""
Request for a specific endpoint
"""
suffix_url = 'endpoints/' + endpoint_id
response = http_request('GET', suffix_url)
return response
def get_endpoint():
"""
Get the information for the requested endpoint
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
returns:
The information about the specified endpoint
"""
endpoint_id = demisto.args().get('endpoint_id')
response = get_endpoint_request(endpoint_id)
content = {
'OS': response.get('product_name'),
'Domain': response.get('domain'),
'IP': response.get('ip'),
'Threat': response.get('threat'),
'MaxImpact': response.get('max_impact'),
'TenantID': response.get('tenant'),
'IsQuarantined': response.get('is_quarantined'),
'Profile': response.get('current_profile'),
'Cluster_hosts': response.get('cluster_hosts'),
'Status': response.get('status'),
'Tags': response.get('tags')
}
endpoint_standards = {
'Id': response.get('id'),
'IPAddress': response.get('ips'),
'Domain': response.get('domain'),
'MACAddress': response.get('mac'),
'OS': response.get('product_name'),
'OSVersion': response.get('driver_version'),
'Model': response.get('current_profile'),
'Memory': response.get('memory'),
'Processors': response.get('num_cpus')
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response,
keyTransform=underscoreToCamelCase),
'Endpoint': endpoint_standards
}
headers = ['OS', 'Domain', 'IP', 'Threat', 'MaxImpact', 'TenantID', 'IsQuarantined',
'Profile', 'Tags', 'Cluster_Hosts', 'Status']
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'CounterTack Endpoint information:', content, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
"""
ENDPOINTS TAGS
"""
def endpoint_tags_request(endpoint_id):
"""
This request retrieves tags from specified endpoint
"""
suffix_url = 'endpoints/' + endpoint_id + '/tags'
response = http_request('GET', suffix_url)
return response
def get_endpoint_tags():
"""
Get the tags for the specified endpoint
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
"""
endpoint_id = demisto.args().get('endpoint_id')
response = endpoint_tags_request(endpoint_id)
response = {
'tags': response
}
tags_context = {
'Id': endpoint_id,
'tags': response
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(tags_context,
keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('CounterTack tags for the specified endpoint:', response, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def add_tags_request(endpoint_id, body):
"""
The request adds tags to the specified endpoint.
The request gets the endpoint ID and the tags the user wants to add.
"""
suffix_url = 'endpoints/' + endpoint_id + '/tags'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def add_tags():
"""
The command adds tags to the specified endpoint.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (array) body
The tags to add to the endpoint
"""
endpoint_id = demisto.args().get('endpoint_id')
body = argToList(demisto.args().get('tags'))
response = add_tags_request(endpoint_id, body)
response = endpoint_tags_request(endpoint_id)
response = {
'tags': response,
'Id': endpoint_id
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Endpoint tags were added successfully", response),
'EntryContext': context
}
demisto.results(entry)
def delete_tags_request(endpoint_id, body):
"""
This request deletes specific tags from the specified endpoint.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (array) body
The tags to delete from the endpoint
"""
suffix_url = 'endpoints/' + endpoint_id + '/tags'
response = http_request('DELETE', suffix_url, body=json.dumps(body))
return response
def delete_tags():
"""
The command deletes tags for the specified endpoint.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (array) body
The tags to delete from the endpoint
"""
endpoint_id = demisto.args().get('endpoint_id')
body = argToList(demisto.args().get('tags'))
response = delete_tags_request(endpoint_id, body)
response = endpoint_tags_request(endpoint_id)
response = {
'tags': response,
'Id': endpoint_id
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'Endpoint tags were deleted successfully', response),
'EntryContext': context
}
demisto.results(entry)
"""
ENDPOINTS COMMANDS
"""
def endpoint_quarantine_request(endpoint_id, body):
"""
Request to quarantine a specified endpoint
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) type
The type of the command: quarantine
"""
suffix_url = 'endpoints/' + endpoint_id + '/commands'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def endpoint_quarantine():
"""
Prevents an endpoint(s) from any network communication, but maintains a connection to the Sentinel Cluster
and addresses defined in the Global Whitelist.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) type
The type of the command: quarantine
"""
endpoint_id = demisto.args().get('endpoint_id')
body = {
'type': 'quarantine'
}
response = endpoint_quarantine_request(endpoint_id, body)
quarantine_response = get_endpoint_request(endpoint_id)
quarantine_context = {
'Id': endpoint_id,
'is_quarantine': quarantine_response.get('is_quarantined')
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(quarantine_context,
keyTransform=underscoreToCamelCase)
}
data = {
'Id': response.get('id'),
'user name': response.get('username'),
'request time': response.get('request_time'),
'endpoint ID': response.get('endpoint_ids'),
'command name': response.get('command_name'),
'status': response.get('status'),
}
entry = {
'Type': entryTypes['note'],
'Contents': quarantine_context,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('The command has been applied successfully:', data, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def disable_quarantine():
"""
Allows a previously quarantined endpoint to communicate with the network.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) type
The type of the command: lift_quarantine
"""
endpoint_id = demisto.args().get('endpoint_id')
body = {
'type': 'lift_quarantine'
}
response = endpoint_quarantine_request(endpoint_id, body)
quarantine_response = get_endpoint_request(endpoint_id)
quarantine_context = {
'Id': endpoint_id,
'is_quarantine': quarantine_response.get('is_quarantined')
}
data = {
'Id': response.get('id'),
'user name': response.get('username'),
'request time': response.get('request_time'),
'endpoint ID': response.get('endpoint_ids'),
'command name': response.get('command_name'),
'status': response.get('status'),
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(quarantine_context,
keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': quarantine_context,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('The command has been applied successfully:', data, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def file_extract_request(endpoint_id, body):
"""
Request for extracting file from specified endpoint
"""
suffix_url = 'endpoints/' + endpoint_id + '/commands'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def extract_file():
"""
Enables an API consumer to extract the file in addition to some file metadata.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) body
The type of the command: extract file and the file path
"""
endpoint_id = demisto.args().get('endpoint_id')
paths = argToList(demisto.args().get('file_path'))
body = {
'type': 'extract_files',
'paths': paths
}
response = file_extract_request(endpoint_id, body)
data = {
'Id': response.get('id'),
'User Name': response.get('username'),
'Request Time': response.get('request_time'),
'Endpoint ID': response.get('endpoint_ids'),
'Command Name': response.get('command_name'),
'Command Arguments': response.get('command_arg'),
'Status': response.get('status'),
}
context = {
'CounterTack.File(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
headers = ['Id', 'User Name', 'Request Time', 'Endpoint ID', 'Command Name', 'Command Arguments', 'Status']
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The file has been extracted successfully:', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def delete_file_request(endpoint_id, body):
"""
Deletes a file from the specified endpoint
"""
suffix_url = 'endpoints/' + endpoint_id + '/commands'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def delete_file():
"""
Deletes a file from the specified endpoint
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) body
The type of the command: delete_file and the file path
"""
endpoint_id = demisto.args().get('endpoint_id')
path = demisto.args().get('file_path')
body = {
'type': 'delete_file',
'path': path
}
delete_file_request(endpoint_id, body)
demisto.results('The file has been deleted successfully')
def kill_process_request(endpoint_id, body):
"""
Request to terminate all instances of the process identified in the command.
"""
suffix_url = 'endpoints/' + endpoint_id + '/commands'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def kill_process():
"""
Terminates all instances of the process identified in the command.
Processes can be identified by the PID or process name.
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
demisto parameter: (string) process_id
The ID of the process to terminate
demisto parameter: (string) process_name
The name of the process to terminate
"""
endpoint_id = demisto.args().get('endpoint_id')
pid = demisto.args().get('process_id')
name = demisto.args().get('process_name')
if not pid and not name:
return_error('Please provide either process_id or process_name')
body = {
'type': 'kill_process',
'pid': pid,
'name': name
}
response = kill_process_request(endpoint_id, body)
data = {
'Id': response.get('id'),
'User Name': response.get('username'),
'Request Time': response.get('request_time'),
'Endpoint ID': response.get('endpoint_ids'),
'Command Name': response.get('command_name'),
'Status': response.get('status'),
}
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response,
keyTransform=underscoreToCamelCase,
removeNull=True)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The process has been terminated', data, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
"""
ENDPOINT FILES
"""
def file_request():
"""
This request retrieves all extracted files for all endpoints on the cluster
"""
suffix_url = 'endpoints/files'
response = http_request('GET', suffix_url)
return response
def get_all_files():
data = []
files_standards = []
files = file_request()
for file in files:
data.append({
'Id': file.get('id'),
'user': file.get('user'),
'endpoint_id': file.get('endpoint_id'),
'path': file.get('path'),
'extraction_time': file.get('extraction_time'),
'Status': file.get('status')
})
files_standards.append({
'Size': file.get('size'),
'MD5': file.get('md5'),
'SHA256': file.get('sha256'),
'SSDeep': file.get('ssdeep'),
'Path': file.get('path')
})
context = {
'CounterTack.File(val.Id && val.Id === obj.Id)': createContext(files, keyTransform=underscoreToCamelCase),
outputPaths['file']: files_standards
}
headers = ['Status', 'Id', 'path', 'endpoint_id', 'extraction_time', 'user']
entry = {
'Type': entryTypes['note'],
'Contents': files,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'CounterTack Endpoints Files', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def endpoint_files_request(endpoint_id):
"""
This request returns all extracted files from the specified endpoint
"""
suffix_url = 'endpoints/' + endpoint_id + '/files'
response = http_request('GET', suffix_url)
return response
def get_endpoint_files():
"""
Returns extracted files from a specific endpoint
demisto parameter: (string) endpoint_id
The unique ID of the endpoint
"""
endpoint_id = demisto.args().get('endpoint_id')
data = []
files_standards = []
files = endpoint_files_request(endpoint_id)
for file in files:
data.append({
'Id': file.get('id'),
'User': file.get('user'),
'EndpointId': file.get('endpoint_id'),
'Path': file.get('path'),
'ExtractionTime': file.get('extraction_time'),
'Status': file.get('status')
})
files_standards.append({
'Size': file.get('size'),
'MD5': file.get('md5'),
'SHA256': file.get('sha256'),
'SSDeep': file.get('ssdeep'),
'Path': file.get('path')
})
context = {
'CounterTack.File(val.Id && val.Id === obj.Id)': createContext(files, keyTransform=underscoreToCamelCase),
outputPaths['file']: files_standards
}
headers = ['Status', 'Id', 'path', 'endpoint_id', 'extraction_time', 'user']
entry = {
'Type': entryTypes['note'],
'Contents': data,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The extracted files from the endpoint:', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def file_information_request(file_id):
"""
Request information for a specific file
"""
suffix_url = 'endpoints/files/' + file_id
response = http_request('GET', suffix_url)
return response
def get_file_information():
"""
Get the information of a specific file
demisto parameter: (string) file_id
The unique ID of the extracted file
"""
context = {}
files_standards = []
file_id = demisto.args().get('file_id')
response = file_information_request(file_id)
data = {
'endpoint_name': response.get('endpoint_name'),
'path': response.get('path'),
'size': response.get('size'),
'extraction_time': response.get('extraction_time'),
'status': response.get('status')
}
files_standards.append({
'Size': response.get('size'),
'MD5': response.get('md5'),
'SHA256': response.get('sha256'),
'SSDeep': response.get('ssdeep'),
'Path': response.get('path')
})
context['CounterTack.File(val.Id && val.Id === obj.Id)'] = createContext(response,
keyTransform=underscoreToCamelCase)
context[outputPaths['file']] = files_standards
headers = ['endpoint_name', 'path', 'size', 'status', 'extraction_time']
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('CounterTack File Information:', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def download_file_request(file_id):
# This request downloads an extracted file.
suffix_url = 'downloads/extractedfiles/' + file_id
response = http_request('GET', suffix_url)
return response
def download_file():
"""
Download an extracted file in a ZIP format.
demisto parameter: (string) file_id
The unique ID of the extracted file
"""
file_id = demisto.args().get('file_id')
response = download_file_request(file_id)
demisto.results(fileResult(file_id + '.zip', response.content))
"""
BEHAVIORS
"""
def get_behaviors_request():
"""
This request retrieves information on a collection of behaviors.
"""
suffix_url = 'behaviors'
response = http_request('GET', suffix_url)
return response
def get_behaviors():
"""
Retrieve information on a collection of behaviors.
"""
data = []
behaviors = get_behaviors_request()
for behavior in behaviors:
data.append({
'Id': behavior.get('id'),
'Name': behavior.get('name'),
'Type': behavior.get('type'),
'ImpactLevel': behavior.get('impact_level'),
'lastReported': behavior.get('last_reported'),
'EndpointId': behavior.get('endpoint_id')
})
context = {
'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(behaviors,
keyTransform=underscoreToCamelCase)
}
headers = ['Name', 'Id', 'Type', 'ImpactLevel', 'EndpointId', 'lastReported']
entry = {
'Type': entryTypes['note'],
'Contents': behaviors,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('CounterTack Endpoints Behaviors', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
def get_behavior_request(behavior_id):
"""
    Request for getting a specified behavior
"""
suffix_url = 'behaviors/' + behavior_id
response = http_request('GET', suffix_url)
return response
def get_behavior():
"""
Get behavior information
demisto parameter: behavior_id(string)
    The unique ID of the behavior
"""
behavior_id = demisto.args().get('behavior_id')
response = get_behavior_request(behavior_id)
data = {
'Id': response.get('id'),
'Name': response.get('name'),
'ImpactLevel': response.get('impact_level'),
'LastActive': response.get('last_active'),
'EventCount': response.get('event_count'),
'MaxImpact': response.get('max_impact'),
'EndpointId': response.get('endpoint_id'),
'Type': response.get('type'),
}
context = {
'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
headers = ['Name', 'Id', 'ImpactLevel', 'MaxImpact', 'EventCount', 'Type', 'EndpointId', 'LastActive']
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('CounterTack Behavior information', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
"""
BEHAVIORS TAGS
"""
def behaviour_add_tags_request(behaviour_id, body):
"""
The request adds tags to specified behaviour
"""
suffix_url = 'behaviors/' + behaviour_id + '/tags'
response = http_request('POST', suffix_url, body=json.dumps(body))
return response
def add_behavior_tags():
"""
Add specific tags to specified behavior
    demisto parameter: (string) behaviour_id
    The unique ID of the behavior
    demisto parameter: (array) tags
    The tags to add to the behavior. Separate the tags with a comma.
"""
behaviour_id = demisto.args().get('behaviour_id')
body = argToList(demisto.args().get('tags'))
response = behaviour_add_tags_request(behaviour_id, body)
behavior_tags = get_behavior_request(behaviour_id)
response = {
'tags': behavior_tags.get('tags'),
'Id': behaviour_id
}
context = {
'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Behavior tags were added successfully', response),
'EntryContext': context
}
demisto.results(entry)
def delete_tags_behavior_request(behaviour_id, body):
suffix_url = 'behaviors/' + behaviour_id + '/tags'
response = http_request('DELETE', suffix_url, body=json.dumps(body))
return response
def delete_behavior_tags():
"""
Delete specific tags from behavior
    demisto parameter: (string) behaviour_id
    The unique ID of the behavior
    demisto parameter: (array) tags
    The tags to delete from the behavior. Separate the tags with a comma.
"""
behaviour_id = demisto.args().get('behaviour_id')
body = argToList(demisto.args().get('tags'))
response = delete_tags_behavior_request(behaviour_id, body)
response = get_behavior_request(behaviour_id)
response = {
'tags': response.get('tags'),
'Id': behaviour_id
}
context = {
'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Behavior tags were deleted successfully', response, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
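# Hedged usage sketch (not part of the integration): from the war room, the two tag
# commands registered in the EXECUTION section below would typically be invoked like
# this; only the command and argument names come from this file, the tag values are
# placeholders.
#   !countertack-add-behavior-tags behaviour_id=<behavior_id> tags=suspicious,review
#   !countertack-delete-behavior-tags behaviour_id=<behavior_id> tags=review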
"""
SEARCH
"""
def search_endpoints_request(exp):
"""
Request for endpoints search using CQL expression
"""
suffix_url = 'search/endpoints' + exp
response = http_request('GET', suffix_url)
return response
def search_behaviors_request(exp):
"""
    Request for behaviors search using CQL expression
"""
suffix_url = 'search/behaviors' + exp
response = http_request('GET', suffix_url)
return response
def search_events_request(exp):
"""
Request for events search using CQL expression
"""
suffix_url = 'search/events' + exp
response = http_request('GET', suffix_url)
return response
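# Hedged note (not part of the original file): the CQL expressions passed to these
# search helpers follow the "<collection>.<field><operator><value>" pattern that
# fetch_incidents() uses further below; the field and value here are illustrative only.
#   search_events_request('?expression=' + 'events.time_stamp>2019-01-01T00:00:00')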
def search_events():
"""
Request for events search using CQL expression
demisto parameter: (dict) expression
The CQL expression to be used for the search
"""
data = []
expression = demisto.args().get('expression')
exp = '?expression=' + expression
events = search_events_request(exp)
if events.get('results'):
results = events.get('results')
results_lst = list()
for i in range(len(results)):
results_lst.append({k.replace('events.', ''): v for k, v in results[i].items()})
events['results'] = results_lst
for event in events.get('results'):
data.append({
'Id': event.get('id'),
'Events Action': event.get('action'),
'Events Impact': event.get('impact'),
'Events EndpointID': event.get('endpoint_id'),
'Event Type': event.get('event_type'),
'Collected time': event.get('time_stamp'),
'Source process PID': event.get('source_process_pid'),
'Source process name': event.get('source_process_name')
})
context = {
'CounterTack.Event(val.Id && val.Id === obj.Id)': createContext(results_lst,
keyTransform=underscoreToCamelCase,
removeNull=True)
}
        headers = ['Id', 'Event Type', 'Events Action', 'Events EndpointID', 'Events Impact',
'Collected time', 'Source process PID', 'Source process name']
entry = {
'Type': entryTypes['note'],
'Contents': results_lst,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Results of the events search', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
else:
demisto.results('No results found')
def search_endpoints():
"""
Request for endpoints search using CQL expression
demisto parameter: (dict) expression
The CQL expression to be used for the search
"""
data = []
endpoint_standards = []
expression = demisto.args().get('expression')
exp = '?expression=' + expression
endpoints = search_endpoints_request(exp)
if endpoints.get('results'):
results = endpoints.get('results')
results_lst = list()
for i in range(len(results)):
results_lst.append({k.replace('endpoints.', ''): v for k, v in results[i].items()})
endpoints['results'] = results_lst
for endpoint in endpoints.get('results'):
data.append({
'Id': endpoint.get('id'),
'Name': endpoint.get('name'),
'OS': endpoint.get('product_name'),
'IP': endpoint.get('ips'),
'Status': endpoint.get('status'),
'Threat': endpoint.get('threat')
})
endpoint_standards.append({
'Id': endpoint.get('id'),
'IPAddress': endpoint.get('ips'),
'Domain': endpoint.get('domain'),
'MACAddress': endpoint.get('mac'),
'OS': endpoint.get('product_name'),
'OSVersion': endpoint.get('driver_version'),
'Model': endpoint.get('current_profile'),
'Memory': endpoint.get('memory'),
'Processors': endpoint.get('num_cpus')
})
context = {
'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(results_lst,
keyTransform=underscoreToCamelCase,
removeNull=True),
'Endpoint': endpoint_standards
}
        headers = ['Status', 'Name', 'Id', 'OS', 'Threat', 'IP']
entry = {
'Type': entryTypes['note'],
'Contents': results_lst,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Results of the endpoints search', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
else:
demisto.results('No results found')
def search_behaviors():
"""
Request for behaviors search using CQL expression
demisto parameter: (dict) expression
The CQL expression to be used for the search
"""
data = []
expression = demisto.args().get('expression')
exp = '?expression=' + expression
behaviors = search_behaviors_request(exp)
if behaviors.get('results'):
results = behaviors.get('results')
results_lst = list()
for i in range(len(results)):
results_lst.append({k.replace('behaviors.', ''): v for k, v in results[i].items()})
behaviors['results'] = results_lst
for behavior in behaviors.get('results'):
data.append({
'Id': behavior.get('id'),
'Name': behavior.get('name'),
'Type': behavior.get('type'),
'Impact_Level': behavior.get('impact_level'),
'lastReported': behavior.get('last_reported'),
'EndpointID': behavior.get('endpoint_id')
})
context = {
'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(results_lst,
keyTransform=underscoreToCamelCase,
removeNull=True)
}
headers = ['Name', 'Type', 'Impact_Level', 'Id', 'EndpointID', 'lastReported']
entry = {
'Type': entryTypes['note'],
'Contents': results_lst,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Results of the behaviors search', data, headers, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
else:
demisto.results('No results found')
def hashes_search_request(exp):
"""
    Request for hashes search using CQL expression
"""
suffix_url = 'search/hashes' + exp
response = http_request('GET', suffix_url)
return response
def search_hashes():
"""
Request for hashes search using CQL expression
demisto parameter: (dict) expression
The CQL expression to be used for the search
"""
data = []
file_standards = []
expression = demisto.args().get('expression')
exp = '?expression=' + expression
hashes = hashes_search_request(exp)
if hashes.get('results'):
results = hashes.get('results')
results_lst = list()
for i in range(len(results)):
results_lst.append({k.replace('hashes.', ''): v for k, v in results[i].items()})
hashes['results'] = results_lst
for hash_type in hashes.get('results'):
file_hash_type = hash_type.get('type', '').upper()
if file_hash_type == 'SSDEEP':
file_hash_type = 'SSDeep'
hash_id = hash_type.get('id')
data.append({
file_hash_type: hash_id,
'Type': file_hash_type,
'Impact': hash_type.get('impact'),
'VT report location': hash_type.get('vt_report_location'),
'AV Coverage': hash_type.get('av_coverage')
})
if file_hash_type:
file_standards.append({
file_hash_type: hash_id
})
context = {
'CounterTack.Hash(val.hash_id && val.hash_id === obj.hash_id)': createContext(data),
outputPaths['file']: file_standards
}
entry = {
'Type': entryTypes['note'],
'Contents': results_lst,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Results of the hashes search:', data, removeNull=True),
'EntryContext': context
}
demisto.results(entry)
else:
demisto.results('No results found')
"""
FETCH INCIDENTS
"""
def search_notifications_request(params=''):
"""
Request for notifications search using CQL expression
"""
suffix_url = 'search/notifications?expression=' + params
response = http_request('GET', suffix_url)
return response
def fetch_behaviors_request(params=''):
"""
Request for behaviors search using CQL expression
"""
suffix_url = 'search/behaviors?expression=' + params
response = http_request('GET', suffix_url)
return response
def fetch_incidents():
incidents = []
last_run = demisto.getLastRun()
    if last_run and last_run.get('time_stamp'):
last_update_time = last_run['time_stamp']
else:
# In first run
last_update_time, _ = parse_date_range(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%S.%f'[:-3])
max_timestamp = last_update_time
if FETCH_BEHAVIORS:
params = 'behaviors.time_stamp>' + last_update_time
behaviors = fetch_behaviors_request(params)
for behavior in behaviors.get('results'):
incident = behavior_to_incident(behavior)
            # trim the timezone suffix so the timestamps compare lexicographically
            time_stamp = behavior.get('behaviors.time_stamp')[:-5]
if time_stamp > max_timestamp:
max_timestamp = time_stamp
incidents.append(incident)
if FETCH_NOTIFICATIONS:
params = 'notifications.time_stamp>' + last_update_time
notifications = search_notifications_request(params)
for notification in notifications.get('results'):
incident = notifications_to_incidents(notification)
time_stamp = notification.get('notifications.time_stamp')[:-5]
if time_stamp > max_timestamp:
max_timestamp = time_stamp
incidents.append(incident)
demisto.setLastRun({
'time_stamp': max_timestamp
})
demisto.incidents(incidents)
def behavior_to_incident(behavior):
incident = {}
incident['name'] = 'CounterTack Behavior - ' + behavior.get('behaviors.name')
incident['rawJSON'] = json.dumps(behavior)
return incident
def notifications_to_incidents(notification):
incident = {}
incident['name'] = 'CounterTack Notification - ' + notification.get('notifications.message')
incident['rawJSON'] = json.dumps(notification)
return incident
"""
EXECUTION
"""
command = demisto.command()
LOG('Running command "{}"'.format(command))
try:
if command == 'test-module':
get_endpoints_request()
demisto.results('ok')
elif command == 'fetch-incidents':
fetch_incidents()
elif command == 'countertack-get-endpoints':
get_endpoints()
elif command == 'countertack-get-endpoint':
get_endpoint()
elif command == 'countertack-get-endpoint-tags':
get_endpoint_tags()
elif command == 'countertack-add-tags':
add_tags()
elif command == 'countertack-delete-tags':
delete_tags()
elif command == 'countertack-endpoint-quarantine':
endpoint_quarantine()
elif command == 'countertack-disable-quarantine':
disable_quarantine()
elif command == 'countertack-extract-file':
extract_file()
elif command == 'countertack-delete-file':
delete_file()
elif command == 'countertack-get-all-files':
get_all_files()
elif command == 'countertack-get-endpoint-files':
get_endpoint_files()
elif command == 'countertack-get-file-information':
get_file_information()
elif command == 'countertack-download-file':
download_file()
elif command == 'countertack-get-behaviors':
get_behaviors()
elif command == 'countertack-get-behavior':
get_behavior()
elif command == 'countertack-add-behavior-tags':
add_behavior_tags()
elif command == 'countertack-delete-behavior-tags':
delete_behavior_tags()
elif command == 'countertack-search-events':
search_events()
elif command == 'countertack-search-hashes':
search_hashes()
elif command == 'countertack-search-endpoints':
search_endpoints()
elif command == 'countertack-search-behaviors':
search_behaviors()
elif command == 'countertack-kill-process':
kill_process()
except Exception as e:
    LOG(e)
    return_error(str(e))
|
openverse_api/catalog/api/migrations/0012_auto_20190102_2012.py
|
ritesh-pandey/openverse-api
| 122 |
106769
|
# Generated by Django 2.0.8 on 2019-01-02 20:12
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0011_auto_20181117_0029'),
]
operations = [
migrations.AlterUniqueTogether(
name='usertags',
unique_together=set(),
),
migrations.RemoveField(
model_name='usertags',
name='image',
),
migrations.RemoveField(
model_name='usertags',
name='tag',
),
migrations.RemoveField(
model_name='usertags',
name='user',
),
migrations.AlterField(
model_name='image',
name='tags',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='image',
name='watermarked',
field=models.NullBooleanField(),
),
migrations.DeleteModel(
name='UserTags',
),
]
|
Contributions/Python/simple-SD.py
|
OluSure/Hacktoberfest2021-1
| 215 |
106772
|
# Hacktoberfest 2021
# Problem: House Prices
#
# You are given an array that represents house prices.
# Calculate and output the percentage of houses that are within one standard deviation from the mean.
# To calculate the percentage, divide the number of houses that satisfy the condition by the total number of houses, and multiply the result by 100.
import numpy as np
data = np.array([150000, 125000, 320000, 540000, 200000, 120000, 160000, 230000, 280000, 290000, 300000, 500000, 420000, 100000, 150000, 280000])
m = np.mean(data)
d = np.std(data)
y1 = m-d
y2 = m+d
s = len(data[(data > y1) & (data < y2)])
r = (s/len(data))*100
print(r)
|
oncogemini/gemini_plot.py
|
fakedrtom/cancer_gemini
| 221 |
106784
|
#!/usr/bin/env python
def plot(parser, args):
"""
To do.
"""
pass
if __name__ == "__main__":
    plot(None, None)  # the stub ignores its (parser, args) arguments
|
bibliopixel/project/types/spi_interface.py
|
rec/leds
| 253 |
106795
|
<filename>bibliopixel/project/types/spi_interface.py<gh_stars>100-1000
import functools
from ...drivers.spi_interfaces import SPI_INTERFACES
USAGE = """
A spi_interface is represented by a string.
Possible values are """ + ', '.join(sorted(SPI_INTERFACES.__members__))
@functools.singledispatch
def make(c):
raise ValueError("Don't understand type %s" % type(c), USAGE)
@make.register(SPI_INTERFACES)
def _(c):
return c
@make.register(str)
def _(c):
return SPI_INTERFACES[c]
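# Hedged usage sketch (appended for illustration): the member name below is an
# assumption; USAGE above lists the real members of SPI_INTERFACES.
#   make(SPI_INTERFACES.DUMMY)   # an enum member passes through unchanged
#   make('DUMMY')                # a string is looked up by name in SPI_INTERFACES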
|
codes/models/modules/denoised_LR.py
|
WestCityInstitute/InvDN
| 122 |
106835
|
import sys
sys.path.append('./')
import numpy as np
import torch
import glob
import cv2
from skimage import img_as_float32 as img_as_float
from skimage import img_as_ubyte
import time
import os
from codes.models.modules.VDN import VDN as DN
from codes.data.util import imresize_np
def denoise(noisy_path, pretrained_path, save_path, scale=4, LR_path=None):
use_gpu = True
C = 3
dep_U = 4
# load the pretrained model
print('Loading the Model')
checkpoint = torch.load(pretrained_path)
net = DN(C, dep_U=dep_U, wf=64)
if use_gpu:
net = torch.nn.DataParallel(net).cuda()
net.load_state_dict(checkpoint)
else:
load_state_dict_cpu(net, checkpoint)
net.eval()
files = glob.glob(os.path.join(noisy_path, '*.png'))
if not os.path.exists(save_path):
os.mkdir(save_path)
for i in range(len(files)):
im_noisy = cv2.imread(files[i])[:, :, ::-1]
im_noisy = img_as_float(cv2.cvtColor(im_noisy, cv2.COLOR_BGR2RGB))
im_noisy = torch.from_numpy(im_noisy.transpose((2, 0, 1))[np.newaxis,])
_, C, H, W = im_noisy.shape
if H % 2**dep_U != 0:
H -= H % 2**dep_U
if W % 2**dep_U != 0:
W -= W % 2**dep_U
        im_noisy = im_noisy[:, :, :H, :W]  # crop the spatial dims, not the batch/channel dims
if use_gpu:
im_noisy = im_noisy.cuda()
print('Begin Testing on GPU')
else:
print('Begin Testing on CPU')
with torch.autograd.set_grad_enabled(False):
tic = time.time()
phi_Z = net(im_noisy, 'test')
toc = time.time() - tic
err = phi_Z.cpu().numpy()
print('Time: %.5f' % toc)
if use_gpu:
im_noisy = im_noisy.cpu().numpy()
else:
im_noisy = im_noisy.numpy()
im_denoise = im_noisy - err[:, :C, ]
im_denoise = np.transpose(im_denoise.squeeze(), (1, 2, 0))
im_denoise = img_as_ubyte(im_denoise.clip(0, 1))
file_name = files[i].split('/')[-1]
cv2.imwrite(os.path.join(save_path, file_name), im_denoise)
        if LR_path is not None:
if not os.path.exists(LR_path):
os.mkdir(LR_path)
LR_denoise = imresize_np(im_denoise, 1 / scale, True)
cv2.imwrite(os.path.join(LR_path, file_name), LR_denoise)
def load_state_dict_cpu(net, state_dict0):
state_dict1 = net.state_dict()
for name, value in state_dict1.items():
assert 'module.'+name in state_dict0
state_dict1[name] = state_dict0['module.'+name]
net.load_state_dict(state_dict1)
def main():
# Validation
    noisy_path = ''
    pretrained_path = ''
    save_path = ''
    LR_path = ''
denoise(noisy_path, pretrained_path, save_path, 4, LR_path)
if __name__ == '__main__':
main()
|
relation_extraction/core/parser.py
|
linatal/emnlp2017-relation-extraction
| 299 |
106844
|
<reponame>linatal/emnlp2017-relation-extraction
# coding: utf-8
# Copyright (C) 2016 UKP lab
#
# Author: <NAME> (ukp.tu-darmstadt.de/ukp-home/)
#
import numpy as np
np.random.seed(1)
import os
import codecs
from core import keras_models
from core import embeddings
class RelParser:
def __init__(self, relext_model_name, models_folder="../trainedmodels/"):
"""
Initialize a new relation parser with the given model type. This class simplifies the loading of models and
encapsulates encoding sentences into the correct format for the given model.
:param relext_model_name: The name of the model type that should correspond to the correct model class and
the name of the model file
:param models_folder: location of pre-trained model files
"""
module_location = os.path.abspath(__file__)
module_location = os.path.dirname(module_location)
model_params = keras_models.model_params
max_sent_len = keras_models.model_params['max_sent_len']
self._embeddings, self._word2idx = embeddings.load(keras_models.model_params['wordembeddings'])
print("Loaded embeddings:", self._embeddings.shape)
self._idx2word = {v: k for k, v in self._word2idx.items()}
self._model = getattr(keras_models, relext_model_name)(model_params,
np.zeros((len(self._word2idx), 50), dtype='float32'),
max_sent_len, len(keras_models.property2idx))
self._model.load_weights(models_folder + relext_model_name + ".kerasmodel")
with codecs.open(os.path.join(module_location, "../../resources/properties-with-labels.txt"), encoding='utf-8') as infile:
self._property2label = {l.split("\t")[0]: l.split("\t")[1].strip() for l in infile.readlines()}
self._graphs_to_indices = keras_models.to_indices
if "Context" in relext_model_name:
self._graphs_to_indices = keras_models.to_indices_with_extracted_entities
elif "CNN" in relext_model_name:
self._graphs_to_indices = keras_models.to_indices_with_relative_positions
def classify_graph_relations(self, graphs):
"""
        Classify graph relations in the given list of sentences. Each sentence should be a dictionary that has "tokens"
        and "edgeSet" fields. The edge set encodes pairs of entities in the sentence that would be assigned either a
        relation type or an empty relation.
:param graphs: input as a list of dictionaries
:return: the input graphs with labeled edges
"""
graphs = keras_models.split_graphs(graphs)
data_as_indices = list(self._graphs_to_indices(graphs, self._word2idx))
probabilities = self._model.predict(data_as_indices[:-1], verbose=0)
if len(probabilities) == 0:
return None
classes = np.argmax(probabilities, axis=-1)
assert len(classes) == len(graphs)
for gi, g in enumerate(graphs):
if gi < len(classes):
g_classes = classes[gi]
for i, e in enumerate(g['edgeSet']):
if i < len(g_classes):
e['kbID'] = keras_models.idx2property[g_classes[i]]
e["lexicalInput"] = self._property2label[e['kbID']] if e['kbID'] in self._property2label else embeddings.all_zeroes
else:
e['kbID'] = "P0"
e["lexicalInput"] = embeddings.all_zeroes
return graphs
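# Hedged usage sketch (not part of the original module): the model name and the exact
# edgeSet fields are assumptions based on the docstrings above, not verified values.
#   parser = RelParser("model_ContextWeighted", models_folder="../trainedmodels/")
#   graphs = [{"tokens": ["Berlin", "is", "the", "capital", "of", "Germany"],
#              "edgeSet": [{"left": [0], "right": [5]}]}]
#   labeled = parser.classify_graph_relations(graphs)  # edges gain 'kbID' and 'lexicalInput'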
|
awxkit/awxkit/api/pages/dashboard.py
|
Avinesh/awx
| 11,396 |
106847
|
from awxkit.api.resources import resources
from . import base
from . import page
class Dashboard(base.Base):
pass
page.register_page(resources.dashboard, Dashboard)
|
data/data_conv/create_lda.py
|
huonw/nmslib
| 2,031 |
106851
|
<reponame>huonw/nmslib
#!/usr/bin/env python
import logging, gensim, bz2, sys
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# load id->word mapping (the dictionary), one of the results of step 2 above
id2word = gensim.corpora.Dictionary.load_from_text('sparse_wiki_wordids.txt')
# load corpus iterator
mmTrain = gensim.corpora.MmCorpus(bz2.BZ2File('sparse_wiki_tfidf_part1.mm.bz2'))
mmTest = gensim.corpora.MmCorpus(bz2.BZ2File('sparse_wiki_tfidf_part2.mm.bz2'))
print(mmTrain)
print(mmTest)
if len(sys.argv) != 3:
raise Exception("Usage: <number of topics> <number of cores>")
ntop=int(sys.argv[1])
ncores=int(sys.argv[2])
print "Using " + str(ntop) + " topics and " + str(ncores) + " cores"
if ncores > 1:
print "Running in a MULTI-core mode"
lda = gensim.models.LdaMulticore(corpus=mmTrain, id2word=id2word, num_topics= ntop, workers=ncores)
else:
print "Running in a SINGLE-core mode"
lda = gensim.models.ldamodel.LdaModel(corpus=mmTrain, id2word=id2word, num_topics= ntop, update_every=0, passes=20)
lda_file = 'LDA/lda'+str(ntop)
lda.save(lda_file)
out_vect = 'LDA/wikipedia_lda'+str(ntop)+'.txt'
gensim.corpora.MmCorpus.serialize(out_vect, lda[mmTest])
|
json_to_models/dynamic_typing/__init__.py
|
bogdandm/attrs-api-client
| 111 |
106866
|
<filename>json_to_models/dynamic_typing/__init__.py
from .base import (
BaseType, ImportPathList, MetaData, Null, Unknown, get_hash_string
)
from .complex import ComplexType, DDict, DList, DOptional, DTuple, DUnion, SingleType, StringLiteral
from .models_meta import AbsoluteModelRef, ModelMeta, ModelPtr
from .string_datetime import IsoDateString, IsoDatetimeString, IsoTimeString, register_datetime_classes
from .string_serializable import (
BooleanString, FloatString, IntString, StringSerializable, StringSerializableRegistry, registry
)
from .typing import compile_imports, metadata_to_typing
|
kili/mutations/user/fragments.py
|
ASonay/kili-playground
| 214 |
106881
|
<gh_stars>100-1000
"""
Fragments of user mutations
"""
AUTH_PAYLOAD_FRAGMENT = '''
id
token
user {
id
}
'''
USER_FRAGMENT = '''
id
'''
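# Hedged usage sketch (illustrative only): fragments like these are normally interpolated
# into a GraphQL operation string; the mutation name below is hypothetical, not an actual
# Kili mutation.
# EXAMPLE_MUTATION = '''
# mutation SomeUserMutation($email: String!) {
#   someUserMutation(email: $email) {
#     %s
#   }
# }
# ''' % AUTH_PAYLOAD_FRAGMENT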
|
batchflow/models/torch/repr_mixin.py
|
analysiscenter/dataset
| 101 |
106883
|
<gh_stars>100-1000
""" Mixins for nn.Modules for better textual visualization. """
from textwrap import indent
class LayerReprMixin:
""" Adds useful properties and methods for nn.Modules, mainly related to visualization and introspection. """
VERBOSITY_THRESHOLD = 10
@property
def num_frozen_parameters(self):
return sum(p.numel() for p in self.parameters() if not p.requires_grad)
@property
def num_trainable_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
@property
def num_parameters(self):
return sum(p.numel() for p in self.parameters())
def __repr__(self):
if hasattr(self, 'verbosity') and self.verbosity < self.VERBOSITY_THRESHOLD:
return ''
msg = super().__repr__()
if getattr(self, 'collapsible', False):
msg = msg.replace('(\n (layer): ', ':').replace('\n ', '\n ')
msg = msg.replace('\n )\n)', '\n)').replace(')\n)', ')')
return msg
def prepare_repr(self, verbosity=1, collapsible=True, show_num_parameters=False, extra=False):
""" Set flags on children, call `repr`, delete flags.
Returns string.
"""
def set_flags(module):
setattr(module, 'verbosity', verbosity)
setattr(module, 'collapsible', collapsible)
setattr(module, 'show_num_parameters', show_num_parameters)
setattr(module, 'extra', extra)
def del_flags(module):
try:
delattr(module, 'verbosity')
delattr(module, 'collapsible')
delattr(module, 'show_num_parameters')
delattr(module, 'extra')
except AttributeError:
pass
self.apply(set_flags)
msg = repr(self)
self.apply(del_flags)
return msg
def repr(self, verbosity=1, collapsible=True, show_num_parameters=False, extra=False):
""" Set flags on children, call `repr`, delete flags.
Prints output to stdout.
"""
print(self.prepare_repr(verbosity=verbosity, collapsible=collapsible,
show_num_parameters=show_num_parameters, extra=extra))
class ModuleDictReprMixin(LayerReprMixin):
""" Mixin to allow `repr` for multiple levels for nn.ModuleDicts.
Also adds `__getitem__` for convenience.
Relies on modules having `shapes` dictionary, that for each children stores the information about their
input and output shapes.
Depending on `verbosity`, creates string representation for different levels:
- verbosity 1, modules and their shapes. For example, shapes of `initial_block`, `body` and `head`.
- verbosity 2, blocks inside modules. For example, shapes of blocks inside Encoder.
- verbosity 3, blocks inside repeated chains of `:class:~.blocks.Block`. Mainly used for debug purposes.
- verbosity 4, letters inside each `:class:~.layers.MultiLayer`. For example, each letter inside given block.
- verbosity 5, PyTorch implementations of each letter inside `:class:~.layers.MultiLayer`.
- verbosity 6+, default repr of nn.Module.
For most cases, levels 2 and 4 should be used.
Additional parameters can be used to show number of parameters inside each level and collapse multilines.
"""
def __getitem__(self, key):
if isinstance(key, int):
key = list(self.keys())[key]
return super().__getitem__(key)
def prepare_shape(self, shape, indent=0):
""" Beautify shape or list of shapes.
Changes the first dimension (batch) to `?`.
Makes multiple lines for lists of shapes with provided indentation.
"""
#pylint: disable=redefined-outer-name
if isinstance(shape, tuple):
msg = ', '.join([f'{item:>3}' for item in shape[1:]])
return f' (?, {msg}) '
if isinstance(shape, list):
msg = '[' + self.prepare_shape(shape[0])[1:-1] + ','
for shape_ in shape[1:]:
msg += '\n ' + ' '*indent + self.prepare_shape(shape_)[1:-1] + ','
msg = msg[:-1] + ']'
return msg
raise TypeError(f'Should be used on tuple or list of tuples, got {type(shape)} instead.')
def __repr__(self):
if hasattr(self, 'verbosity'):
indent_prefix = ' '
# Parse verbosity. If equal to max level, set flag
verbosity = self.verbosity
if verbosity >= 5:
verbosity = 4
detailed_last_level = True
else:
detailed_last_level = False
if len(self.keys()):
key = list(self.keys())[0]
input_shapes, output_shapes = None, None
if (len(self.items()) == 1 and getattr(self, 'collapsible', False)
and getattr(self[key], 'VERBOSITY_THRESHOLD', -1) == self.VERBOSITY_THRESHOLD):
# Subclasses names can be folded, i.e. `Block:ResBlock(` instead of `Block(\n ResBlock(`
msg = self._get_name() + ':' + repr(self[key])
msg = msg.replace(')\n)', ')')
else:
msg = self._get_name() + '(\n'
extra_repr = self.extra_repr()
if extra_repr:
msg += indent(extra_repr, prefix=indent_prefix) + '\n'
max_key_length = max(len(key) for key in self.keys())
for key, value in self.items():
# Short description: module name and description of shapes
empty_space = ' ' * (1 + max_key_length - len(key))
module_short_description = f'({key}:{empty_space}'
if key in self.shapes:
input_shapes, output_shapes = self.shapes.get(key)
current_line_len = len(module_short_description)
input_shapes = self.prepare_shape(input_shapes, indent=current_line_len)
module_short_description += input_shapes + ' ⟶ '
current_line_len = len(module_short_description.splitlines()[-1]) + 1
output_shapes = self.prepare_shape(output_shapes, indent=current_line_len).strip(' ')
module_short_description += output_shapes
if getattr(self, 'show_num_parameters', False):
num_parameters = sum(p.numel() for p in value.parameters() if p.requires_grad)
module_short_description += f', #params={num_parameters:,}'
module_short_description += ')'
# Long description: ~unmodified repr of a module
module_long_description = repr(value).strip(' ')
# Select appropriate message
module_description = ''
if verbosity > self.VERBOSITY_THRESHOLD:
module_description = f'({key}): ' + module_long_description
if verbosity == self.VERBOSITY_THRESHOLD or module_description == f'({key}): ':
module_description = module_short_description
if self.VERBOSITY_THRESHOLD == 4 and detailed_last_level:
module_description = (module_short_description + ':\n' +
indent(module_long_description, prefix=indent_prefix))
msg += indent(module_description, prefix=indent_prefix) + '\n'
msg = msg.replace('\n\n', '\n')
msg += ')'
if len(self.items()) == 1 and getattr(self, 'collapsible', False) and 'r0' in msg:
msg = msg.replace('(\n (r0:', '(r0:', 1)
msg = msg.replace(')\n)', ')')
else:
msg = self._get_name() + '()'
return msg
return super().__repr__()
REPR_DOC = '\n'.join(ModuleDictReprMixin.__doc__.split('\n')[3:])
LayerReprMixin.repr.__doc__ += '\n' + REPR_DOC
LayerReprMixin.prepare_repr.__doc__ += '\n' + REPR_DOC
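# Hedged usage sketch (appended for illustration): `model` stands for any nn.ModuleDict-based
# network that mixes in ModuleDictReprMixin; only the method names come from this file.
#   model.repr(verbosity=2, show_num_parameters=True)   # blocks inside modules, with parameter counts
#   text = model.prepare_repr(verbosity=4)              # per-letter breakdown, returned as a string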
|
src/enamlnative/widgets/fragment.py
|
codelv/enaml-native
| 237 |
106903
|
"""
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""
from atom.api import Typed, ForwardTyped, Bool, observe
from enaml.core.declarative import d_
from enaml.core.conditional import Conditional, new_scope
from enaml.widgets.toolkit_object import ToolkitObject, ProxyToolkitObject
class ProxyFragment(ProxyToolkitObject):
""" The abstract definition of a proxy fragment object.
"""
#: A reference to the Label declaration.
declaration = ForwardTyped(lambda: Fragment)
def set_cached(self, cached):
raise NotImplementedError
def set_defer_loading(self, defer):
raise NotImplementedError
class Fragment(Conditional, ToolkitObject):
""" Fragment a "sub" activity with a lifecycle, view, and state.
A fragment has no "widget" but it can have child
widgets that will define it's view. The children are rendered
when the fragment's view is requested.
"""
#: A reference to the proxy object.
proxy = Typed(ProxyFragment)
#: Don't destroy the view once loaded
cached = d_(Bool())
#: Defer loading of child nodes
defer_loading = d_(Bool(True))
def refresh_items(self):
""" Refresh the items of the pattern.
This method destroys the old items and creates and initializes
the new items.
        It is overridden to NOT insert the children into the parent. The Fragment
adapter handles this.
"""
items = []
if self.condition:
for nodes, key, f_locals in self.pattern_nodes:
with new_scope(key, f_locals):
for node in nodes:
child = node(None)
if isinstance(child, list):
items.extend(child)
else:
items.append(child)
for old in self.items:
if not old.is_destroyed:
old.destroy()
#: Insert items into THIS node, NOT the PARENT
#if len(items) > 0:
# self.parent.insert_children(self, items)
self.items = items
@observe('cached', 'defer_loading')
def _update_proxy(self, change):
""" Update the proxy """
super(Fragment, self)._update_proxy(change)
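# Hedged usage sketch (enaml declarative syntax; the child widget name is an assumption
# about enaml-native's widget set, not something defined in this module):
#   enamldef DetailsFragment(Fragment):
#       cached = True
#       TextView:
#           text = "Rendered when the fragment's view is requested"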
|