#!/usr/bin/env python
# coding: utf-8
from __future__ import division, print_function
import streamlit as st
import h5py
from PIL import Image
import os
import numpy as np
import json
import predict3
from tensorflow.keras.models import load_model, Model
from tensorflow.keras.layers import Dense, Activation, GlobalAveragePooling2D
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.optimizers import Adam
import argparse
import pandas as pd
from tqdm import tqdm
IM_WIDTH, IM_HEIGHT = 299, 299
NB_EPOCHS = 5
BATCH_SIZE = 32
FC_SIZE = 1024
# Get Inception model without final layer
base_model = InceptionV3(weights='imagenet', include_top=False)
# Add new fully connected layer to base model
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(FC_SIZE, activation='relu')(x)
predictions = Dense(38, activation='softmax')(x)
model = Model(base_model.input, predictions)
# Compile model
model.compile(optimizer=Adam(lr=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
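# Illustrative sketch (optional, not used by the flow below): when only the new classification
# head should be trained, the InceptionV3 base is typically frozen before compiling so its
# ImageNet weights stay fixed. FREEZE_BASE is an assumption introduced here for illustration.
FREEZE_BASE = False  # set True only for head-only fine-tuning
if FREEZE_BASE:
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer=Adam(lr=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])  # re-compile so the new trainable flags take effect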
#from tensorflow.keras.models import load_weights
# Model path
folder_path = "./models"
#model_name = "model_vgg16_2.hdf5"
#model_name = "inception_1.h5"
model_name = "inception_15ep.h5"
model_file = os.path.join(folder_path, model_name)
# Load your trained model
#model = load_model(model_file)
#import joblib
#model = joblib.load('./models/export_resnet34_model.pkl')
#import pickle
#pkl_filename = './models/export_resnet34_model.pkl'
#with open(pkl_filename, 'rb') as file:
#model = pickle.load(file)
model.load_weights(model_file)
st.markdown("<h1 style='text-align: left; color: green;'>Welcome to your plant HealthScanner!</h1>", unsafe_allow_html=True)
st.write("")
image = Image.open('./models/logo_crop_2.png')
st.sidebar.image(image, use_column_width = True)
st.sidebar.title('Image diagnosis')
st.set_option('deprecation.showfileUploaderEncoding', False)
img_file_buffer = st.sidebar.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
if img_file_buffer is not None:
#image = np.array(Image.open(img_file_buffer))
image = Image.open(img_file_buffer)
st.image(image, caption='Uploaded Image.')
processed_image = predict3.preprocess_image(img_file_buffer)
prediction = predict3.model_predict(processed_image, model)
st.write("### Diagnosis results:")
res = '%s : %s' % (prediction[0][0], prediction[0][1])
st.write(res)
st.write("### Disease description:")
descr = predict3.description(prediction)
st.write(descr[0][1])
st.write("### Treatment recommendations:")
tx = predict3.treatment(prediction)
st.write(tx[0][1])
else:
# st.sidebar.success("Select an image above.")
st.markdown(
"""
HealthScanner helps you diagnose your plants and recommends proper treatments!
Upload an image on the left for a quick diagnosis!
"""
)
image2 = Image.open('./models/img_crop.jpg')
st.image(image2, use_column_width = True)
|
python
|
#!/usr/bin/env python3
from mpl_toolkits.mplot3d import Axes3D
from rasterization import Rasterizer
from transformation import multiply
from transformation import TransformGenerator
import argparse
import DirectLinearTransform
import json
import math
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import random
import sys
def main():
args = _getParsedArgs(sys.argv[1:])
if args.p:
_pointPicker(args.input_image)
sys.exit(0)
transformImage(
args.input_image,
args.output_image,
_readCorrespondences(args.correspondences),
args.background
)
def transformImage(input_image_path, output_image_path,
corresponding_points, background_path):
im = mpimg.imread(input_image_path)
bg_im = None
if background_path is not None:
bg_im = mpimg.imread(background_path)
transform_matrix = DirectLinearTransform.computeTransform(
corresponding_points
)
image_rasterization = Rasterizer(
im,
transformation_matrix = transform_matrix,
background = bg_im
)
matplotlib.image.imsave(
output_image_path,
image_rasterization.rasterize()
)
def _pointPicker(input_image_path):
""" Utility function to select coordinates on image """
image = mpimg.imread(input_image_path)
fig = plt.figure()
axes = plt.imshow(image)
fig.canvas.mpl_connect(
'button_press_event',
lambda ev: print(ev.xdata, ev.ydata)
)
plt.show()
def _readCorrespondences(correspondenceFilePath):
with open(correspondenceFilePath, 'r') as correspondenceFileHandler:
return json.load(correspondenceFileHandler)
def _getParsedArgs(args):
parser = argparse.ArgumentParser(
description = "CLI input to homography application"
)
parser.add_argument(
"-p",
action = "store_true",
help = "use point picker utility")
parser.add_argument(
"--input-image",
default = "./media/t2.png",
help = "input image to be transformed")
parser.add_argument(
"--output-image",
default = "output.png",
help = "output image path for saving new image")
parser.add_argument(
"--correspondences",
default = "3.json",
help = "corresponding set of points to derive transform")
parser.add_argument(
"--background",
default = None,
help = "optionally specify background image to act as canvas")
return parser.parse_args(args)
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python3
import argparse
import gym
import gym_minigrid  # imported for its side effect of registering the MiniGrid environments
from baselines import bench, logger
from baselines.common import set_global_seeds
from baselines.common.cmd_util import make_atari_env, atari_arg_parser
from baselines.a2c.a2c import learn
from baselines.ppo2.policies import MlpPolicy
from baselines.common.vec_env.vec_normalize import VecNormalize
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
def train(env_id, num_timesteps, seed, lrschedule, num_env):
def make_env():
env = gym.make(env_id)
env = bench.Monitor(env, logger.get_dir(), allow_early_resets=True)
return env
env = DummyVecEnv([make_env])
env = VecNormalize(env)
set_global_seeds(seed)
policy_fn = MlpPolicy
learn(policy_fn, env, seed, total_timesteps=int(num_timesteps * 1.1), lrschedule=lrschedule)
env.close()
def main():
parser = argparse.ArgumentParser("A2C baseline")
parser.add_argument('--env', help='environment ID', default='CartPole-v1')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
parser.add_argument('--lrschedule', help='Learning rate schedule', choices=['constant', 'linear'], default='constant')
args = parser.parse_args()
logger.configure()
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed, lrschedule=args.lrschedule, num_env=1)
if __name__ == '__main__':
main()
|
python
|
import os
import torch
from torch import tensor
from typing import Optional, Tuple
from dataclasses import dataclass
from schemes import EventDetectorOutput
from models import BaseComponent
from models.event_detection.src.models.SingleLabelSequenceClassification import SingleLabelSequenceClassification
from stores.ontologies.event_type_wikidata_links_trecis import EVENT_TYPE_WIKIDATA_LINKS
from transformers import logging
logging.set_verbosity_error()
os.environ["TOKENIZERS_PARALLELISM"] = "false"
@dataclass
class InputFeature:
input_ids: tensor
attention_mask: tensor
labels: Optional[tensor] = None
@dataclass
class SingleLabelClassificationForwardOutput:
loss: Optional[tensor] = None
prediction_logits: tensor = None
hidden_states: Optional[Tuple[tensor]] = None
attentions: Optional[Tuple[tensor]] = None
class EventDetector(BaseComponent):
def __init__(self, path_to_pretrained_model: str):
super().__init__()
checkpoint = torch.load(path_to_pretrained_model, map_location=torch.device('cpu'))
self.model = SingleLabelSequenceClassification(checkpoint['config'])
self.model.load_state_dict(checkpoint['model_state_dict'])
self.index_label_map = checkpoint['index_label_map']
def forward(self, tweet: str) -> EventDetectorOutput:
tokenized_text = self.model.tokenizer(tweet, padding=True, truncation=True, max_length=512, return_tensors="pt")
input_ids: tensor = tokenized_text["input_ids"].to(self.model.device)
attention_masks: tensor = tokenized_text["attention_mask"].to(self.model.device)
labels = None
input_feature: InputFeature = InputFeature(input_ids=input_ids, attention_mask=attention_masks, labels=labels)
output: SingleLabelClassificationForwardOutput = self.model.forward(input_feature)
prediction = output.prediction_logits.argmax(1).item()
event_type = self.index_label_map[str(prediction)]
wikidata_link = EVENT_TYPE_WIKIDATA_LINKS.get(event_type)
return EventDetectorOutput(tweet=tweet, event_type=event_type, wikidata_links={event_type: wikidata_link})
@property
def __version__(self):
return "1.0.0"
|
python
|
"""test2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from authe import views as authe_view
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', authe_view.index, name='index'),
url(r'^contact/', authe_view.contact, name='contact'),
url(r'^signup', authe_view.signup, name='signup'),
url(r'^signin', authe_view.signin, name='signin'),
url(r'^logout', authe_view.logout, name='logout'),
url(r'^tiqu', authe_view.tiqu, name='tiqu'),
url(r'^xuanran', authe_view.xuanranwj, name='xuanranwj'),
url(r'^txcg', authe_view.jieshou, name='jieshou'),
url(r'^editor', authe_view.editor, name='editor'),
url(r'^teditor', authe_view.teditor, name='teditor'),
url(r'houtai', authe_view.houtai, name='houtai'),
url(r'cjwj', authe_view.create_houtai, name='create_houtai'),
url(r'myques', authe_view.myques, name='myques'),
url(r'delete', authe_view.deleteque, name='delete'),
url(r'^genggaipw', authe_view.genggaipw, name='genggaipw'),
url(r'^Tongji', authe_view.Tongji, name='Tongji'),
url(r'^Guanlian', authe_view.Guanlian, name='Guanlian'),
url(r'^GLPrint', authe_view.GLPrint, name='GLPrint'),
url(r'^Chongzhipw', authe_view.Chongzhipw, name='Chongzhipw'),
url(r'^Duibi', authe_view.Duibi, name='Duibi'),
url(r'^DBPrint', authe_view.DBPrint, name='DBPrint'),
]
|
python
|
import xarray as xr
import numpy as np
from xrspatial import hillshade
def _do_sparse_array(data_array):
import random
indx = list(zip(*np.where(data_array)))
pos = random.sample(range(data_array.size), data_array.size//2)
indx = np.asarray(indx)[pos]
r = indx[:, 0]
c = indx[:, 1]
data_half = data_array.copy()
data_half[r, c] = 0
return data_half
def _do_gaussian_array():
_x = np.linspace(0, 50, 101)
_y = _x.copy()
_mean = 25
_sdev = 5
X, Y = np.meshgrid(_x, _y, sparse=True)
x_fac = -np.power(X-_mean, 2)
y_fac = -np.power(Y-_mean, 2)
gaussian = np.exp((x_fac+y_fac)/(2*_sdev**2)) / (2.5*_sdev)
return gaussian
#
# -----
data_random = np.random.random_sample((100, 100))
data_random_sparse = _do_sparse_array(data_random)
data_gaussian = _do_gaussian_array()
def test_hillshade():
"""
Assert Simple Hillshade transfer function
"""
da_gaussian = xr.DataArray(data_gaussian)
da_gaussian_shade = hillshade(da_gaussian)
assert da_gaussian_shade.dims == da_gaussian.dims
assert da_gaussian_shade.coords == da_gaussian.coords
assert da_gaussian_shade.attrs == da_gaussian.attrs
assert da_gaussian_shade.mean() > 0
assert da_gaussian_shade[60, 60] > 0
|
python
|
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Profile,Procedure
# Create your tests here.
class ProfileTestClass(TestCase):
def setUp(self):
self.new_user = User.objects.create_user(username='user',password='password')
self.new_profile = Profile(id=1,prof_user=self.new_user,bio='Test Bio',contact_info='0723030837',profile_Id=1)
def test_instance(self):
self.assertTrue(isinstance(self.new_profile,Profile))
def test_save_profile(self):
self.new_profile.save_profile()
profiles = Profile.objects.all()
self.assertTrue(len(profiles) > 0)
def test_delete_profile(self):
self.new_profile.delete_profile()
profiles = Profile.objects.all()
self.assertTrue(len(profiles) == 0)
def test_update_bio(self):
self.new_profile.save_profile()
self.new_profile = Profile.objects.get(id=1)
profile = self.new_profile
profile.update_bio('updated user-bio')
self.updated_profile = Profile.objects.get(id=1)
self.assertEqual(self.updated_profile.bio,'updated user-bio')
class ProcedureTestClass(TestCase):
def setUp(self):
self.new_user = User.objects.create_user(username='user',password='password')
self.new_profile = Profile(id=1,prof_user=self.new_user,bio='Test Bio',contact_info='0723030837',profile_Id=1)
self.new_profile.save_profile()
self.new_procedure = Procedure(id=1,title='title',details='details',link='www.link.com',user=self.new_user)
def test_instance(self):
self.assertTrue(isinstance(self.new_procedure,Procedure))
def test_save_instance(self):
self.new_procedure.save_procedure()
procedure = Procedure.objects.all()
self.assertTrue(len(procedure)>0)
def test_delete_profile(self):
self.new_procedure.delete_procedure()
procedure = Procedure.objects.all()
self.assertTrue(len(procedure)==0)
def test_fetch_procedure(self):
self.new_procedure.save_procedure()
procedure = Procedure.fetch_all_images()
self.assertTrue(len(procedure)>0)
def test_find_procedure(self):
self.new_procedure.save_procedure()
procedure = Procedure.get_single_procedure(self.new_procedure.id)
self.assertTrue(procedure == self.new_procedure)
|
python
|
'''Create a segmentation by thresholding a predicted probability map'''
import os
import logging
from time import time as now
import numpy as np
import configargparse as argparse
import daisy
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# patch: suppress daisy warnings
logging.getLogger('daisy.client').setLevel(logging.ERROR)
def extract_segmentation_with_threshold_worker(
block,
probas,
mask,
out,
threshold):
# load the chunk
probas = probas[block.read_roi].to_ndarray()
if mask:
mask = mask[block.read_roi].to_ndarray()
segmentation = ((probas >= threshold) & (mask != 0))
else:
segmentation = probas >= threshold
# store binary mask as {0,255}
segmentation = segmentation.astype(np.uint32) * 255
# save to output dataset
out[block.write_roi] = segmentation
def extract_segmentation_with_threshold(
filename,
ds_name,
mask_filename,
mask_ds_name,
out_ds_name,
chunk_shape,
threshold,
num_workers):
probas = daisy.open_ds(
filename,
ds_name,
mode='r'
)
try:
mask = daisy.open_ds(
mask_filename,
mask_ds_name,
mode='r'
)
except (KeyError, RuntimeError):
logger.warning((
"Did not find a mask dataset "
f"at {os.path.join(mask_filename, str(mask_ds_name))}."
))
mask = None
out = daisy.prepare_ds(
filename=filename,
ds_name=out_ds_name,
total_roi=probas.roi,
voxel_size=probas.voxel_size,
dtype=np.uint32,
write_size=probas.voxel_size * daisy.Coordinate(chunk_shape),
compressor={'id': 'zlib', 'level': 3}
)
# Spawn a worker per chunk
block_roi = daisy.Roi(
(0, 0, 0),
probas.voxel_size * daisy.Coordinate(chunk_shape)
)
start = now()
daisy.run_blockwise(
total_roi=probas.roi,
read_roi=block_roi,
write_roi=block_roi,
process_function=lambda block: extract_segmentation_with_threshold_worker(
block,
probas=probas,
mask=mask,
out=out,
threshold=threshold
),
read_write_conflict=False,
fit='shrink',
num_workers=num_workers,
)
logger.info(f"Done in {now() - start} s")
def parse_args():
p = argparse.ArgParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add('--config', is_config_file=True, help='config file path')
p.add(
'--prediction_filename',
required=True,
help="Zarr file with the prediction."
)
p.add(
'--dataset',
'-d',
required=True,
help='Name of the dataset with prediction probabilities.'
)
p.add(
'--mask_filename',
default="",
help="Zarr file with the mask."
)
p.add(
'--mask',
'-m',
default='volumes/mask',
help='Binary mask to exclude non-cell voxels.'
)
p.add(
'--out_dataset',
'-o',
required=True,
help='Name of the output segmentation in the prediction zarr file.'
)
p.add(
'--chunk_shape',
'-c',
nargs='+',
type=int,
default=[128, 128, 128],
help=(
'Size of a chunk in voxels. Should be a multiple of the existing '
'chunk size.'
)
)
p.add(
'--threshold',
'-t',
type=float,
required=True,
help='Threshold for positive prediction.'
)
p.add(
'--num_workers',
'-n',
type=int,
default=32,
help='Number of daisy processes.'
)
args = p.parse_args()
logger.info(f'\n{p.format_values()}')
return args
def main():
args = parse_args()
extract_segmentation_with_threshold(
filename=args.prediction_filename,
ds_name=args.dataset,
mask_filename=args.mask_filename,
mask_ds_name=args.mask,
out_ds_name=args.out_dataset,
chunk_shape=args.chunk_shape,
threshold=args.threshold,
num_workers=args.num_workers
)
if __name__ == '__main__':
main()
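# Illustrative sketch (file and dataset names below are placeholders): the same routine can be
# called directly from Python instead of through the configargparse CLI defined above.
def _example_direct_call():
    extract_segmentation_with_threshold(
        filename="prediction.zarr",        # placeholder zarr container
        ds_name="volumes/pred_probas",     # placeholder prediction dataset
        mask_filename="mask.zarr",         # placeholder mask container
        mask_ds_name="volumes/mask",
        out_ds_name="volumes/segmentation",
        chunk_shape=[128, 128, 128],
        threshold=0.5,
        num_workers=8,
    )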
|
python
|
import os
import time
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
import warnings
warnings.filterwarnings('ignore')
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split, StratifiedKFold
def lgb_foldrun(X, y, params, name):
skf = StratifiedKFold(n_splits = 10, random_state = 111, shuffle = True)
if isinstance(X, pd.core.frame.DataFrame):
X = X.values
if isinstance(y, pd.core.frame.DataFrame):
y = y.is_duplicate.values
if isinstance(y, pd.core.frame.Series):
y = y.values
print('Running LGBM model with parameters:', params)
i = 1
losses = []
oof_train = np.zeros((X.shape[0]))
os.makedirs('saved_models/LGBM/SKF/{}'.format(name), exist_ok = True)
for tr_index, val_index in skf.split(X, y):
X_tr, X_val = X[tr_index], X[val_index]
y_tr, y_val = y[tr_index], y[val_index]
t = time.time()
lgb_train = lgb.Dataset(X_tr, y_tr)
lgb_val = lgb.Dataset(X_val, y_val)
print('Start training on fold: {}'.format(i))
gbm = lgb.train(params, lgb_train, num_boost_round = 100000, valid_sets = lgb_val,
early_stopping_rounds = 200, verbose_eval = 100)
print('Start predicting...')
val_pred = gbm.predict(X_val, num_iteration=gbm.best_iteration)
oof_train[val_index] = val_pred
score = log_loss(y_val, val_pred)
losses.append(score)
print('Final score for fold {} :'.format(i), score, '\n',
'Time it took to train and predict on fold:', time.time() - t, '\n')
gbm.save_model('saved_models/LGBM/SKF/{}/LGBM_10SKF_loss{:.5f}_fold{}.txt'.format(name, score, i))
i += 1
np.save('OOF_preds/train/{}'.format(name), oof_train)
print('Mean logloss for model in 10-folds SKF:', np.array(losses).mean(axis = 0))
return
def xgb_foldrun(X, y, params, name):
skf = StratifiedKFold(n_splits = 10, random_state = 111, shuffle = True)
if isinstance(X, pd.core.frame.DataFrame):
X = X.values
if isinstance(y, pd.core.frame.DataFrame):
y = y.is_duplicate.values
if isinstance(y, pd.core.frame.Series):
y = y.values
print('Running XGB model with parameters:', params)
i = 1
losses = []
oof_train = np.zeros((X.shape[0]))
os.makedirs('saved_models/XGB/SKF/{}'.format(name), exist_ok = True)
for tr_index, val_index in skf.split(X, y):
X_tr, X_val = X[tr_index], X[val_index]
y_tr, y_val = y[tr_index], y[val_index]
t = time.time()
dtrain = xgb.DMatrix(X_tr, label = y_tr)
dval = xgb.DMatrix(X_val, label = y_val)
watchlist = [(dtrain, 'train'), (dval, 'valid')]
print('Start training on fold: {}'.format(i))
gbm = xgb.train(params, dtrain, 100000, watchlist,
early_stopping_rounds = 200, verbose_eval = 100)
print('Start predicting...')
val_pred = gbm.predict(xgb.DMatrix(X_val), ntree_limit=gbm.best_ntree_limit)
oof_train[val_index] = val_pred
score = log_loss(y_val, val_pred)
losses.append(score)
print('Final score for fold {} :'.format(i), score, '\n',
'Time it took to train and predict on fold:', time.time() - t, '\n')
gbm.save_model('saved_models/XGB/SKF/{}/XGB_10SKF_loss{:.5f}_fold{}.txt'.format(name, score, i))
i += 1
np.save('OOF_preds/train/{}'.format(name), oof_train)
print('Mean logloss for model in 10-folds SKF:', np.array(losses).mean(axis = 0))
return
def lgb_foldrun_test(X, y, X_test, params, name, save = True):
skf = StratifiedKFold(n_splits = 10, random_state = 111, shuffle = True)
if isinstance(X, pd.core.frame.DataFrame):
X = X.values
if isinstance(y, pd.core.frame.DataFrame):
y = y.is_duplicate.values
if isinstance(y, pd.core.frame.Series):
y = y.values
print('Running LGBM model with parameters:', params)
i = 0
losses = []
oof_train = np.zeros((X.shape[0]))
oof_test = np.zeros((10, 2345796))
os.makedirs('saved_models/LGBM/SKF/{}'.format(name), exist_ok = True)
for tr_index, val_index in skf.split(X, y):
X_tr, X_val = X[tr_index], X[val_index]
y_tr, y_val = y[tr_index], y[val_index]
t = time.time()
lgb_train = lgb.Dataset(X_tr, y_tr)
lgb_val = lgb.Dataset(X_val, y_val)
print('Start training on fold: {}'.format(i))
gbm = lgb.train(params, lgb_train, num_boost_round = 100000, valid_sets = lgb_val,
early_stopping_rounds = 200, verbose_eval = 100)
print('Start predicting...')
val_pred = gbm.predict(X_val, num_iteration=gbm.best_iteration)
oof_train[val_index] = val_pred
score = log_loss(y_val, val_pred)
losses.append(score)
if X_test is not None:
test_preds = gbm.predict(X_test, num_iteration=gbm.best_iteration)
oof_test[i, :] = test_preds
print('Final score for fold {} :'.format(i), score, '\n',
'Time it took to train and predict on fold:', time.time() - t, '\n')
gbm.save_model('saved_models/LGBM/SKF/{}/LGBM_10SKF_loss{:.5f}_fold{}.txt'.format(name, score, i))
i += 1
print('Mean logloss for model in 10-folds SKF:', np.array(losses).mean(axis = 0))
oof_train = pd.DataFrame(oof_train)
oof_train.columns = ['{}_prob'.format(name)]
oof_test = oof_test.mean(axis = 0)
oof_test = pd.DataFrame(oof_test)
oof_test.columns = ['{}_prob'.format(name)]
if save:
oof_train.to_pickle('OOF_preds/train/train_preds_{}.pkl'.format(name))
oof_test.to_pickle('OOF_preds/test/test_preds_{}.pkl'.format(name))
return oof_train, oof_test
def xgb_foldrun_test(X, y, X_test, params, name, save = True):
skf = StratifiedKFold(n_splits = 10, random_state = 111, shuffle = True)
if isinstance(X, pd.core.frame.DataFrame):
X = X.values
if isinstance(y, pd.core.frame.DataFrame):
y = y.is_duplicate.values
if isinstance(y, pd.core.frame.Series):
y = y.values
print('Running XGB model with parameters:', params)
i = 0
losses = []
oof_train = np.zeros((X.shape[0]))
oof_test = np.zeros((10, 2345796))
os.makedirs('saved_models/XGB/SKF/{}'.format(name), exist_ok = True)
for tr_index, val_index in skf.split(X, y):
X_tr, X_val = X[tr_index], X[val_index]
y_tr, y_val = y[tr_index], y[val_index]
t = time.time()
dtrain = xgb.DMatrix(X_tr, label = y_tr)
dval = xgb.DMatrix(X_val, label = y_val)
watchlist = [(dtrain, 'train'), (dval, 'valid')]
print('Start training on fold: {}'.format(i))
gbm = xgb.train(params, dtrain, 100000, watchlist,
early_stopping_rounds = 200, verbose_eval = 100)
print('Start predicting...')
val_pred = gbm.predict(xgb.DMatrix(X_val), ntree_limit=gbm.best_ntree_limit)
oof_train[val_index] = val_pred
score = log_loss(y_val, val_pred)
losses.append(score)
if X_test is not None:
test_preds = gbm.predict(X_test, ntree_limit=gbm.best_ntree_limit)
oof_test[i, :] = test_preds
print('Final score for fold {} :'.format(i), score, '\n',
'Time it took to train and predict on fold:', time.time() - t, '\n')
gbm.save_model('saved_models/XGB/SKF/{}/XGB_10SKF_loss{:.5f}_fold{}.txt'.format(name, score, i))
i += 1
print('Mean logloss for model in 10-folds SKF:', np.array(losses).mean(axis = 0))
oof_train = pd.DataFrame(oof_train)
oof_train.columns = ['{}_prob'.format(name)]
oof_test = oof_test.mean(axis = 0)
oof_test = pd.DataFrame(oof_test)
oof_test.columns = ['{}_prob'.format(name)]
if save:
oof_train.to_pickle('OOF_preds/train/train_preds_{}.pkl'.format(name))
oof_test.to_pickle('OOF_preds/test/test_preds_{}.pkl'.format(name))
return oof_train, oof_test
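# Illustrative sketch (parameter values are assumptions, not tuned settings from this file):
# a typical call to lgb_foldrun for a binary log-loss task. X_features / y_target stand in
# for whatever feature matrix and is_duplicate labels the caller has prepared.
def _example_lgb_run(X_features, y_target):
    lgb_params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'learning_rate': 0.02,
        'num_leaves': 127,
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
    }
    lgb_foldrun(X_features, y_target, lgb_params, name='lgbm_baseline')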
|
python
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Session'
db.create_table('django_session', (
('session_key', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True)),
('session_data', self.gf('django.db.models.fields.TextField')()),
('expire_date', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
))
db.send_create_signal('sessions', ['Session'])
def backwards(self, orm):
# Deleting model 'Session'
db.delete_table('django_session')
models = {
'sessions.session': {
'Meta': {'object_name': 'Session', 'db_table': "'django_session'"},
'expire_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'session_data': ('django.db.models.fields.TextField', [], {}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'})
}
}
complete_apps = ['sessions']
|
python
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Str, Property, Int, List, Button, \
cached_property, on_trait_change, Instance
# ============= standard library imports ========================
import os
# ============= local library imports ==========================
from pychron.experiment.automated_run.factory import AutomatedRunFactory
from pychron.pychron_constants import NULL_STR
from pychron.experiment.automated_run.uv.factory_view import UVFactoryView
from pychron.experiment.automated_run.uv.spec import UVAutomatedRunSpec
from pychron.paths import paths
class UVAutomatedRunFactory(AutomatedRunFactory):
reprate = Int
mask = Str
attenuator = Str
image = Str
masks = Property
extract_units_names = List([NULL_STR, 'burst', 'continuous'])
_default_extract_units = 'burst'
browser_button = Button('Browse')
_spec_klass = UVAutomatedRunSpec
factory_view_klass = UVFactoryView
def _get_run_attr(self):
#r = super(UVAutomatedRunFactory, self)._get_run_attr()
#r.extend(['mask', 'attenuator', ])
r=['position',
'extract_value', 'extract_units', 'cleanup',
'mask','attenuator','reprate',
'weight', 'comment',
'sample', 'irradiation',
'skip', 'mass_spectrometer', 'extract_device']
return r
@cached_property
def _get_masks(self):
p = os.path.join(paths.device_dir, 'fusions_uv', 'mask_names.txt')
masks = []
if os.path.isfile(p):
with open(p, 'r') as rfile:
for lin in rfile:
lin = lin.strip()
if not lin or lin.startswith('#'):
continue
masks.append(lin)
return masks
@on_trait_change('mask, attenuator, reprate')
def _uv_edit_handler(self, name, new):
self._update_run_values(name, new)
# ============= EOF =============================================
|
python
|
import unittest
from circleofgifts.dealer import Dealer
class DealerTestCase(unittest.TestCase):
def test_sorted_deal(self):
"""Test deal which uses sorted() as the sort method for both teams
and players. This makes the results predictable."""
players = [['Pierre', 'Paul'], ['Jeanne', 'Julie'],
['Sylvain', 'Sandra']]
dealer = Dealer(players)
dealer.sort_players_callback = lambda x: sorted(x)
dealer.sort_teams_callback = lambda x: sorted(x)
deal = dealer.deal()
self.assertEqual(deal, ['Jeanne', 'Paul', 'Sandra', 'Julie', 'Pierre',
'Sylvain'])
def test_deal_with_respect_to_history(self):
# When there is no alternative (sorted result), then the dealer
# MUST return successfully... and has no choice: it MUST return the
# same result again and again, whatever the history.
players = [['Pierre', 'Paul'], ['Jeanne', 'Julie'],
['Sylvain', 'Sandra']]
dealer = Dealer(players)
dealer.sort_players_callback = lambda x: sorted(x)
dealer.sort_teams_callback = lambda x: sorted(x)
deal = dealer.deal_with_respect_to_history()
self.assertEqual(deal, ['Jeanne', 'Paul', 'Sandra', 'Julie', 'Pierre',
'Sylvain'])
deal = dealer.deal_with_respect_to_history()
self.assertEqual(deal, ['Jeanne', 'Paul', 'Sandra', 'Julie', 'Pierre',
'Sylvain'])
def test_is_deal_valid(self):
dealer = Dealer([1, 2, 3, 4, 5, 6])
# No repetition: valid
self.assertTrue(dealer.is_deal_valid([1, 2, 3, 4, 5, 6], [1, 3, 5, 2, 6, 4]))
# Exactly the same: invalid
self.assertFalse(dealer.is_deal_valid([1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]))
# Scheme 6-1 is repeated: invalid
self.assertFalse(dealer.is_deal_valid([1, 2, 3, 4, 5, 6], [1, 3, 5, 2, 4, 6]))
# Exactly the opposite: invalid
self.assertFalse(dealer.is_deal_valid([1, 2, 3, 4, 5, 6], [6, 5, 4, 3, 2, 1]))
# Nothing in common: valid
self.assertTrue(dealer.is_deal_valid([1, 2, 3, 4, 5, 6],
[7, 8, 9]))
|
python
|
from django.shortcuts import render
from django.http import HttpResponse
def explore(request):
return HttpResponse('This will list all chemicals.')
def my_chemicals(request):
return HttpResponse('This will list MY chemicals!')
|
python
|
import re
# Your puzzle input is 246540-787419.
## Part 1
pwords = []
for i in range(246540, 787420):  # upper bound made inclusive to match the puzzle input 246540-787419
if(int(str(i)[0]) <= int(str(i)[1]) <= int(str(i)[2]) <= int(str(i)[3]) <= int(str(i)[4]) <= int(str(i)[5])):
if((str(i)[0] == str(i)[1]) or (str(i)[1] == str(i)[2]) or (str(i)[2] == str(i)[3]) or (str(i)[3] == str(i)[4]) or (str(i)[4] == str(i)[5])):
pwords.append(i)
print("Candidate found " + str(i))
print("Number of possible passwords: " + str(len(pwords)))
input()
## Part 2
pwords2 = []
for pw in pwords:
# search for packages of three and more digits
x = re.sub(r"(\d)\1{2,6}", "", str(pw))
if(x):
# but allow packages with two other digits
y = re.search(r"(\d)\1", str(x))
if(not y):
print("Invalid password " + str(pw))
else:
# collect valid pw for counting
pwords2.append(pw)
print("Number of possible passwords left: " + str(len(pwords2)))
input()
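# Illustrative cross-check (not part of the original solution): the part-2 rule can also be
# expressed without regex by grouping consecutive equal digits and requiring at least one
# run of exactly two.
from itertools import groupby
def _has_exact_double(number):
    return any(len(list(run)) == 2 for _, run in groupby(str(number)))
# e.g. _has_exact_double(112233) -> True, _has_exact_double(123444) -> False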
|
python
|
class Server(object):
def __init__(self, host, port):
self.host = host
self.port = port
def get_host(self):
return self.host
def get_port(self):
return self.port
class MailType(object):
def __init__(self):
pass
|
python
|
from django import template
import base64
from socialDistribution.models.post import Post, InboxPost
from socialDistribution.utility import get_post_like_info, get_like_text
register = template.Library()
# Django Software Foundation, "Custom Template tags and Filters", 2021-10-10
# https://docs.djangoproject.com/en/3.2/howto/custom-template-tags/#inclusion-tags
@register.inclusion_tag('tagtemplates/post.html')
def post_card(post, author):
"""
Handles "liking" and "deleting" a post
"""
# Delete/Edit
is_author = post.author == author
is_public = post.is_public()
is_friends = post.is_friends()
if type(post) is InboxPost:
post_type = 'inbox'
else:
post_type = 'local'
is_liked, likes = get_post_like_info(post, author)
like_text = get_like_text(is_liked, likes)
return {
'post': post,
'post_type': post_type,
'is_author': is_author,
'is_liked': is_liked,
'like_text': like_text,
'is_public': is_public,
'is_friends': is_friends
}
|
python
|
"""
Setup of core python codebase
Author: Jeff Mahler
"""
import os
from setuptools import setup
requirements = [
"numpy",
"scipy",
"scikit-image",
"scikit-learn",
"ruamel.yaml",
"matplotlib",
"multiprocess",
"setproctitle",
"opencv-python",
"Pillow",
"joblib",
"colorlog",
"pyreadline; platform_system=='Windows'",
]
# load __version__ without importing anything
version_file = os.path.join(
os.path.dirname(__file__), "autolab_core/version.py"
)
with open(version_file, "r") as f:
# use eval to get a clean string of version from file
__version__ = eval(f.read().strip().split("=")[-1])
setup(
name="autolab_core",
version=__version__,
description="Core utilities for the Berkeley AutoLab",
long_description=(
"Core utilities for the Berkeley AutoLab. "
"Includes rigid transformations, loggers, and 3D data wrappers."
),
author="Jeff Mahler",
author_email="[email protected]",
maintainer="Mike Danielczuk",
maintainer_email="[email protected]",
license="Apache Software License",
url="https://github.com/BerkeleyAutomation/autolab_core",
keywords="robotics grasping transformations",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Natural Language :: English",
"Topic :: Scientific/Engineering",
],
packages=["autolab_core"],
install_requires=requirements,
extras_require={
"docs": ["sphinx", "sphinxcontrib-napoleon", "sphinx_rtd_theme"],
"ros": ["rospkg", "catkin_pkg", "empy"],
},
)
|
python
|
from collections.abc import Iterable
from itertools import chain, product
import pprint
import inspect
from .nodes import *
from .graph import *
class GraphGenerator():
def __init__(self, specifications):
"""
Parameters
----------
specifications: Iterable[Specification] -- TODO
"""
self._specifications = specifications
self._graph_iterator = None
def iterator():
for s in self._specifications:
for g in self._build_graph_iterator(s):
yield g
self._iterator = iterator()
def __iter__(self):
return self._iterator
def __next__(self):
return next(self._iterator)
def _build_graph_iterator(self, specification):
# 1. Expand specification combinations into multiple flat specifications.
specifications = self._expand(specification)
# [print(s.get_providers(), '\n') for s in specifications]
for specification in specifications:
# 2. Expand into node lists.
specification._build_nodes()
specification._build_edges()
# 3. Sort nodes topologically
specification._sort_topologically()
def generator():
for specification in specifications:
# 4. Instantiate nodes, injecting dependencies, and set graph...
specification._complete()
yield Graph(specification.top)
return generator()
def _expand(self, specification):
"""
Returns list of expanded specifications.
"""
expanded_nodes = [node.expand() for node in specification.get_providers()]
return [Specification(nodes=nodes) for nodes in product(*expanded_nodes)]
|
python
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Allocated Blocks Report
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Django Modules
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.db.models import Q
# NOC Modules
from noc.lib.app.simplereport import SimpleReport, TableColumn
from noc.core.validators import check_ipv4_prefix, check_ipv6_prefix, ValidationError
from noc.ip.models.vrf import VRF
from noc.ip.models.prefix import Prefix
from noc.main.models.customfield import CustomField
class ReportForm(forms.Form):
"""
Report form
"""
vrf = forms.ModelChoiceField(label=_("VRF"), queryset=VRF.objects.all().order_by("name"))
afi = forms.ChoiceField(label=_("Address Family"), choices=[("4", _("IPv4")), ("6", _("IPv6"))])
prefix = forms.CharField(label=_("Prefix"))
def clean_prefix(self):
vrf = self.cleaned_data["vrf"]
afi = self.cleaned_data["afi"]
prefix = self.cleaned_data.get("prefix", "").strip()
if afi == "4":
check_ipv4_prefix(prefix)
elif afi == "6":
check_ipv6_prefix(prefix)
try:
return Prefix.objects.get(vrf=vrf, afi=afi, prefix=prefix)
except Prefix.DoesNotExist:
raise ValidationError(_("Prefix not found"))
class ReportAllocated(SimpleReport):
title = _("Allocated Blocks")
form = ReportForm
def get_form(self):
fc = super(ReportAllocated, self).get_form()
self.customize_form(fc, "ip_prefix", search=True)
return fc
def get_data(self, vrf, afi, prefix, **kwargs):
def get_row(p):
r = [p.prefix, p.state.name, unicode(p.vc) if p.vc else ""]
for f in cf:
v = getattr(p, f.name)
r += [v if v is not None else ""]
r += [p.description, p]
return r
cf = CustomField.table_fields("ip_prefix")
cfn = dict((f.name, f) for f in cf)
# Prepare columns
columns = ["Prefix", "State", "VC"]
for f in cf:
columns += [f.label]
columns += ["Description", TableColumn(_("Tags"), format="tags")]
# Prepare query
q = Q()
for k in kwargs:
v = kwargs[k]
if k in cfn and v is not None and v != "":
q &= Q(**{str(k): v})
#
return self.from_dataset(
title=_(
"Allocated blocks in VRF %(vrf)s (IPv%(afi)s), %(prefix)s"
% {"vrf": vrf.name, "afi": afi, "prefix": prefix.prefix}
),
columns=columns,
data=[get_row(p) for p in prefix.children_set.filter(q).order_by("prefix")],
enumerate=True,
)
|
python
|
from buildbot.config import BuilderConfig
from buildbot.changes.gitpoller import GitPoller
from buildbot.plugins import util, schedulers
from buildbot.process.factory import BuildFactory
from buildbot.process.properties import Interpolate
from buildbot.process import results
from buildbot.steps.source.git import Git
from buildbot.steps.shell import ShellCommand
from buildbot.steps.transfer import FileDownload
from buildbot_ros_cfg.helpers import success
def ros_sysbuild(c, job_name, rosdistro, machines,
source=True, locks=[]):
# Create a Job for system test job_name = sys_name
project_name = '_'.join([job_name, rosdistro, 'system_build'])
c['schedulers'].append(
schedulers.SingleBranchScheduler(
name=project_name,
builderNames=[project_name, ],
change_filter=util.ChangeFilter(category=project_name)
)
)
c['schedulers'].append(
schedulers.Nightly(
name = project_name+'-nightly-master',
builderNames = [project_name,],
hour=4,
minute=0,
)
)
# Directory which will be bind-mounted
binddir = '/tmp'
rosinstall_url = "https://raw.githubusercontent.com/ipa-rwu/scalable_system_setup/master/config/" + job_name + ".rosinstall"
f = BuildFactory()
# Remove any old crud in /tmp folder
f.addStep(
ShellCommand(
name='rm src',
command=['rm', '-rf', 'scalable_ws'],
hideStepIf=success,
workdir=Interpolate('%(prop:builddir)s/build/')
)
)
# wstool init src .rosinstall
f.addStep(
ShellCommand(
haltOnFailure=True,
name='wstool_rosintall',
command=['wstool', 'init', 'src', rosinstall_url],
hideStepIf=success,
workdir=Interpolate('%(prop:builddir)s/build/scalable_ws')
)
)
# Download Dockerfile_sys.py script from master
f.addStep(
FileDownload(
name=job_name + '-grab-script',
mastersrc='docker_components/Dockerfile_sys',
workerdest=('Dockerfile_sys'),
hideStepIf=success
)
)
# Download docker-compose-sys.py script from master
f.addStep(
FileDownload(
name=job_name + '-grab-script',
mastersrc='docker_components/docker-compose-sys.yaml',
workerdest=('docker-compose-sys.yaml'),
hideStepIf=success
)
)
f.addStep(
FileDownload(
name=job_name + '-grab-script',
mastersrc='docker_components/rosdep_private.yaml',
workerdest=('rosdep_private.yaml'),
hideStepIf=success
)
)
f.addStep(
FileDownload(
name=job_name + '-grab-script',
mastersrc='scripts/docker-container.py',
workerdest=('docker-container.py'),
hideStepIf=success
)
)
f.addStep(
FileDownload(
name=job_name + '-grab-script',
mastersrc='shell/uplode_docker_image.sh',
workerdest=('upload_docker_image.sh'),
hideStepIf=success
)
)
f.addStep(
FileDownload(
name=job_name + '-grab-script',
mastersrc='scripts/unique_docker_sys.py',
workerdest=('unique_docker_sys.py'),
mode=0o755,
hideStepIf=success
)
)
f.addStep(
FileDownload(
name=job_name + '-grab-script',
mastersrc='shell/test_sys.sh',
workerdest=('test_sys.sh'),
mode=0o755,
hideStepIf=success
)
)
# rewrite docker-compose-sys.yaml with a name unique to this job
f.addStep(
ShellCommand(
haltOnFailure=True,
name=job_name + '-reedit-docker-compose',
command=['python','unique_docker_sys.py', 'docker-compose-sys.yaml',
Interpolate(job_name)],
workdir=Interpolate('%(prop:builddir)s/build/'),
descriptionDone=['reedit docker-compose', job_name]
)
)
# Build docker image for creating debian
f.addStep(
ShellCommand(
haltOnFailure = True,
name = job_name + '-create_docker_image',
command=['docker-compose', '-f','docker-compose-sys.yaml',
'build'],
workdir=Interpolate('%(prop:builddir)s/build/'),
descriptionDone=['sourcedeb', job_name]
)
)
# Make and run tests in a docker container
f.addStep(
ShellCommand(
name=job_name + '-test_system',
command=['docker', 'run', '--name=' + project_name,
'scalable-sys:' + job_name,
'bash','/usr/local/sbin/test_sys.sh'],
descriptionDone=['make and test', job_name]
)
)
f.addStep(
ShellCommand(
name=job_name + '-upload_docker_image',
command=['bash', 'upload_docker_image.sh', project_name, binddir, job_name],
descriptionDone=['upload_docker_image', job_name],
workdir=Interpolate('%(prop:builddir)s/build/')
)
)
f.addStep(
ShellCommand(
name=job_name + '-rm_container',
command=['docker', 'rm', project_name],
descriptionDone=['remove docker container', job_name]
)
)
f.addStep(
ShellCommand(
name=job_name + '-rm_image',
command=['docker', 'image', 'rm', 'scalable-sys:' + job_name],
descriptionDone=['remove docker image', job_name]
)
)
c['builders'].append(
BuilderConfig(
name=project_name,
workernames=machines,
factory=f,
locks=locks
)
)
# return the name of the job created
return project_name
## @brief ShellCommand w/overloaded evaluateCommand so that test steps can return a WARNINGS status
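# Illustrative sketch (job name, distro and worker names are placeholders): wiring the factory
# above into a BuildBot master.cfg, where 'c' is the BuildmasterConfig dict with 'schedulers'
# and 'builders' lists already initialised, as ros_sysbuild assumes.
def _example_master_cfg_entry(c):
    ros_sysbuild(
        c,
        job_name='my_system',    # placeholder rosinstall / job name
        rosdistro='melodic',     # placeholder ROS distro
        machines=['worker-1'],   # placeholder worker names
    )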
|
python
|
import numpy as np
# from crf import Sample, CRF
from crf_bin import Sample, CRFBin
trainCorpus = "/home/laboratory/github/homeWork/machineTranslation/data/train.txt"
testCorpus = "/home/laboratory/github/homeWork/machineTranslation/data/test.txt"
labelTableverse = {}
print("cache label...")
with open("/home/laboratory/github/homeWork/machineTranslation/data/label.txt", 'r') as f:
index = 0
for line in f:
for items in line.strip().replace(" ", " ").split(" "):
if items == "":
continue
word_label = items.split("/")
if "]" in word_label[1]:
word_label[1] = word_label[1].split("]")[0]
if word_label[1] not in labelTableverse:
Sample.LabelTable[index] = word_label[1]
labelTableverse[Sample.LabelTable[index]] = index
index += 1
Sample.LabelSize = len(Sample.LabelTable)
# with open("/home/laboratory/corpus/cn_vectorTable/cn_vectors_50.txt", 'r') as f:
# for line in f:
# items = line.strip().split(" ")
# Sample.WordsTable[items[0]] = np.array([float(elem) for elem in items[1:]], dtype=np.float)
def load(filename):
data = []
maxLen = 0
with open(filename, 'r') as f:
for line in f:
wordSeq = []
labels = []
for items in line.strip().replace(" ", " ").split(" "):
if items == "":
continue
word_label = items.split("/")
if "]" in word_label[1]:
word_label[1] = word_label[1].split("]")[0]
wordSeq.append(word_label[0])
#print(line)
try:
labels.append(labelTableverse[word_label[1]])
except:
print(word_label[1])
seqLength = len(wordSeq)
if seqLength > maxLen:
maxLen = seqLength
data.append(Sample(wordSeq, labels))
return data, maxLen
Sample.DictLength = len(Sample.WordsTable)
print("load train data...")
train, maxLen = load("/home/laboratory/github/homeWork/machineTranslation/data/train.txt")
print("load test data...")
test, _ = load("/home/laboratory/github/homeWork/machineTranslation/data/test.txt")
nodeFeatureSize = Sample.LabelSize * Sample.DictLength
edgeFeatureSize = Sample.LabelSize * Sample.LabelSize * Sample.DictLength
crf = CRFBin(nodeFeatureSize, edgeFeatureSize, len(labelTableverse))
print("training...")
#0.01
crf.SGA(train[0:30], iterations=1000, a0=1, validate=None)
print("sample...")
labels = crf.Sample(train[1])
print("prediction: " + str(labels))
print("true : " + str(train[1].Labels))
labelStrs = []
labels = crf.Sample(test[0])
print("prediction: " + str(labels))
print("true : " + str(test[0].Labels))
labelStrs = []
labels = crf.Sample(test[1])
print("prediction: " + str(labels))
print("true : " + str(test[1].Labels))
labelStrs = []
labels = crf.Sample(test[2])
print("prediction: " + str(labels))
print("true : " + str(test[2].Labels))
|
python
|
# architecture.py ---
#
# Filename: architecture.py
# Description: defines the architecture of the 3DSmoothNet
# Author: Zan Gojcic, Caifa Zhou
#
# Project: 3DSmoothNet https://github.com/zgojcic/3DSmoothNet
# Created: 04.04.2019
# Version: 1.0
# Copyright (C)
# IGP @ ETHZ
# Code:
# Import python dependencies
import tensorflow as tf
import numpy as np
# Import custom functions
from core import ops
def network_architecture(x_anc,x_pos, dropout_rate, config, reuse=False):
# Join the 3DSmoothNet structure with the desired output dimension
net_structure = [1, 32, 32, 64, 64, 128, 128]
outputDim = config.output_dim
channels = [item for sublist in [net_structure, [outputDim]] for item in sublist]
# In the third layer stride is 2
stride = np.ones(len(channels))
stride[2] = 2
# Apply dropout in the 6th layer
dropout_flag = np.zeros(len(channels))
dropout_flag[5] = 1
# Initialize data
input_anc = x_anc
input_pos = x_pos
layer_index = 0
# Loop over the desired layers
with tf.name_scope('3DIM_cnn') as scope:
for layer in np.arange(0, len(channels)-2):
scope_name = "3DIM_cnn" + str(layer_index+1)
with tf.name_scope(scope_name) as inner_scope:
input_anc, input_pos = conv_block(input_anc, input_pos, [channels[layer], channels[layer + 1]],
dropout_flag[layer], dropout_rate, layer_index,
stride_input=stride[layer], reuse=reuse)
layer_index += 1
with tf.name_scope('3DIM_cnn7') as inner_scope:
input_anc, input_pos = out_block(input_anc, input_pos, [channels[-2], channels[-1]],
layer_index, reuse=reuse)
return ops.l2_normalize(input_anc), \
ops.l2_normalize(input_pos)
def conv_block(input_anc, input_pos, channels, dropout_flag, dropout_rate, laxer_idx, stride_input=1, k_size=3,
padding_type='SAME', reuse=False):
# Traditional 3D conv layer followed by batch norm and relu activation
i_size = int(input_anc.get_shape().as_list()[-2] / stride_input)  # keep the bias spatial size an integer
weights = ops.weight([k_size, k_size, k_size, channels[0], channels[1]],
layer_name='wcnn' + str(laxer_idx+1), reuse=reuse)
bias = ops.bias([i_size, i_size, i_size, channels[1]], layer_name='bcnn' + str(laxer_idx+1),reuse=reuse)
conv_output_anc = tf.add(ops.conv3d(input_anc, weights, stride=[stride_input,stride_input, stride_input], padding=padding_type),bias)
conv_output_pos = tf.add(ops.conv3d(input_pos, weights, stride=[stride_input, stride_input, stride_input], padding=padding_type),bias)
conv_output_anc = ops.batch_norm(conv_output_anc)
conv_output_pos = ops.batch_norm(conv_output_pos)
conv_output_anc = ops.relu(conv_output_anc)
conv_output_pos = ops.relu(conv_output_pos)
if dropout_flag:
conv_output_anc = ops.dropout(conv_output_anc, dropout_rate=dropout_rate)
conv_output_pos = ops.dropout(conv_output_pos, dropout_rate=dropout_rate)
return conv_output_anc, conv_output_pos
def out_block(input_anc, input_pos, channels, laxer_idx, stride_input=1, k_size=8, padding_type = 'VALID', reuse=False):
# Last conv layer, flatten the output
weights = ops.weight([k_size, k_size, k_size, channels[0], channels[1]],
layer_name='wcnn' + str(laxer_idx+1), reuse=reuse)
bias = ops.bias([1, 1, 1, channels[1]], layer_name='bcnn' + str(laxer_idx + 1),reuse=reuse)
conv_output_anc = tf.add(ops.conv3d(input_anc, weights, stride=[stride_input,stride_input, stride_input], padding=padding_type), bias)
conv_output_pos = tf.add(ops.conv3d(input_pos, weights, stride=[stride_input, stride_input, stride_input], padding=padding_type), bias)
conv_output_anc = ops.batch_norm(conv_output_anc)
conv_output_pos = ops.batch_norm(conv_output_pos)
conv_output_anc = tf.contrib.layers.flatten(conv_output_anc)
conv_output_pos = tf.contrib.layers.flatten(conv_output_pos)
return conv_output_anc, conv_output_pos
|
python
|
from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
""" Extension of User model """
user = models.OneToOneField(User,
on_delete=models.CASCADE,
primary_key=True)
# if vacation mode is set to true, task streaks will not be reset
vacation = models.BooleanField(default=False)
|
python
|
# -*- coding: utf-8 -*-
"""DigitRecognizer.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/149_-4guTkO2Vzex8bjA402nPcJ_Z7G7W
#***Digit Recognizer(MNIST) dataset using CNN***
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import zipfile
zf = zipfile.ZipFile('train.csv (2).zip')
zf.extractall()
df=pd.read_csv('train.csv')
df=df.dropna(how='any')
df
df['label'].unique()
y = df["label"]
X = df.drop(labels=["label"],axis=1)
X
y
X = X / 255.0
X
X = X.values.reshape(-1,28,28,1)
X
from tensorflow.keras.utils import to_categorical
y = to_categorical(y, 10)
y
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(X, y, test_size = 0.25, random_state=42)
print("x_train shape",X_train.shape)
print("x_val shape",X_val.shape)
print("y_train shape",Y_train.shape)
print("y_val shape",Y_val.shape)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))
opt = SGD(learning_rate=0.01, momentum=0.9)
model.compile(optimizer=opt,loss='categorical_crossentropy',metrics=['accuracy'])
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=5,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train)
model.fit(datagen.flow(X_train, Y_train, batch_size=20), validation_data=(X_val, Y_val), epochs=1, steps_per_epoch=X_train.shape[0] // 20)  # one pass over the training data per epoch at batch size 20
model.save('DR.h5')
print("Accuracy of the model is --> " , model.evaluate(X_val, Y_val, batch_size=20)[1]*100 , "%")
print("Loss of the model is --> " , model.evaluate(X_val, Y_val, batch_size=20)[0])
"""#***Testing the model***"""
zf = zipfile.ZipFile('test.csv.zip')
zf.extractall()
from tensorflow.keras.models import load_model
model=load_model('DR.h5')
print("loaded model from disk")
test=pd.read_csv('test.csv')
test=test.dropna(how='any')
test
X_test = test / 255.0
X_test = X_test.values.reshape(-1,28,28,1)
X_test
labels = model.predict(X_test)
labels = np.argmax(labels, axis=1)
len(labels)
ss = pd.read_csv("sample_submission.csv")
ss.head()
index = test.index.values + 1
data = {'ImageId' : index, "Label" : labels}
df = pd.DataFrame(data=data)
df.head()
df.to_csv('submission.csv', index=False)
df
final = pd.DataFrame({'ImageId' : index, "Label" : labels.astype(int).ravel()})
final.to_csv("final_submission.csv",index = False)
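# Illustrative sanity check (optional, not in the original notebook): plot a few test digits
# next to their predicted labels using the X_test and labels arrays built above.
def _show_predictions(n=5):
    for k in range(n):
        plt.subplot(1, n, k + 1)
        plt.imshow(X_test[k].reshape(28, 28), cmap='gray')
        plt.title(str(labels[k]))
        plt.axis('off')
    plt.show()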
|
python
|
# See LICENSE file for full copyright and licensing details.
#from . import models
|
python
|
import numpy as np
import utils
# class Piece:
# def __init__(self,color = 0, pos=(-1,-1)):
# self.color = color
# self.pos = pos
class Player:
def __init__(self):
self.collected_flag = np.zeros((9, 2)) # collected shape sizes and whether each has been used
self.collected_shape_dict = {} # the collected shapes themselves
self.collect_num = 0 # number of distinct shape sizes collected; collecting 4 wins the game
def new_remove(self, position_list, size):
self.collected_flag[size, 0] = 1
self.collected_shape_dict[size] = position_list
self.collect_num += 1
def new_convert(self):
pass
class State:
def __init__(self):
self.turn = 0
self.board_state = np.zeros((3,3)) # board_state element: 1=black, -1=white, 0=none
self.shape_area_selected_index = - 1 # -1 = no select, 0 = first
self.convert_shape_selected = -1 # -1 = no select, 2 = selected shape size = 2
self.player1 = Player() # Black
self.player2 = Player() # White
self.result = 0 # 0 = Going, 1 = Black win, 2 = White win
def update_board(self, position, operation):
# operation 1 = move (or convert when a shape is selected), 2 = wheel click, 3 = remove
player = self.player1 if self.turn % 2 == 0 else self.player2
# if operation == 1 and self.convert_shape_selected == -1: # move or convert
# if self.board_state[position] == 0:
# self.board_state[position] = 1 if self.turn % 2 == 0 else -1
# self.next_turn()
if operation == 1 and self.board_state[position] == 0: # move
self.board_state[position] = 1 if self.turn % 2 == 0 else -1
self.next_turn()
elif operation == 1 and self.convert_shape_selected != -1: # convert
selected_shape_size = self.convert_shape_selected
if player.collected_flag[selected_shape_size, 1] == 0: # unused
position_list = self.search_fit_shape(position, selected_shape_size)
if position_list != []:
print("Convert shape")
for pos in position_list:
self.board_state[pos] *= -1
player.collected_flag[selected_shape_size, 1] = 1 # mark as used
self.next_turn()
else: print("This shape has been used")
elif operation == 3: # remove
#if self.board_state[position] == 1 and self.turn % 2 == 0:
position_list, num_list = self.shape_find(position,pos_list=[],flag=np.zeros((3,3)), num=[0])
size = num_list.pop()
print('Remove detect: ',position_list, size)
if size > 1:
if player.collected_flag[size, 0] == 0:
player.new_remove(position_list, size)
# print(self.player1.collected_flag[size,0])
for pos in position_list:
self.board_state[pos] = 0
self.next_turn()
elif operation == 2: # wheel click
pass
def check_result(self):
player = self.player1 if self.turn % 2 == 0 else self.player2
chess_color = 1 if self.turn % 2 == 0 else -1
black_count = 0
white_count = 0
# result 0 = Going, 1 = black win, 2 = white win
if self.player1.collect_num >= 4:
self.result = 1
return 1 # black win by collecting 4 shapes
elif self.player2.collect_num >= 4:
self.result = 2
return 2 # white win by collecting 4 shapes
if self.turn > 1:
for i in range(3):
for j in range(3):
if self.board_state[i,j] == 1: black_count+=1
elif self.board_state[i,j] == -1: white_count+=1
if black_count == 0:
self.result = 2
return 2 # white win by no black pieces existing
elif white_count == 0:
self.result = 1
return 1 # black win by no white pieces existing
trapped_dead_flag = 0 # Check trapped dead
if black_count+white_count == 9:
trapped_dead_flag = 1
for i in range(3): # check whether can remove or not
#if trapped_dead_flag == 1:
for j in range(3):
if self.board_state[i, j] == chess_color:
position = (i, j)
position_list, num_list = self.shape_find(position, pos_list=[], flag=np.zeros((3, 3)), num=[0])
size = num_list.pop()
# print('Remove detect: ', position_list, size)
if size > 1:
if player.collected_flag[size, 0] == 0:
print("Find fit shape to remove")
trapped_dead_flag = 0 # can do remove
break
if trapped_dead_flag == 0: break
if trapped_dead_flag == 1:
for flag_index in range(9): # check whether can convert or not
if trapped_dead_flag == 1 and \
player.collected_flag[flag_index, 0] == 1 \
and player.collected_flag[flag_index, 1] == 0:
for i in range(3):
# if trapped_dead_flag == 1:
for j in range(3):
selected_shape_size = flag_index
position = (i, j)
position_list = self.search_fit_shape(position, selected_shape_size)
if position_list != []:
print("Find fit shape to convert")
trapped_dead_flag = 0 # can do convert
break
if trapped_dead_flag == 0: break
if trapped_dead_flag == 0: break
if trapped_dead_flag == 1:
self.result = 2 if self.turn % 2 == 0 else 1
# print("Trapped dead!")
return self.result
return self.result
def shape_find(self,position,pos_list,flag,num):
position_X, position_Y = position
if position_X < 0 or position_X > 2 or position_Y < 0 or position_Y > 2:
return
if (self.board_state[position] == 1 and self.turn % 2 == 0)\
or (self.board_state[position] == -1 and self.turn % 2) == 1:
if flag[position] == 0:
pos_list.append(position)
flag[position] = 1
num.append(num.pop() + 1)
self.shape_find(position=(position_X - 1,position_Y), \
pos_list=pos_list, flag=flag, num=num)
self.shape_find(position=(position_X + 1, position_Y), \
pos_list=pos_list, flag=flag, num=num)
self.shape_find(position=(position_X, position_Y - 1), \
pos_list=pos_list, flag=flag, num=num)
self.shape_find(position=(position_X, position_Y + 1), \
pos_list=pos_list, flag=flag, num=num)
return pos_list,num
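    # search_fit_shape checks whether a previously captured shape can be
    # "converted" onto the board: the shape's cells are expressed as offsets
    # from its first cell, the shape is anchored at every board cell in turn,
    # and a placement only counts if every covered cell holds an opponent
    # stone and the clicked target_position is among the covered cells.
    # It returns the list of covered positions, or [] when nothing fits.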
def search_fit_shape(self, target_position, selected_shape_size):
player = self.player1 if self.turn % 2 == 0 else self.player2
# selected_shape_size = self.convert_shape_selected
shape = player.collected_shape_dict[selected_shape_size]
# print("Shape:", shape)
        rel_XY_list = [] # offsets of each cell relative to the first cell of the shape
pos0 = shape[0]
#pos0_X, pos0_Y = pos0
for pos in shape:
#pos_X, pos_Y = pos
#rel_XY = (pos_X-pos0_X, pos_Y-pos0_Y)
rel_XY = utils.minus_tuple(pos,pos0)
rel_XY_list.append(rel_XY)
# print("rel_XY_list: ", rel_XY_list)
chess_color = 1 if self.turn % 2 == 0 else -1
position_list = []
for i in range(3):
for j in range(3):
#if self.board_state[(i,j)] == chess_color:
fit_color_count = 0
position_list = []
click_fit_flag = 0
for rel_XY in rel_XY_list:
check_X, check_Y = utils.add_tuple((i,j), rel_XY)
if check_X<0 or check_X>2 or check_Y<0 or check_Y>2:
position_list = []
break
if self.board_state[check_X, check_Y] != -chess_color:
position_list = []
break
# print("fit_color_count: ", fit_color_count)
position_list.append((check_X, check_Y))
fit_color_count += 1
if target_position == (check_X, check_Y): click_fit_flag = 1
if fit_color_count == selected_shape_size:
if click_fit_flag == 1:
# print("Find fit shape")
return position_list
else:
position_list = []
break
return position_list
def next_turn(self):
self.turn += 1
def update_convert_shape_selected(self, area_index, player_id):
self.shape_area_selected_index = area_index
if area_index == -1:
self.convert_shape_selected = -1
return True
shape_count = 0
player = self.player1 if player_id == 1 else self.player2
for i in range(9): # convert area_index to convert_shape_selected
if player.collected_flag[i, 0] == 1:
shape_count += 1
if shape_count > area_index:
self.convert_shape_selected = i
return True
self.convert_shape_selected = -1
return False
|
python
|
"""Integration with nest devices."""
import json
import logging
import sys
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from appengine import account, device, rest
@device.register('nest_thermostat')
class NestThermostat(device.Device):
"""Class represents a Nest thermostat."""
temperature = ndb.FloatProperty()
humidity = ndb.FloatProperty()
target_temperature = ndb.FloatProperty()
def get_categories(self):
return ['CLIMATE']
def handle_event(self, event):
self.account = event['account']
self.humidity = event['humidity']
self.temperature = event['ambient_temperature_c']
self.device_name = event['name_long']
self.target_temperature = event['target_temperature_c']
@device.register('nest_protect')
class NestProtect(device.Device):
"""Class represents a Nest protect (smoke alarm)."""
def get_categories(self):
return ['CLIMATE']
def handle_event(self, event):
self.account = event['account']
self.device_name = event['name_long']
@account.register('nest')
class NestAccount(account.Account):
"""Class represents a Nest account."""
AUTH_URL = ('https://home.nest.com/login/oauth2?'
'client_id=%(client_id)s&state=%(state)s')
ACCESS_TOKEN_URL = ('https://api.home.nest.com/oauth2/access_token?'
'client_id=%(client_id)s&code=%(auth_code)s&'
'client_secret=%(client_secret)s&'
'grant_type=authorization_code')
API_URL = 'https://developer-api.nest.com/devices.json?auth=%(access_token)s'
STRUCTURES_URL = ('https://developer-api.nest.com/structures.json'
'?auth=%(access_token)s')
SINGLE_STRUCTURE_URL = ('https://developer-api.nest.com/structures/'
'%(id)s?auth=%(access_token)s')
def __init__(self, *args, **kwargs):
super(NestAccount, self).__init__(*args, **kwargs)
# pylint: disable=invalid-name
from common import creds
self.CLIENT_ID = creds.NEST_CLIENT_ID
self.CLIENT_SECRET = creds.NEST_CLIENT_SECRET
def get_human_type(self):
return 'Nest'
def set_away(self, value):
"""Set away status of all structures."""
structures = self.do_request(self.STRUCTURES_URL)
logging.info(structures)
value = 'away' if value else 'home'
for structure_id in structures.iterkeys():
url = self.SINGLE_STRUCTURE_URL % {'id': structure_id,
'access_token': self.access_token}
request_data = json.dumps({'away': value})
logging.info('Sending request "%s" to %s', request_data, url)
try:
self.do_request(
url, payload=request_data,
method=urlfetch.PUT)
except:
logging.error('Setting Away on nest failed', exc_info=sys.exc_info())
@rest.command
def refresh_devices(self):
if self.access_token is None:
logging.info('No access token, skipping.')
return
result = self.do_request(self.API_URL)
logging.info(result)
events = []
if 'smoke_co_alarms' in result:
for protect_id, protect_info in result['smoke_co_alarms'].iteritems():
protect_info['account'] = self.key.string_id()
events.append({
'device_type': 'nest_protect',
'device_id': 'nest-protect-%s' % protect_id,
'event': protect_info,
})
if 'thermostats' in result:
for thermostat_id, thermostat_info in result['thermostats'].iteritems():
thermostat_info['account'] = self.key.string_id()
events.append({
'device_type': 'nest_thermostat',
'device_id': 'nest-thermostat-%s' % thermostat_id,
'event': thermostat_info,
})
device.process_events(events)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
def run_tests(**options):
import django
import sys
from django.conf import settings
from ginger.conf.settings import template_settings
defaults = dict(
MIDDLEWARE_CLASSES=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
TEMPLATE_DEBUG = True,
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
TEMPLATES = template_settings(dirs=[os.path.join(os.path.dirname(__file__), 'test_templates'),])
)
defaults.update(options)
settings.configure(**defaults)
django.setup()
# Run the test suite, including the extra validation tests.
from django.test.utils import get_runner
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=False, failfast=False)
failures = test_runner.run_tests(sys.argv[1:] or settings.INSTALLED_APPS)
return failures
urlpatterns = []
ROOT_URLCONF = 'runtests'
INSTALLED_APPS = [
# 'django_nose',
'ginger',
'ginger.contrib.staging',
]
def main():
failures = run_tests(
INSTALLED_APPS=INSTALLED_APPS,
ROOT_URLCONF=ROOT_URLCONF,
# TEST_RUNNER='django_nose.NoseTestSuiteRunner'
)
sys.exit(failures)
if __name__ == "__main__":
main()
|
python
|
from __future__ import print_function
import os
from os.path import dirname, join, abspath, isdir
import sys
import json
import requests
try:
from urlparse import urljoin
except:
from urllib.parse import urljoin # python 3.x
# http://python-redmine.readthedocs.org/
from redmine import Redmine
if __name__=='__main__':
SRC_ROOT = dirname(dirname(abspath(__file__)))
sys.path.append(SRC_ROOT)
from datetime import datetime
from utils.msg_util import *
class RedmineIssueDownloader:
"""
For a given Redmine project, download the issues in JSON format
"""
#TIME_FORMAT_STRING = '%Y-%m%d-%H%M'
TIME_FORMAT_STRING = '%Y-%m%d'
# Redmine tickets are written to JSON files with the naming convention "(issue id).json"
# For file sorting, preceding zeros are tacked on.
# Example: If ZERO_PADDING_LEVEL=5:
# issue #375 is written to file "00375.json"
# issue #1789 is written to file "01789.json"
# issue #2 is written to file "00002.json"
#
# If your issue numbers go beyond 99,999 then increase the ZERO_PADDING_LEVEL
#
ZERO_PADDING_LEVEL = 5
def __init__(self, redmine_server, redmine_api_key, project_name_or_identifier, issues_base_directory, **kwargs):
"""
Constructor
:param redmine_server: str giving the url of the redmine server. e.g. https://redmine.myorg.edu/
:param redmine_api_key: str with a redmine api key
:param project_name_or_identifier: str or int with either the redmine project id or project identifier
:param issues_base_directory: str, directory to download the redmine issues in JSON format. Directory will be crated
:param specific_tickets_to_download: optional, list of specific ticket numbers to download. e.g. [2215, 2216, etc]
"""
self.redmine_server = redmine_server
self.redmine_api_key = redmine_api_key
self.project_name_or_identifier = project_name_or_identifier
self.issues_base_directory = issues_base_directory
self.issue_status = kwargs.get('issue_status', '*') # values 'open', 'closed', '*'
self.specific_tickets_to_download = kwargs.get('specific_tickets_to_download', None)
self.redmine_conn = None
self.redmine_project = None
self.issue_dirname = join(self.issues_base_directory\
, datetime.today().strftime(RedmineIssueDownloader.TIME_FORMAT_STRING)\
)
self.setup()
def setup(self):
self.connect_to_redmine()
if not isdir(self.issue_dirname):
os.makedirs(self.issue_dirname)
msgt('Directory created: %s' % self.issue_dirname)
def connect_to_redmine(self):
self.redmine_conn = Redmine(self.redmine_server, key=self.redmine_api_key)
self.redmine_project = self.redmine_conn.project.get(self.project_name_or_identifier)
msg('Connected to server [%s] project [%s]' % (self.redmine_server, self.project_name_or_identifier))
def get_issue_count(self):
msgt('get_issue_count')
issue_query_str = 'issues.json?project_id=%s&limit=1&status_id=%s' \
% (self.project_name_or_identifier, self.issue_status)
url = urljoin(self.redmine_server, issue_query_str)
msg('Issue count url: %s' % url)
# Note: Auth purposely uses the API KEY "as a username with a random password via HTTP Basic authentication"
# from: http://www.redmine.org/projects/redmine/wiki/Rest_api
#
auth = (self.redmine_api_key, 'random-pw')
r = requests.get(url, auth=auth)
if not r.status_code == 200:
msgt('Error!')
msg(r.text)
raise Exception("Request for issue count failed! Status code: %s\nUrl: %s\nAuth:%s" % (r.status_code, url, auth))
msg('Convert result to JSON')
try:
data = r.json() # Let it blow up
except:
msgt('Error!')
msg('Data from request (as text): %s' % r.text)
            raise Exception('Failed to convert issue count data to JSON.\nUrl: %s\nAuth: %s' % (url, auth))
        if 'total_count' not in data:
msgx('Total count not found in data: \n[%s]' % data)
return data['total_count']
"""
from __future__ import print_function
import requests
project_id = 'dvn'
redmine_api_key = 'some-key'
url = 'https://redmine.hmdc.harvard.edu/issues.json?project_id=%s&limit=1' % project_id
#---------------------
# Alternative 1
#---------------------
auth = (redmine_api_key, 'random-pw')
r = requests.get(url, auth=auth)
print (r.text)
print (r.status_code)
data = r.json()
print (data['total_count'])
#---------------------
# Alternative 2
#---------------------
url2 = '%s&key=%s' % (url, redmine_api_key)
r = requests.get(url2)
print (r.text)
print (r.status_code)
data = r.json()
print (data['total_count'])
"""
def write_issue_list(self, issue_fname, issue_dict):
if issue_fname is None or not type(issue_dict) == dict:
msgx('ERROR: write_issue_list, issue_fname is None or issue_dict not dict')
return
fh = open(issue_fname, 'w')
fh.write(json.dumps(issue_dict))
fh.close()
msg('file updated: %s' % issue_fname)
def show_project_info(self):
msg(self.redmine_project._attributes)
def download_tickets2(self):
"""
fyi: Retrieving total count via regular api, not python redmine package
"""
issue_dict = {}
issue_fname = join(self.issue_dirname, 'issue_list.json')
msg('Gathering issue information.... (may take a minute)')
ticket_cnt = self.get_issue_count()
RECORD_RETRIEVAL_SIZE = 100
        num_loops = ticket_cnt // RECORD_RETRIEVAL_SIZE  # integer division (works on Python 2 and 3)
extra_recs = ticket_cnt % RECORD_RETRIEVAL_SIZE
if extra_recs > 0:
num_loops+=1
#num_loops=3
msg('num_loops: %d' % num_loops)
msg('extra_recs: %d' % extra_recs)
cnt = 0
for loop_num in range(0, num_loops):
start_record = loop_num * RECORD_RETRIEVAL_SIZE
end_record = (loop_num+1) * RECORD_RETRIEVAL_SIZE
msgt('Retrieve records via idx (skip last): %s - %s' % (start_record, end_record))
# limit of 100 is returning 125
rec_cnt = 0
for item in self.redmine_conn.issue.filter(project_id=self.project_name_or_identifier, status_id=self.issue_status, sort='id', offset=start_record)[:RECORD_RETRIEVAL_SIZE]: #, limit=RECORD_RETRIEVAL_SIZE): #[start_record:end_record]
rec_cnt +=1
cnt +=1
msg('(%s) %s - %s' % (rec_cnt, item.id, item.subject))
if self.specific_tickets_to_download is not None:
# only download specific tickets
#
if item.id in self.specific_tickets_to_download:
self.save_single_issue(item)
issue_dict[self.pad_issue_id(item.id)] = item.subject
continue # go to next item
else:
# Get all tickets
#
self.save_single_issue(item)
issue_dict[self.pad_issue_id(item.id)] = item.subject
if rec_cnt == RECORD_RETRIEVAL_SIZE:
break
#continue
#self.save_single_issue(item)
self.write_issue_list(issue_fname, issue_dict)
def pad_issue_id(self, issue_id):
if issue_id is None:
msgx('ERROR. pad_issue_id. The "issue_id" is None')
return ('%s' % issue_id).zfill(self.ZERO_PADDING_LEVEL)
def save_single_issue(self, single_issue):
"""
Write a single issue object to a file using JSON format
:param single_issue: Issue object
"""
if single_issue is None:
msgx('ERROR. download_single_issue. The "single_issue" is None')
## FIX: Expensive adjustment -- to pull out full relation and journal info
json_str = self.get_single_issue(single_issue.id) # another call to redmine
#json_str = json.dumps(single_issue._attributes, indent=4)
fullpath = join(self.issue_dirname, self.pad_issue_id(single_issue.id) + '.json')
open(fullpath, 'w').write(json_str)
msg('Ticket retrieved: %s' % fullpath)
def process_files(self, issues_dirname=None):
if issues_dirname is None:
issues_dirname = self.issue_dirname
tracker_info = []
status_info = []
priority_info = []
fnames = [x for x in os.listdir(issues_dirname) if x.endswith('.json')]
for fname in fnames:
content = open(join(issues_dirname, fname), 'rU').read()
d = json.loads(content)
# Tracker Info
tracker = d.get('tracker', None)
if tracker:
tracker_str = '%s|%s' % (tracker['id'], tracker['name'])
if not tracker_str in tracker_info:
tracker_info.append(tracker_str)
# Status Info
status = d.get('status', None)
if status:
status_str = '%s|%s' % (status['id'], status['name'])
if not status_str in status_info:
status_info.append(status_str)
# Priority Info
priority = d.get('priority', None)
if priority:
priority_str = '%s|%s' % (priority['id'], priority['name'])
if not priority_str in priority_info:
priority_info.append(priority_str)
#print d.keys()
msg(tracker_info)
msg(status_info)
msg(priority_info)
def get_single_issue(self, issue_id):
"""
Download a single issue
:param ticket_id: int of issue id in redmine
:returns: json string with issue information
"""
# test using .issue.get
issue = self.redmine_conn.issue.get(issue_id, include='children,attachments,journals,watchers,relations')
json_str = json.dumps(issue._attributes, indent=4)
msg('Issue retrieved: %s' % issue_id)
return json_str
if __name__=='__main__':
from settings.base import REDMINE_SERVER, REDMINE_PROJECT_ID, REDMINE_API_KEY, REDMINE_ISSUES_DIRECTORY
#rn = RedmineIssueDownloader(REDMINE_SERVER, REDMINE_API_KEY, 'dvn', REDMINE_ISSUES_DIRECTORY)
#Only import some specific tickets
#kwargs = dict(specific_tickets_to_download=[1371, 1399, 1843, 2214, 2215, 2216, 3362, 3387, 3397, 3400, 3232, 3271, 3305, 3426, 3425, 3313, 3208])
rn = RedmineIssueDownloader(REDMINE_SERVER, REDMINE_API_KEY, REDMINE_PROJECT_ID, REDMINE_ISSUES_DIRECTORY)
rn.download_tickets2()
msg(rn.get_issue_count())
#rn.show_project_info()
#rn.process_files()
#msg(rn.get_single_issue(3232))
"""
import json
c = open('issue_list2.txt', 'rU').read()
d = json.loads(c)
print(len(d))
"""
|
python
|
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
vertices = (
# ( x, y, z)
( 1, -1, -1), # A
( 1, 1, -1), # B
(-1, 1, -1), # C
(-1, -1, -1), # D
( 1, -1, 1), # E
( 1, 1, 1), # F
(-1, -1, 1), # G
(-1, 1, 1) # H
)
edges = (
(0, 1),
(0, 3),
(0, 4),
(2, 1),
(2, 3),
(2, 7),
(6, 3),
(6, 4),
(6, 7),
(5, 1),
(5, 4),
(5, 7),
# (7, 1),
# (5, 2),
# (1, 4),
# (5, 0),
# (7, 4),
# (6, 5),
# (6, 2),
# (7, 3),
# (2, 0),
# (1, 3),
# (6, 0),
# (3, 4),
# (7, 0),
# (5, 3),
# (2, 4),
# (1, 6)
)
surfaces = (
(0, 1, 2, 3),
(3, 2, 7, 6),
(6, 7, 5, 4),
(4, 5, 1, 0),
(1, 5, 7, 2),
(4, 0, 3, 6)
)
color = (
(1, 0, 0),
(0, 1, 0),
(0, 0, 0),
(1, 1, 1),
(0, 1, 1),
(0, 1, 1),
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
(0, 1, 0),
(0, 0, 1),
(0, 1, 0),
)
def Cube():
glBegin(GL_QUADS)
for surface in surfaces:
x = 0
glColor3fv((1, 0, 0))
for vertex in surface:
x += 1
glColor3fv(color[x])
glVertex3fv(vertices[vertex])
glEnd()
glBegin(GL_LINES)
glColor3fv((0, 0.9, 0))
for edge in edges:
for vertex in edge:
glVertex3fv(vertices[vertex])
glEnd()
def main():
pygame.init()
screen = pygame.display.set_mode((800, 600), DOUBLEBUF | OPENGL)
gluPerspective(45, (800 / 600), 0.1, 50)
glTranslatef(0, 0, -5)
glRotatef(0, 0, 0, 0)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
                pygame.quit()
                return  # leave the render loop once the window is closed
glRotatef(1, 1, 1, 1)
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
Cube()
pygame.display.flip()
pygame.time.wait(10)
main()
|
python
|
#!/usr/bin/env python
import argparse
import math
from collections import Counter
from spawningtool.parser import parse_replay
def map_replays(filenames, map_fn, results, update_fn, cache_dir=None, **kwargs):
for filename in filenames:
filename = filename.rstrip('\n')
replay = parse_replay(filename, cache_dir=cache_dir)
map_fn(replay, results, update_fn, **kwargs)
def check_supplies(replay, results, update_fn, condition=None, min_time=None, max_time=None):
if len(replay['players']) != 2:
return
players = list(replay['players'].values())
player_1 = players[0]
player_2 = players[1]
if player_1['is_winner'] == player_2['is_winner']:
return
is_counting_player_1 = not condition or condition(replay, player_1, player_2)
is_counting_player_2 = not condition or condition(replay, player_2, player_1)
if is_counting_player_1 or is_counting_player_2:
if min_time is None:
min_time = 0
min_time = int(min_time) * 16
if max_time is None:
max_time = 10000
max_time = int(max_time) * 16
player_2_supply = player_2['supply']
supply_len = len(player_2_supply)
for i, data in enumerate(player_1['supply']):
if i >= supply_len or min_time > data[0]:
continue
if max_time < data[0]: # we're not going down at this point
return
diff = data[1] - player_2_supply[i][1]
if is_counting_player_1:
update_fn(results, player_1, player_2, data[1], player_2_supply[i][1], data[0])
if is_counting_player_2:
update_fn(results, player_2, player_1, player_2_supply[i][1], data[1], data[0])
def count_win_rate_by_supply_difference(filenames, condition=None, cache_dir=None,
min_time=None, max_time=None):
results = {
'num_games_counter': Counter(),
'num_wins_counter': Counter(),
}
def update_fn(results, player_1, player_2, supply_1, supply_2, frames):
diff = supply_1 - supply_2
results['num_games_counter'][diff] += 1
if player_1['is_winner']:
results['num_wins_counter'][diff] += 1
map_replays(filenames, check_supplies, results, update_fn, cache_dir, condition=condition,
min_time=min_time, max_time=max_time)
return [ [str(i),
str(results['num_wins_counter'][i]),
str(results['num_games_counter'][i])]
for i in range(-100, 100)]
def count_win_rate_by_supply_ratio(filenames, condition=None, cache_dir=None,
min_time=None, max_time=None):
results = {
'num_games_counter': Counter(),
'num_wins_counter': Counter(),
}
def update_fn(results, player_1, player_2, supply_1, supply_2, frames):
if supply_1 == 0 or supply_2 == 0:
return
result = int(round(math.log(1.0 * supply_1 / supply_2) * 50))
results['num_games_counter'][result] += 1
if player_1['is_winner']:
results['num_wins_counter'][result] += 1
map_replays(filenames, check_supplies, results, update_fn, cache_dir, condition=condition,
min_time=min_time, max_time=max_time)
return [ [str(i),
str(results['num_wins_counter'][i]),
str(results['num_games_counter'][i])]
for i in range(-100, 100)]
def count_win_rate_by_time_supply_difference(filenames, condition=None, cache_dir=None,
min_time=None, max_time=None):
results = {
'num_games_counter': Counter(),
'num_wins_counter': Counter(),
}
def update_fn(results, player_1, player_2, supply_1, supply_2, frames):
diff = supply_1 - supply_2
        minute = frames // (16 * 60)  # whole minutes, so the (diff, minute) key stays an integer pair
index = (diff, minute)
results['num_games_counter'][index] += 1
if player_1['is_winner']:
results['num_wins_counter'][index] += 1
map_replays(filenames, check_supplies, results, update_fn, cache_dir, condition=condition,
min_time=min_time, max_time=max_time)
return [ [str(index[0]), str(index[1]),
str(results['num_wins_counter'][index]),
str(results['num_games_counter'][index])]
for index in list(results['num_wins_counter'].keys())
if results['num_games_counter'][index] > 30]
RACE_BY_LETTER = {'z': 'Zerg', 't': 'Terran', 'p': 'Protoss'}
OBJECTIVES = {
'supply_difference': count_win_rate_by_supply_difference,
'supply_ratio': count_win_rate_by_supply_ratio,
'supply_time_difference': count_win_rate_by_time_supply_difference,
}
def main():
"""
Execute spawningtool
"""
parser = argparse.ArgumentParser()
parser.add_argument('filenames', help='text file with a list of SC2Replay paths')
parser.add_argument('--cache-dir', help='Directory to cache results in')
parser.add_argument('--matchup', help='Which matchup to check')
parser.add_argument('--objective', help='What is being calculated')
parser.add_argument('--min-time', help='lower bound to count')
parser.add_argument('--max-time', help='upper bound to count')
args = parser.parse_args()
condition = None
if args.matchup and len(args.matchup) == 3:
player_1_race = RACE_BY_LETTER[args.matchup[0].lower()]
player_2_race = RACE_BY_LETTER[args.matchup[2].lower()]
condition = lambda replay, player_1, player_2: \
player_1['race'] == player_1_race and player_2['race'] == player_2_race
objective_fn = OBJECTIVES.get(args.objective, count_win_rate_by_supply_difference)
with open(args.filenames, 'r') as fin:
result = objective_fn(fin, condition=condition, cache_dir=args.cache_dir,
min_time=args.min_time, max_time=args.max_time)
for row in result:
print(','.join([str(val) for val in row]))
if __name__ == '__main__':
main()
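# Example invocation (the script name and paths here are only illustrative):
#   python supply_win_rate.py replay_list.txt --matchup zvt \
#       --objective supply_ratio --min-time 300 --max-time 600 --cache-dir .cache
# replay_list.txt is a plain text file with one .SC2Replay path per line,
# which is what map_replays() iterates over.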
|
python
|
import numpy as np
class Exploration_Noise:
def reset(self):
"""
Reset the noise generator.
"""
pass
def process_action(self, a):
"""
Add noise to the given action.
Args:
a: the action to be processed
Return:
the processed action
"""
raise NotImplementedError
class OUNoise_Exploration:
"""
The OU noise.
"""
def __init__(self, action_dimension, mu=0, theta=0.15, init_epsilon=0.2, final_epsilon=0.01, explore_len=100000):
self.action_dimension = action_dimension
self.mu = mu
self.theta = theta
self.sigma = init_epsilon
self.final_sigma = final_epsilon
self.sigma_decay = (init_epsilon - final_epsilon) / explore_len
self.state = np.ones(self.action_dimension) * self.mu
self.extra_info = ['epsilon']
self.reset()
def reset(self):
self.state = np.ones(self.action_dimension) * self.mu
def process_action(self, a):
processed_a = a + self.noise()
if self.sigma > self.final_sigma:
self.sigma -= self.sigma_decay
return processed_a, {'epsilon': self.sigma}
def noise(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
class NoNoise_Exploration:
def __init__(self):
self.extra_info = []
def process_action(self, a):
return np.argmax(a), {}
def reset(self):
pass
class EpsilonGreedy_Exploration:
"""
The epsilon greedy noise.
"""
def __init__(self, action_n, init_epsilon, final_epsilon, explore_len):
self.epsilon = init_epsilon
self.epsilon_decay = (init_epsilon - final_epsilon) / explore_len
self.final_epsilon = final_epsilon
self.extra_info = ['epsilon']
self.n = action_n
def process_action(self, a):
if np.random.rand() < self.epsilon:
new_a = np.random.randint(0, self.n)
else:
new_a = np.argmax(a)
if self.epsilon > self.final_epsilon:
self.epsilon -= self.epsilon_decay
return new_a, {'epsilon': self.epsilon}
def reset(self):
pass
class Boltzmann_Exploration:
"""
The Boltzmann noise.
"""
def __init__(self, action_n, init_epsilon, final_epsilon, explore_len):
self.epsilon = init_epsilon
self.epsilon_decay = (init_epsilon - final_epsilon) / explore_len
self.final_epsilon = final_epsilon
self.extra_info = ['epsilon']
self.n = action_n
def process_action(self, a):
        probs = np.exp(a / self.epsilon)
        probs = probs / probs.sum()  # normalize: np.random.multinomial expects probabilities summing to 1
        new_a = np.argmax(np.random.multinomial(1, probs))
if self.epsilon > self.final_epsilon:
self.epsilon -= self.epsilon_decay
return new_a, {'epsilon': self.epsilon}
def reset(self):
pass
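# A minimal usage sketch (not part of the original module): it assumes a
# 4-action discrete agent whose raw output is a random preference vector and
# shows how process_action() returns both the chosen action and the decaying
# epsilon reported through extra_info.
if __name__ == "__main__":
    explorer = EpsilonGreedy_Exploration(action_n=4, init_epsilon=1.0,
                                         final_epsilon=0.1, explore_len=1000)
    for step in range(5):
        q_values = np.random.rand(4)  # stand-in for a network's action scores
        action, info = explorer.process_action(q_values)
        print(step, action, info['epsilon'])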
|
python
|
from scrapy.http import Request
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
class HackernewsSpider(BaseSpider):
name = 'hackernews'
allowed_domains = []
start_urls = ['http://news.ycombinator.com']
def parse(self, response):
if 'news.ycombinator.com' in response.url:
hxs = HtmlXPathSelector(response)
            titles = hxs.select('//td[@class="title"]//a/text()')
            for title in titles:
                print(title.extract())
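# Note: this spider targets the old Scrapy 0.x API. On current Scrapy releases
# the equivalents are scrapy.Spider instead of BaseSpider and
# response.xpath('//td[@class="title"]//a/text()') instead of
# HtmlXPathSelector.select(); the parsing logic itself is unchanged.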
|
python
|
import sys
sys.path.insert(0, '/home/vagrant/Integration-Testing-Framework/sikuli/examples')
from sikuli import *
from test_helper import TestHelper
from flex_regions import *
from Regionplus import *
# Setup
helper = TestHelper("drag_column")
set_flex_helper(helper)
# Opening
#############
helper.Click("Lexicon.png", "Couldn't find 'Lexicon' button")
LEFT_SIDEBAR.Click("LexiconEdit.png", "Couldn't find 'Lexicon Edit' button")
# Goal
#############
# Not doing the drag-drop directly, so if it fails we can
# pinpoint what wasn't found
glosses = MID_TOOLBAR.Find("3losses.png", "Couldn't find 'Glosses' column header")
target = MID_TOOLBAR.Find("lLexemeJorm_.png",
"'Headword' and 'Lexeme Form' headers not where expected")
dragDrop(glosses, target)
# Check that it's in the new position
helper.Find(Pattern("hexemeformHe.png").similar(0.80), "'Glosses' column not in new position")
# Closing
##############
helper.write_success()
# Drag it back to previous position
glosses = MID_TOOLBAR.Find("3losses.png", "Couldn't find 'Glosses' column header",
restart=True)
target = MID_TOOLBAR.Find("LexemeFormIG.png",
"'Lexeme Form' and 'Grammatical Info' headers not where expected",
restart=True)
dragDrop(glosses, target)
|
python
|
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from collections import deque
from subprocess import DEVNULL, PIPE, Popen
from threading import Lock, Thread
class BackgroundProcessCrash(Exception):
def __init__(self, message: str, details: str) -> None:
super().__init__(message)
self.details = details
class _Logger:
"""Logger that optionally captures what is logged"""
def __init__(
self,
write: typing.Callable[[str], None],
do_log: bool,
do_capture: bool,
max_capture: int,
):
self.write = write
self.do_log = do_log
self.do_capture = do_capture
self.finished = Lock()
if max_capture < 0:
capture = [] # type: typing.MutableSequence[str]
else:
capture = deque(maxlen=max_capture)
self.capture = capture # type: typing.MutableSequence[str]
self.finished.acquire()
def log(self, line: str) -> None:
if self.do_log:
self.write(line)
if self.do_capture:
self.capture.append(line)
def finish(self) -> None:
self.finished.release()
def get_captured(self) -> str:
self.finished.acquire() # Block until finish is called
return "".join(self.capture).strip()
def _launch_command(
args: typing.List[str],
out_logger: _Logger,
err_logger: _Logger,
done: typing.Optional[typing.Callable[[Popen], None]] = None,
**kwargs: typing.Any
) -> Popen:
"""
Launch subprocess with args, kwargs.
Log stdout and stderr by calling respective callbacks.
"""
def pump_stream(logger: _Logger, stream: typing.Iterable[str]) -> None:
"""Pump the stream"""
for line in stream:
logger.log(line)
logger.finish()
def joiner() -> None:
"""Wait for streams to finish, then call done callback"""
for th in threads:
th.join()
if done:
done(process)
kwargs = kwargs.copy()
in_data = kwargs.get("input")
if "input" in kwargs:
del kwargs["input"]
assert kwargs.get("stdin") is None, kwargs["stdin"]
kwargs["stdin"] = PIPE
elif "stdin" not in kwargs:
kwargs["stdin"] = DEVNULL
kwargs.setdefault("stdout", PIPE)
kwargs.setdefault("stderr", PIPE)
kwargs["universal_newlines"] = True # Text streams, not byte streams
process = Popen(args, **kwargs)
threads = []
if process.stdout:
thread = Thread(
target=pump_stream, args=(out_logger, process.stdout), daemon=True
)
thread.start()
threads.append(thread)
if process.stderr:
thread = Thread(
target=pump_stream, args=(err_logger, process.stderr), daemon=True
)
thread.start()
threads.append(thread)
if done and threads:
Thread(target=joiner, daemon=True).start()
if in_data:
process.stdin.write(str(in_data, "utf-8"))
process.stdin.close()
return process
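# A minimal usage sketch (assumes a POSIX system with `echo` on PATH; not part
# of the original module). It wires two capturing loggers into _launch_command,
# waits for the child to exit, then prints the captured stdout once the pump
# thread has released the logger's lock.
if __name__ == "__main__":
    out_log = _Logger(print, do_log=False, do_capture=True, max_capture=-1)
    err_log = _Logger(print, do_log=False, do_capture=True, max_capture=-1)
    proc = _launch_command(["echo", "hello world"], out_log, err_log)
    proc.wait()
    print("captured stdout:", out_log.get_captured())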
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/logging/v1/log_ingestion_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from yandex.cloud.logging.v1 import log_entry_pb2 as yandex_dot_cloud_dot_logging_dot_v1_dot_log__entry__pb2
from yandex.cloud.logging.v1 import log_resource_pb2 as yandex_dot_cloud_dot_logging_dot_v1_dot_log__resource__pb2
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/logging/v1/log_ingestion_service.proto',
package='yandex.cloud.logging.v1',
syntax='proto3',
serialized_options=b'\n\033yandex.cloud.api.logging.v1ZCgithub.com/yandex-cloud/go-genproto/yandex/cloud/logging/v1;logging',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n3yandex/cloud/logging/v1/log_ingestion_service.proto\x12\x17yandex.cloud.logging.v1\x1a\x17google/rpc/status.proto\x1a\'yandex/cloud/logging/v1/log_entry.proto\x1a*yandex/cloud/logging/v1/log_resource.proto\x1a\x1dyandex/cloud/validation.proto\"\x90\x02\n\x0cWriteRequest\x12?\n\x0b\x64\x65stination\x18\x01 \x01(\x0b\x32$.yandex.cloud.logging.v1.DestinationB\x04\xe8\xc7\x31\x01\x12;\n\x08resource\x18\x02 \x01(\x0b\x32).yandex.cloud.logging.v1.LogEntryResource\x12\x45\n\x07\x65ntries\x18\x03 \x03(\x0b\x32).yandex.cloud.logging.v1.IncomingLogEntryB\t\x82\xc8\x31\x05\x31-100\x12;\n\x08\x64\x65\x66\x61ults\x18\x04 \x01(\x0b\x32).yandex.cloud.logging.v1.LogEntryDefaults\"\x96\x01\n\rWriteResponse\x12\x42\n\x06\x65rrors\x18\x01 \x03(\x0b\x32\x32.yandex.cloud.logging.v1.WriteResponse.ErrorsEntry\x1a\x41\n\x0b\x45rrorsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status:\x02\x38\x01\x32m\n\x13LogIngestionService\x12V\n\x05Write\x12%.yandex.cloud.logging.v1.WriteRequest\x1a&.yandex.cloud.logging.v1.WriteResponseBb\n\x1byandex.cloud.api.logging.v1ZCgithub.com/yandex-cloud/go-genproto/yandex/cloud/logging/v1;loggingb\x06proto3'
,
dependencies=[google_dot_rpc_dot_status__pb2.DESCRIPTOR,yandex_dot_cloud_dot_logging_dot_v1_dot_log__entry__pb2.DESCRIPTOR,yandex_dot_cloud_dot_logging_dot_v1_dot_log__resource__pb2.DESCRIPTOR,yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,])
_WRITEREQUEST = _descriptor.Descriptor(
name='WriteRequest',
full_name='yandex.cloud.logging.v1.WriteRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='destination', full_name='yandex.cloud.logging.v1.WriteRequest.destination', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resource', full_name='yandex.cloud.logging.v1.WriteRequest.resource', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='entries', full_name='yandex.cloud.logging.v1.WriteRequest.entries', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\202\3101\0051-100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='defaults', full_name='yandex.cloud.logging.v1.WriteRequest.defaults', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=222,
serialized_end=494,
)
_WRITERESPONSE_ERRORSENTRY = _descriptor.Descriptor(
name='ErrorsEntry',
full_name='yandex.cloud.logging.v1.WriteResponse.ErrorsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='yandex.cloud.logging.v1.WriteResponse.ErrorsEntry.key', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='yandex.cloud.logging.v1.WriteResponse.ErrorsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=582,
serialized_end=647,
)
_WRITERESPONSE = _descriptor.Descriptor(
name='WriteResponse',
full_name='yandex.cloud.logging.v1.WriteResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='errors', full_name='yandex.cloud.logging.v1.WriteResponse.errors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_WRITERESPONSE_ERRORSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=497,
serialized_end=647,
)
_WRITEREQUEST.fields_by_name['destination'].message_type = yandex_dot_cloud_dot_logging_dot_v1_dot_log__entry__pb2._DESTINATION
_WRITEREQUEST.fields_by_name['resource'].message_type = yandex_dot_cloud_dot_logging_dot_v1_dot_log__resource__pb2._LOGENTRYRESOURCE
_WRITEREQUEST.fields_by_name['entries'].message_type = yandex_dot_cloud_dot_logging_dot_v1_dot_log__entry__pb2._INCOMINGLOGENTRY
_WRITEREQUEST.fields_by_name['defaults'].message_type = yandex_dot_cloud_dot_logging_dot_v1_dot_log__entry__pb2._LOGENTRYDEFAULTS
_WRITERESPONSE_ERRORSENTRY.fields_by_name['value'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_WRITERESPONSE_ERRORSENTRY.containing_type = _WRITERESPONSE
_WRITERESPONSE.fields_by_name['errors'].message_type = _WRITERESPONSE_ERRORSENTRY
DESCRIPTOR.message_types_by_name['WriteRequest'] = _WRITEREQUEST
DESCRIPTOR.message_types_by_name['WriteResponse'] = _WRITERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WriteRequest = _reflection.GeneratedProtocolMessageType('WriteRequest', (_message.Message,), {
'DESCRIPTOR' : _WRITEREQUEST,
'__module__' : 'yandex.cloud.logging.v1.log_ingestion_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.logging.v1.WriteRequest)
})
_sym_db.RegisterMessage(WriteRequest)
WriteResponse = _reflection.GeneratedProtocolMessageType('WriteResponse', (_message.Message,), {
'ErrorsEntry' : _reflection.GeneratedProtocolMessageType('ErrorsEntry', (_message.Message,), {
'DESCRIPTOR' : _WRITERESPONSE_ERRORSENTRY,
'__module__' : 'yandex.cloud.logging.v1.log_ingestion_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.logging.v1.WriteResponse.ErrorsEntry)
})
,
'DESCRIPTOR' : _WRITERESPONSE,
'__module__' : 'yandex.cloud.logging.v1.log_ingestion_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.logging.v1.WriteResponse)
})
_sym_db.RegisterMessage(WriteResponse)
_sym_db.RegisterMessage(WriteResponse.ErrorsEntry)
DESCRIPTOR._options = None
_WRITEREQUEST.fields_by_name['destination']._options = None
_WRITEREQUEST.fields_by_name['entries']._options = None
_WRITERESPONSE_ERRORSENTRY._options = None
_LOGINGESTIONSERVICE = _descriptor.ServiceDescriptor(
name='LogIngestionService',
full_name='yandex.cloud.logging.v1.LogIngestionService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=649,
serialized_end=758,
methods=[
_descriptor.MethodDescriptor(
name='Write',
full_name='yandex.cloud.logging.v1.LogIngestionService.Write',
index=0,
containing_service=None,
input_type=_WRITEREQUEST,
output_type=_WRITERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_LOGINGESTIONSERVICE)
DESCRIPTOR.services_by_name['LogIngestionService'] = _LOGINGESTIONSERVICE
# @@protoc_insertion_point(module_scope)
|
python
|
"""
Tests of simple 2-layer PCBs
"""
from . import plotting_test_utils
import os
import mmap
import re
import logging
def expect_file_at(filename):
assert(os.path.isfile(filename))
def get_gerber_filename(board_name, layer_slug, ext='.gbr'):
return board_name + '-' + layer_slug + ext
def find_gerber_aperture(s, ap_desc):
m = re.search(r'%AD(.*)' + ap_desc + r'\*%', s)
if not m:
return None
return m.group(1)
def expect_gerber_has_apertures(gbr_data, ap_list):
aps = []
for ap in ap_list:
# find the circular aperture for the outline
ap_no = find_gerber_aperture(gbr_data, ap)
assert ap_no is not None
# apertures from D10 to D999
assert len(ap_no) in [2, 3]
aps.append(ap_no)
logging.debug("Found apertures {}".format(aps))
return aps
def expect_gerber_flash_at(gbr_data, pos):
"""
Check for a gerber flash at a given point
(it's hard to check that aperture is right without a real gerber parser
"""
repat = r'^X{x}Y{y}D03\*$'.format(
x=int(pos[0] * 100000),
y=int(pos[1] * 100000)
)
m = re.search(repat, gbr_data, re.MULTILINE)
assert(m)
logging.debug("Gerber flash found: " + repat)
def get_mmapped_data(filename):
with open(filename) as fo:
return mmap.mmap(fo.fileno(), 0, access=mmap.ACCESS_READ)
def test_2layer():
ctx = plotting_test_utils.KiPlotTestContext('simple_2layer')
ctx.load_yaml_config_file('simple_2layer.kiplot.yaml')
ctx.board_name = 'simple_2layer'
ctx.do_plot()
gbr_dir = ctx.cfg.resolve_output_dir_for_name('gerbers')
f_cu_gbr = os.path.join(gbr_dir,
get_gerber_filename(ctx.board_name, "F_Cu"))
expect_file_at(f_cu_gbr)
f_cu_data = get_mmapped_data(f_cu_gbr)
ap_ids = expect_gerber_has_apertures(f_cu_data, [
"C,0.200000",
"R,2.000000X2.000000",
"C,1.000000"])
# expect a flash for the square pad
expect_gerber_flash_at(f_cu_data, (140, -100))
ctx.clean_up()
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-09 07:00
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sass', '0003_auto_20190109_0659'),
]
operations = [
migrations.AlterField(
model_name='sitevisit',
name='additional_data',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True),
),
]
|
python
|
"""
Fetch ads.txt files from URLs
"""
from datetime import datetime
import logging
import os
import pandas as pd
import requests
logger = logging.getLogger(__name__)
HEADERS = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36",
"Accept": "text/plain"
}
def fetch_adstxt_file(adstxt_url, timeout=5):
logger.info("fetching ads.txt file from %s", adstxt_url)
dt = datetime.now()
try:
response = requests.get(adstxt_url, headers=HEADERS, timeout=timeout)
result = {
"adstxt_url": adstxt_url,
"failed": False,
"dt": dt.isoformat(),
"exception": None,
"status_code": response.status_code,
"text": response.text,
}
except requests.exceptions.RequestException as oops:
result = {
"adstxt_url": adstxt_url,
"failed": True,
"dt": dt.isoformat(),
"exception": repr(oops),
"status_code": None,
"text": None,
}
return result
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
all_domains = []
file_path = "unreliable-news/data/fake-news-codex-2018-06.csv"
df1 = pd.read_csv(file_path)
all_domains.extend(df1["domain"].tolist())
built_with_file = "../data/All-Live-Ads.txt-Sites.csv"
df_built_with = pd.read_csv(built_with_file, skiprows=1)
all_domains.extend(df_built_with["Domain"].tolist())
adstxt_results = []
for domain in all_domains:
adstxt_url = "http://{}/ads.txt".format(domain)
adstxt_fetch_result = fetch_adstxt_file(adstxt_url)
adstxt_results.append(adstxt_fetch_result)
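    # Persist the fetched results so the crawl does not have to be repeated;
    # the output filename is only an illustrative choice, not part of the
    # original script.
    pd.DataFrame(adstxt_results).to_csv("adstxt_fetch_results.csv", index=False)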
|
python
|
##
# Copyright 2021 IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
##
import torch
from ...._utils import val_clamp
from ..node import _NodeActivation
from ....constants import Direction
from ...parameters.neuron import _NeuronParameters
"""
Dynamic activation function
"""
class _NeuronActivation(_NodeActivation, _NeuronParameters):
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
self.Xf = 1 - self.alpha
self.Xt = self.alpha
self.Xmid = 0.5
def function(self, func: str, direction: Direction) -> torch.Tensor:
return getattr(self, "_" + func.lower())(direction)
@torch.no_grad()
def downward_conditional(
self, out_bounds: torch.Tensor, f_inv: torch.Tensor, input_terms: torch.Tensor
) -> torch.Tensor:
full_and_input = input_terms.sum(dim=-1)[..., None]
partial_and_input = (full_and_input - input_terms).flip([-2])
result = 1 + (
(f_inv[..., None] - self.bias + partial_and_input)
/ self.weights.clamp(min=0)
)
unknown = torch.ones_like(result)
unknown[..., 0, :] = 0
out_repeat = out_bounds[..., None].repeat_interleave(unknown.shape[-1], dim=-1)
result[..., 0, :] = result[..., 0, :].where(
out_repeat[..., 0, :] > 1 - self.alpha, unknown[..., 0, :]
)
result[..., 1, :] = result[..., 1, :].where(
out_repeat[..., 1, :] < self.alpha, unknown[..., 1, :]
)
weight_repeat = self.weights[None].repeat_interleave(2, dim=0)
result = result.where(weight_repeat != 0, unknown)
return val_clamp(result)
def _bidirectional(self, direction: Direction):
"""placeholder for the Bidirectional Neuron
This neuron currently uses an AND reformulation to execute
"""
pass
|
python
|
from PIL import Image
import pytesseract
def extractWords():
    img1 = Image.open(r"Solution\images\img1.jpg")  # raw strings keep the Windows backslashes literal
    img2 = Image.open(r"Solution\images\img2.jpg")
engText = pytesseract.image_to_string(img1, lang='eng')
araText = pytesseract.image_to_string(img2, lang='ara')
print(engText)
print(araText)
extractWords()
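# Note: pytesseract only wraps the tesseract binary, so the 'eng' and 'ara'
# language packs (traineddata files) must be installed for the calls above to
# succeed; otherwise tesseract fails with a "Failed loading language" error.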
|
python
|
import lab12
def createSomePlanets():
aPlanet=lab12.Planet("Zorks",2000,30000,100000,5,['Steve','Lou','The Grinch'])
bPlanet=lab12.Planet("Zapps",1000,20000,200000,17,[])
print(aPlanet.getName() + " has a radius of " + str(aPlanet.getRadius()))
planetList=[aPlanet,bPlanet]
for planet in planetList:
print(planet.getName() + " has a mass of " + str(planet.getMass()))
for planet in planetList:
print(planet.getName() + " has " + str(planet.getMoons()) + " moons.")
print(bPlanet.getName() + " has a circumference of " + str(bPlanet.getCircumference()))
aPlanet.setMoons()
print(aPlanet.getName() + " has " + str(aPlanet.getMoons()) + " moons.")
aPlanet.addMoon()
print("The list of moons for " + aPlanet.getName() + " is " + str(aPlanet.getMoonList()))
##createSomePlanets()
def makeASentence():
aString = "The quick brown fox jumps over the lazy dog"
aSentence = lab12.Sentence(aString)
print(aSentence.getSentence())
print(aSentence.getWords())
print(aSentence.getLength())
print(aSentence.getNumWords())
print(aSentence.capSentence())
print(aSentence.addPunctuation())
##makeASentence()
def makeASentence2():
aString = "The quick brown fox jumps over the lazy dog"
aSentence = lab12.Sentence2(aString)
print(aSentence.getList())
print(aSentence.getSentence())
print(aSentence.getWords())
print(aSentence.getLength())
print(aSentence.getNumWords())
##makeASentence2()
def Time():
myTime = lab12.Time(15,25,59)
myTime2 = lab12.Time(6,8,0)
## myTime.getTime()
## myTime2.getTime()
## myTime.addSec()
## myTime2.subSec()
## myTime.getTime()
## myTime2.getTime()
## print(str(myTime.showHour()))
## print(str(myTime.showMinute()))
## print(str(myTime.showSecond()))
## myTime.changeHour()
## myTime.changeMinute()
## myTime.changeSecond()
## myTime.getTime()
## print(str(myTime.differenceInTime(myTime2)))
## myTime.addTime()
## myTime.getTime()
myTime.fullDay()
Time()
|
python
|
from .__init__ import *
def home(request):
    return Http_Response("", "This is the video API", "")
def url(request):
    # Get the video playback URL
query = request_query(request, "id", ["res", {"resolution": 1080}])
query["ids"] = '["{}"]'.format(query.pop("id"))
data = send(query).POST("weapi/cloudvideo/playurl")
return Http_Response(request, data.text)
def detail(request):
    # Get video details
query = request_query(request, "id")
data = send(query).POST("weapi/cloudvideo/v1/video/detail")
return Http_Response(request, data.text)
def lists(request):
    # Get the list of video tag groups
data = send().POST("weapi/cloudvideo/group/list")
return Http_Response(request, data.text)
def group(request):
    # Get the videos under a given tag group
query = request_query(request,
["id", "groupId"],
["offset", {"offset": 0}],
["res", {"resolution": 1080}])
query["needUrl"] = True
data = send(query).POST("weapi/videotimeline/videogroup/get")
return Http_Response(request, data.text)
def related(request):
    # Related videos
query = request_query(request, "id")
query["type"] = 0 if query["id"].isdigit() else 1
data = send(query).POST("weapi/cloudvideo/v1/allvideo/rcmd")
return Http_Response(request, data.text)
def sub(request):
    # Subscribe to / unsubscribe from a video
query = request_query(request, "id", ["t", {"t": 1}])
t = "sub" if query.pop("t") == "1" else "unsub"
data = send(query).POST("weapi/cloudvideo/video/" + t)
return Http_Response(request, data.text)
|
python
|
#
# PySNMP MIB module BNET-ATM-TOPOLOGY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BNET-ATM-TOPOLOGY-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:40:07 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
VpiInteger, = mibBuilder.importSymbols("LAN-EMULATION-CLIENT-MIB", "VpiInteger")
s5AtmTop, = mibBuilder.importSymbols("S5-ROOT-MIB", "s5AtmTop")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, Gauge32, MibIdentifier, NotificationType, iso, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Counter64, Bits, ModuleIdentity, Counter32, Unsigned32, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Gauge32", "MibIdentifier", "NotificationType", "iso", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Counter64", "Bits", "ModuleIdentity", "Counter32", "Unsigned32", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
bnetAtmTopGbl = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 1))
bnetAtmTopLinks = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2))
bnetAtmTopGblStatus = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("topOn", 1), ("topOff", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: bnetAtmTopGblStatus.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopGblStatus.setDescription('Indicates if the agent is participating in ATM topology discovery. The values are: topOn(1)....topology is on topOff(2)...topology is off When the value is topOff(2) the status of bnetAtmTopLinkTable entries is undefined and the topolgy discovery is disabled for the entire device. When the value is topOn(1) the status of the topology discovery of a given port is defined by the bnetAtmTopLinksPeerStatus in the bnetAtmTopLinkTable. The agent must save the setting of this object in nonvolatile memory (i.e., save across reboots).')
bnetAtmTopGblLstChg = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 1, 2), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopGblLstChg.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopGblLstChg.setDescription('The value of sysUpTime the last time an entry in the bnetAtmTopLinkTable was added, deleted, or modified. If bnetAtmTopLinkTable has not changed since cold/warm start of the agent, then the value is zero.')
bnetAtmTopGblCurNum = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopGblCurNum.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopGblCurNum.setDescription('The current number of entries in the bnetAtmTopLinkTable.')
bnetAtmTopGblCurMibVer = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopGblCurMibVer.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopGblCurMibVer.setDescription('The current version of the ATM Topology mib. This object is provided by agent so NMS can distinguish variations in the mib as they occur in the future.')
bnetAtmTopGblOperStatus = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("topOn", 1), ("topOff", 2), ("topUnavailable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopGblOperStatus.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopGblOperStatus.setDescription('Indicates if the agent is participating in ATM topology discovery. The values are: topOn(1)....topology is on topOff(2)...topology is off When the value is topOff(2) the status of bnetAtmTopLinkTable entries is undefined and the topolgy discovery is disabled for the entire device. When the value is topOn(1) the status of the topology discovery of a given port is defined by the bnetAtmTopLinksPeerStatus in the bnetAtmTopLinkTable. The agent must save the setting of this object in nonvolatile memory (i.e., save across reboots). When status is topUnavailable(3), this implies that ilmi is not configured on this switch at all.')
bnetAtmTopLinksTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1), )
if mibBuilder.loadTexts: bnetAtmTopLinksTable.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksTable.setDescription('A table of all physical and virtual links attached to this switch. This table is built by aggregating information from all ILMI or PNNI entities in the switch and adding local port information.')
bnetAtmTopLinksEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1, 1), ).setIndexNames((0, "BNET-ATM-TOPOLOGY-MIB", "bnetAtmTopLinksSlotNumIndx"), (0, "BNET-ATM-TOPOLOGY-MIB", "bnetAtmTopLinksPortNumIndx"), (0, "BNET-ATM-TOPOLOGY-MIB", "bnetAtmTopLinksLcnIndx"))
if mibBuilder.loadTexts: bnetAtmTopLinksEntry.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksEntry.setDescription('A row in the ATM topology table. Entries in the table can not be created or deleted via SNMP.')
bnetAtmTopLinksSlotNumIndx = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksSlotNumIndx.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksSlotNumIndx.setDescription('Our slot number.')
bnetAtmTopLinksPortNumIndx = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksPortNumIndx.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksPortNumIndx.setDescription('Our port number.')
bnetAtmTopLinksLcnIndx = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1, 1, 3), VpiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksLcnIndx.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksLcnIndx.setDescription(' Logical Channel Number, This will give the VP, when the ILMI entity uses the VP signalling. This allows to have multiple NNI or UNI links on a single port.')
bnetAtmTopLinksTopoState = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unavailable", 1), ("notParticipating", 2), ("participating", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksTopoState.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksTopoState.setDescription('The current state of link from topology point of view. Possible values: unavailable(1) - third party device or older version of Bay device notParticipating(2) - peer has set TopoOff participating(3) - actively providing topo info. ')
bnetAtmTopLinksPeerSlotNum = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksPeerSlotNum.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksPeerSlotNum.setDescription("The peer's slot number.")
bnetAtmTopLinksPeerPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksPeerPortNum.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksPeerPortNum.setDescription("The peer's port number.")
bnetAtmTopLinksPeerIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1, 1, 7), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksPeerIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksPeerIpAddr.setDescription("The peer's IP address.")
bnetAtmTopLinksPeerChassisType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksPeerChassisType.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksPeerChassisType.setDescription('The chassis type of the device that sent the topology message. This number is the leaf of the s5 ChassisType OID registered in s5reg*.mib.')
bnetAtmTopLinksPeerChassisSubType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksPeerChassisSubType.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksPeerChassisSubType.setDescription('Any relevant additional information about the chassis of the peer Bay device. This is an integer field which could be used as backplane type or any information for stackable devices etc.')
bnetAtmTopLinksEosSize = MibScalar((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1440))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksEosSize.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksEosSize.setDescription('The current size of a row in the table bnetAtmTopLinksEosTable. This allows applications to break apart rows in the table.')
bnetAtmTopLinksEosTable = MibTable((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 3), )
if mibBuilder.loadTexts: bnetAtmTopLinksEosTable.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksEosTable.setDescription("A table of encoded octet strings of entries in table bnetAtmTopLinksTable. The number of entries is determined by packing of 'rows' from bnetAtmTopLinksTable.")
bnetAtmTopLinksEosEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 3, 1), ).setIndexNames((0, "BNET-ATM-TOPOLOGY-MIB", "bnetAtmTopLinksSlotNumIndx"), (0, "BNET-ATM-TOPOLOGY-MIB", "bnetAtmTopLinksPortNumIndx"), (0, "BNET-ATM-TOPOLOGY-MIB", "bnetAtmTopLinksLcnIndx"))
if mibBuilder.loadTexts: bnetAtmTopLinksEosEntry.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksEosEntry.setDescription('A row in the table of encoded octet strings for table bnetAtmTopLinksTable. Instance returned in Response PDU: On GETs: instance specified is the instance returned. On GET-NEXTs: instance returned is the instance of the last record contained in the octet string. Entries in the table can not be created or deleted via SNMP.')
bnetAtmTopLinksEos = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 1, 6, 14, 2, 3, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 1400))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bnetAtmTopLinksEos.setStatus('mandatory')
if mibBuilder.loadTexts: bnetAtmTopLinksEos.setDescription("An encoded octet string. On reads it contains an integral number of 'rows' from table bnetAtmTopLinksTable. All 'rows' are the same size which is defined by object bnetAtmTopLinksEosSize.")
mibBuilder.exportSymbols("BNET-ATM-TOPOLOGY-MIB", bnetAtmTopGblStatus=bnetAtmTopGblStatus, bnetAtmTopLinksTable=bnetAtmTopLinksTable, bnetAtmTopLinksPeerPortNum=bnetAtmTopLinksPeerPortNum, bnetAtmTopGblOperStatus=bnetAtmTopGblOperStatus, bnetAtmTopLinksEosEntry=bnetAtmTopLinksEosEntry, bnetAtmTopLinksPeerSlotNum=bnetAtmTopLinksPeerSlotNum, bnetAtmTopGbl=bnetAtmTopGbl, bnetAtmTopGblLstChg=bnetAtmTopGblLstChg, bnetAtmTopLinks=bnetAtmTopLinks, bnetAtmTopLinksSlotNumIndx=bnetAtmTopLinksSlotNumIndx, bnetAtmTopLinksPeerIpAddr=bnetAtmTopLinksPeerIpAddr, bnetAtmTopLinksEosSize=bnetAtmTopLinksEosSize, bnetAtmTopLinksEos=bnetAtmTopLinksEos, bnetAtmTopLinksEosTable=bnetAtmTopLinksEosTable, bnetAtmTopLinksEntry=bnetAtmTopLinksEntry, bnetAtmTopLinksPeerChassisSubType=bnetAtmTopLinksPeerChassisSubType, bnetAtmTopGblCurNum=bnetAtmTopGblCurNum, bnetAtmTopLinksPortNumIndx=bnetAtmTopLinksPortNumIndx, bnetAtmTopLinksLcnIndx=bnetAtmTopLinksLcnIndx, bnetAtmTopLinksPeerChassisType=bnetAtmTopLinksPeerChassisType, bnetAtmTopLinksTopoState=bnetAtmTopLinksTopoState, bnetAtmTopGblCurMibVer=bnetAtmTopGblCurMibVer)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mar 11, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import imghdr
import io
import xml.etree.cElementTree as ET
from os import urandom
from uuid import uuid4
from collections import OrderedDict
from sqlalchemy import Column, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.types import Integer, Unicode, String, Boolean, Enum
from models import dbsession
from models.BaseModels import DatabaseObject
from models.Relationships import team_to_box
from models.IpAddress import IpAddress
from models.GameLevel import GameLevel
from models.Corporation import Corporation
from models.Category import Category
from models.SourceCode import SourceCode
from tornado.options import options
from libs.XSSImageCheck import is_xss_image, get_new_avatar
from libs.ValidationError import ValidationError
from libs.StringCoding import encode
from PIL import Image
from resizeimage import resizeimage
import enum
class FlagsSubmissionType(str, enum.Enum):
CLASSIC = "CLASSIC"
SINGLE_SUBMISSION_BOX = "SINGLE_SUBMISSION_BOX"
from builtins import ( # noqa: E402
str,
) # TODO Python2/3 compatibility issue if imported before FlagsSubmissionType
class Box(DatabaseObject):
""" Box definition """
uuid = Column(String(36), unique=True, nullable=False, default=lambda: str(uuid4()))
corporation_id = Column(Integer, ForeignKey("corporation.id"), nullable=False)
category_id = Column(Integer, ForeignKey("category.id"), nullable=True)
_name = Column(Unicode(32), unique=True, nullable=False)
_operating_system = Column(Unicode(16))
_description = Column(Unicode(1024))
_capture_message = Column(Unicode(1024))
_difficulty = Column(Unicode(16))
game_level_id = Column(Integer, ForeignKey("game_level.id"), nullable=False)
_avatar = Column(String(64))
_value = Column(Integer, nullable=True)
_locked = Column(Boolean, default=False, nullable=False)
garbage = Column(
String(32),
unique=True,
nullable=False,
default=lambda: encode(urandom(16), "hex"),
)
teams = relationship(
"Team", secondary=team_to_box, back_populates="boxes", lazy="select"
)
hints = relationship(
"Hint",
backref=backref("box", lazy="select"),
cascade="all,delete,delete-orphan",
)
flags = relationship(
"Flag",
backref=backref("box", lazy="select"),
cascade="all,delete,delete-orphan",
order_by="desc(-Flag._order)",
)
flag_submission_type = Column(
Enum(FlagsSubmissionType), default=FlagsSubmissionType.CLASSIC
)
ip_addresses = relationship(
"IpAddress",
backref=backref("box", lazy="select"),
cascade="all,delete,delete-orphan",
)
@classmethod
def all(cls):
""" Returns a list of all objects in the database """
return dbsession.query(cls).all()
@classmethod
def by_id(cls, _id):
""" Returns a the object with id of _id """
return dbsession.query(cls).filter_by(id=_id).first()
@classmethod
def by_uuid(cls, _uuid):
""" Return and object based on a uuid """
return dbsession.query(cls).filter_by(uuid=str(_uuid)).first()
@classmethod
def by_name(cls, name):
""" Return the box object whose name is "name" """
return dbsession.query(cls).filter_by(_name=str(name)).first()
@classmethod
def by_category(cls, _cat_id):
""" Return the box object whose category is "_cat_id" """
return dbsession.query(cls).filter_by(category_id=int(_cat_id)).all()
@classmethod
def by_garbage(cls, _garbage):
return dbsession.query(cls).filter_by(garbage=_garbage).first()
@classmethod
def by_ip_address(cls, ip_addr):
"""
Returns a box object based on an ip address, supports both ipv4
and ipv6
"""
ip = IpAddress.by_address(ip_addr)
return ip.box if ip is not None else None
@classmethod
    def flaglist(cls, box_id=None):
        flags = cls.by_id(box_id).flags
flaglist = OrderedDict()
for flag in flags:
flaglist[flag.uuid] = flag.name
return flaglist
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if not 3 <= len(str(value)) <= 32:
raise ValidationError("Name must be 3 - 32 characters")
self._name = str(value)
@property
def operating_system(self):
return self._operating_system if self._operating_system else "?"
@operating_system.setter
def operating_system(self, value):
self._operating_system = str(value)
@property
def description(self):
if self._description is None:
self._description = ""
ls = []
if 0 < len(self._description):
text = self._description.replace("\r\n", "\n").strip()
ls.append("%s" % text)
else:
ls.append("No information on file.")
if self.difficulty != "Unknown":
ls.append("Reported Difficulty: %s" % self.difficulty)
if not encode(ls[-1], "utf-8").endswith(b"\n"):
ls[-1] = ls[-1] + "\n"
return str("\n\n".join(ls))
@description.setter
def description(self, value):
if value is None:
return ""
if 1025 < len(value):
raise ValidationError("Description cannot be greater than 1024 characters")
self._description = str(value)
@property
def difficulty(self):
return (
self._difficulty
if self._difficulty and len(self._difficulty)
else "Unknown"
)
@difficulty.setter
def difficulty(self, value):
if value is None:
return
if 17 < len(value):
raise ValidationError("Difficulty cannot be greater than 16 characters")
self._difficulty = str(value)
@property
def capture_message(self):
return self._capture_message if self._capture_message else ""
@capture_message.setter
def capture_message(self, value):
self._capture_message = str(value)
@property
def value(self):
if not self._value:
return 0
return self._value
@value.setter
def value(self, value):
try:
self._value = abs(int(value))
except ValueError:
raise ValidationError("Reward value must be an integer")
@property
def locked(self):
""" Determines if an admin has locked an box. """
return self._locked
@locked.setter
def locked(self, value):
""" Setter method for _lock """
assert isinstance(value, bool)
self._locked = value
@property
def avatar(self):
if self._avatar is not None:
return self._avatar
else:
avatar = get_new_avatar("box")
if not avatar.startswith("default_"):
self._avatar = avatar
dbsession.add(self)
dbsession.commit()
return avatar
@avatar.setter
def avatar(self, image_data):
if self.uuid is None:
self.uuid = str(uuid4())
if len(image_data) < (1024 * 1024):
ext = imghdr.what("", h=image_data)
if ext in ["png", "jpeg", "gif", "bmp"] and not is_xss_image(image_data):
try:
if self._avatar is not None and os.path.exists(
options.avatar_dir + "/upload/" + self._avatar
):
os.unlink(options.avatar_dir + "/upload/" + self._avatar)
file_path = str(
options.avatar_dir + "/upload/" + self.uuid + "." + ext
)
image = Image.open(io.BytesIO(image_data))
cover = resizeimage.resize_cover(image, [500, 250])
cover.save(file_path, image.format)
self._avatar = "upload/" + self.uuid + "." + ext
except Exception as e:
raise ValidationError(e)
else:
raise ValidationError(
"Invalid image format, avatar must be: .png .jpeg .gif or .bmp"
)
else:
raise ValidationError("The image is too large")
@property
def ipv4s(self):
""" Return a list of all ipv4 addresses """
return [ip for ip in self.ip_addresses if ip.version == 4]
@property
def ipv6s(self):
""" Return a list of all ipv6 addresses """
return [ip for ip in self.ip_addresses if ip.version == 6]
@property
def visable_ips(self):
return [ip for ip in self.ip_addresses if ip.visable is True]
@property
def source_code(self):
return SourceCode.by_box_id(self.id)
def get_garbage_cfg(self):
try:
hex_name = encode(self.name).hex()
except AttributeError:
hex_name = encode(self.name, "hex")
return "[Bot]\nname = %s\ngarbage = %s\n" % (hex_name, self.garbage)
def is_complete(self, user):
boxcomplete = True
for boxflag in self.flags:
if user.team and boxflag not in user.team.flags:
boxcomplete = False
break
return boxcomplete
def to_xml(self, parent):
""" Convert object to XML """
box_elem = ET.SubElement(parent, "box")
box_elem.set("gamelevel", "%s" % str(self.game_level.number))
ET.SubElement(box_elem, "name").text = self.name
ET.SubElement(box_elem, "operatingsystem").text = self._operating_system
ET.SubElement(box_elem, "description").text = self._description
ET.SubElement(box_elem, "capture_message").text = self.capture_message
ET.SubElement(box_elem, "value").text = str(self.value)
ET.SubElement(box_elem, "flag_submission_type").text = FlagsSubmissionType(
self.flag_submission_type
).name
ET.SubElement(box_elem, "difficulty").text = self._difficulty
ET.SubElement(box_elem, "garbage").text = str(self.garbage)
if self.category_id:
ET.SubElement(box_elem, "category").text = Category.by_id(
self.category_id
).category
flags_elem = ET.SubElement(box_elem, "flags")
flags_elem.set("count", "%s" % str(len(self.flags)))
for flag in self.flags:
flag.to_xml(flags_elem)
hints_elem = ET.SubElement(box_elem, "hints")
count = 0
for hint in self.hints:
if hint.flag_id is None:
hint.to_xml(hints_elem)
count += 1
hints_elem.set("count", "%s" % str(count))
ips_elem = ET.SubElement(box_elem, "ipaddresses")
ips_elem.set("count", "%s" % str(len(self.ip_addresses)))
for ip in self.ip_addresses:
ip.to_xml(ips_elem)
avatarfile = os.path.join(options.avatar_dir, self.avatar)
if self.avatar and os.path.isfile(avatarfile):
with open(avatarfile, mode="rb") as _avatar:
data = _avatar.read()
ET.SubElement(box_elem, "avatar").text = encode(data, "base64")
else:
ET.SubElement(box_elem, "avatar").text = "none"
def to_dict(self):
""" Returns editable data as a dictionary """
corp = Corporation.by_id(self.corporation_id)
game_level = GameLevel.by_id(self.game_level_id)
cat = Category.by_id(self.category_id)
if cat:
category = cat.uuid
else:
category = ""
return {
"name": self.name,
"uuid": self.uuid,
"corporation": corp.uuid,
"category": category,
"operating_system": self.operating_system,
"description": self._description,
"capture_message": self.capture_message,
"difficulty": self.difficulty,
"game_level": game_level.uuid,
"flag_submission_type": self.flag_submission_type,
"flaglist": self.flaglist(self.id),
"value": self.value,
}
def __repr__(self):
return "<Box - name: %s>" % (self.name,)
def __str__(self):
return self.name
|
python
|
# License: BSD 3 clause
from datetime import datetime
from typing import Dict, Iterable
from os.path import join
from pyspark.sql import DataFrame
from pyspark.sql.functions import col, datediff, floor, lit, months_between
from scalpel.core.io import get_logger, read_data_frame
from scalpel.core import io
from scalpel.core.util import data_frame_equality
from .util import fold_right
class Cohort(object):
"""A data representation that encapsulates a cohort. A cohort is a set of
subjects who experienced a common event in a selected time period.
Parameters
----------
name: `str`, name of the Cohort
characteristics: `str`, string characterising the cohort contents
subjects: `pyspark.sql.DataFrame`, DataFrame containing subject ids in the column
'PatientID' (str). It can also contain information regarding gender, birthdate and
deathdate of the subjects.
events: `pyspark.sql.DataFrame` or None, DataFrame containing events associated to
the subjects, it can be None.
# TODO: Add Schema of the two dataframes
"""
def __init__(
self,
name: str,
characteristics: str,
subjects: DataFrame,
events: DataFrame = None,
):
self._name = name
self._characteristics = characteristics
self._subjects = subjects
self._events = events
def __eq__(self, other):
if isinstance(other, Cohort):
if self.events is not None and other.events is not None:
return data_frame_equality(
self.subjects, other.subjects
) and data_frame_equality(self.events, other.events)
else:
return data_frame_equality(self.subjects, other.subjects)
else:
return False
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError("Expected a string")
self._name = value
@property
def characteristics(self) -> str:
return self._characteristics
@characteristics.setter
def characteristics(self, value):
if not isinstance(value, str):
raise TypeError("Expected a string")
self._characteristics = value
@property
def subjects(self) -> DataFrame:
return self._subjects
@subjects.setter
def subjects(self, value):
if not isinstance(value, DataFrame):
raise TypeError("Expected a Spark DataFrame")
self._subjects = value
@property
def events(self) -> DataFrame:
return self._events
@events.setter
def events(self, value):
if not isinstance(value, DataFrame):
raise TypeError("Expected a Spark DataFrame")
self._events = value
def describe(self) -> str:
if self.events is None:
return (
"This a subject cohort, no event needed. "
+ "Subjects are from operation {}.".format(self.name)
)
else:
return "Events are {}. ".format(
self.name
) + "Events contain only {}.".format(self.characteristics)
def has_subject_information(self) -> bool:
"""Returns true if this cohort is the Base Cohort. The base population contains
extra columns specifically birthDate, deathDate and gender"""
return set(self.subjects.columns).issuperset(
{"gender", "patientID", "deathDate", "birthDate"}
)
def add_subject_information(
self, base_cohort: "Cohort", missing_patients="error"
) -> None:
"""
Add information of gender and birthDate to subjects in place.
WARNING: This methods results in a state mutation.
Parameters
----------
        base_cohort: the cohort that contains the subject information (gender, birthDate).
        missing_patients: behaviour when missing patients are detected.
            Possible values are "error", "omit_all" to omit the patients and their
            events, or "omit" to omit the patients but keep their events.
Returns
-------
None. Mutation in place.
"""
if missing_patients == "error":
subjects = self.subjects.join(
base_cohort.subjects, on="patientID", how="left"
)
extra_subjects_count = subjects.where(col("gender").isNull()).count()
if extra_subjects_count > 0:
raise ValueError(
"Cohort {} contains {} subjects not in base cohort {}".format(
self.name, extra_subjects_count, base_cohort.name
)
)
else:
self._subjects = subjects
elif missing_patients == "omit_all":
get_logger().warning("Some patients and their events might be ommited")
self._subjects = self.subjects.join(
base_cohort.subjects, on="patientID", how="inner"
)
if self.events is not None:
self._events = self.events.join(
self.subjects.select("patientID").distinct(),
on="patientID",
how="inner",
)
elif missing_patients == "omit":
get_logger().warning(
"Some patients might be ommited." + " Their events are kept"
)
self._subjects = self.subjects.join(
base_cohort.subjects, on="patientID", how="inner"
)
else:
raise ValueError(
"missing_patients is erroneous. Possible options are "
+ "error, omit, omit_all"
)
def add_age_information(self, date: datetime) -> None:
self._subjects = self.subjects.withColumn(
"age", floor(months_between(lit(date), col("birthDate")) / 12)
).withColumn("ageBucket", floor(col("age") / 5))
def is_duration_events(self) -> bool:
"""Returns true if the Events have a defined start and end for every line in
events dataframe.
Returns
-------
Boolean.
"""
if self.events is None:
return False
else:
return (
self.events.where(
self.events.end.isNull() | self.events.start.isNull()
).count()
== 0
)
def add_duration_information(self) -> "Cohort":
"""Adds a column with duration in days between start and end for the Events
DataFrame.
WARNING: This methods results in a state mutation.
Returns
-------
self with duration column added to events"""
if self.is_duration_events():
self._events = self.events.withColumn(
"duration", datediff(col("end"), col("start"))
)
return self
else:
raise ValueError(
"This Cohort is not a duration events cohort",
" please check is_duration_events method",
)
def save_cohort(self, output_path: str, mode="overwrite") -> Dict:
"""Saves current Cohort to "output_path/name".
Parameters
----------
output_path
root directory for output.
mode
Writing mode for parquet files.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``(default case): Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists``: Throw an exception if data already \
exists.
Returns
-------
A dict with entries to describe where the data is written and the type of the
Cohort.
"""
output = dict()
subjects_filepath = join(output_path, self.name, "subjects")
io.write_data_frame(self.subjects, subjects_filepath, mode)
if self.events is not None:
            events_filepath = join(output_path, self.name, "data")
            io.write_data_frame(self.events, events_filepath, mode)
            output["population_path"] = subjects_filepath
            output["output_path"] = events_filepath
output["name"] = self.name
output["output_type"] = "events"
else:
output["output_path"] = subjects_filepath
output["name"] = self.name
output["output_type"] = "patients"
return output
def union(self, other: "Cohort") -> "Cohort":
return _union(self, other)
def intersection(self, other: "Cohort") -> "Cohort":
return _intersection(self, other)
def difference(self, other: "Cohort") -> "Cohort":
return _difference(self, other)
@staticmethod
def union_all(cohorts: Iterable["Cohort"]) -> "Cohort":
return fold_right(_union, cohorts)
@staticmethod
def intersect_all(cohorts: Iterable["Cohort"]) -> "Cohort":
return fold_right(_intersection, cohorts)
@staticmethod
def difference_all(cohorts: Iterable["Cohort"]) -> "Cohort":
return fold_right(_difference, cohorts)
@staticmethod
def load(input: Dict) -> "Cohort":
if input["output_type"] == "patients":
return Cohort(
input["name"], input["name"], read_data_frame(input["output_path"])
)
else:
return Cohort(
input["name"],
"subjects with event {}".format(input["name"]),
read_data_frame(input["population_path"]),
read_data_frame(input["output_path"]),
)
@staticmethod
def from_description(description: str) -> "Cohort":
raise NotImplementedError
def _union(a: Cohort, b: Cohort) -> Cohort:
if a.events is None or b.events is None:
return Cohort(
"{} Or {}".format(a.name, b.name),
"{} Or {}".format(a.characteristics, b.characteristics),
a.subjects.union(b.subjects),
)
else:
return Cohort(
"{} Or {}".format(a.name, b.name),
"{} Or {}".format(a.characteristics, b.characteristics),
a.subjects.union(b.subjects),
a.events.union(b.events),
)
def _intersection(a: Cohort, b: Cohort) -> Cohort:
subjects_id = a.subjects.select("patientID").intersect(
b.subjects.select("patientID")
)
subjects = a.subjects.join(subjects_id, on="patientID", how="right")
events = None
if a.events is not None:
events = a.events.join(subjects_id, on="patientID", how="right")
return Cohort(
a.name,
"{} with {}".format(a.characteristics, b.characteristics),
subjects,
events,
)
def _difference(a: Cohort, b: Cohort) -> Cohort:
subjects_id = a.subjects.select("patientID").subtract(
b.subjects.select("patientID")
)
subjects = a.subjects.join(subjects_id, on="patientID", how="right")
events = None
if a.events is not None:
events = a.events.join(subjects_id, on="patientID", how="right")
return Cohort(
a.name,
"{} without {}".format(a.characteristics, b.characteristics),
subjects,
events,
)
|
python
|
"""Bagging classifier trained on balanced bootstrap samples."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
import numbers
import numpy as np
from sklearn.base import clone
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from ..pipeline import Pipeline
from ..under_sampling import RandomUnderSampler
from ..under_sampling.base import BaseUnderSampler
from ..utils import Substitution, check_target_type, check_sampling_strategy
from ..utils._docstring import _n_jobs_docstring
from ..utils._docstring import _random_state_docstring
from ..utils._validation import _deprecate_positional_args
@Substitution(
sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
random_state=_random_state_docstring,
)
class BalancedBaggingClassifier(BaggingClassifier):
"""A Bagging classifier with additional balancing.
This implementation of Bagging is similar to the scikit-learn
implementation. It includes an additional step to balance the training set
at fit time using a ``RandomUnderSampler``.
Read more in the :ref:`User Guide <bagging>`.
Parameters
----------
base_estimator : estimator object, default=None
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
n_estimators : int, default=10
The number of base estimators in the ensemble.
max_samples : int or float, default=1.0
The number of samples to draw from X to train each base estimator.
- If int, then draw ``max_samples`` samples.
- If float, then draw ``max_samples * X.shape[0]`` samples.
max_features : int or float, default=1.0
The number of features to draw from X to train each base estimator.
- If int, then draw ``max_features`` features.
- If float, then draw ``max_features * X.shape[1]`` features.
bootstrap : bool, default=True
Whether samples are drawn with replacement.
bootstrap_features : bool, default=False
Whether features are drawn with replacement.
oob_score : bool, default=False
Whether to use out-of-bag samples to estimate
the generalization error.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble.
{sampling_strategy}
replacement : bool, default=False
        Whether or not to sample randomly with replacement.
{n_jobs}
{random_state}
verbose : int, default=0
Controls the verbosity of the building process.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
n_features_ : int
The number of features when `fit` is performed.
estimators_ : list of estimators
The collection of fitted base estimators.
estimators_samples_ : list of ndarray
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by a boolean mask.
estimators_features_ : list of ndarray
The subset of drawn features for each base estimator.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int or list
The number of classes.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : ndarray of shape (n_samples, n_classes)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
``oob_decision_function_`` might contain NaN.
See Also
--------
BalancedRandomForestClassifier : Random forest applying random-under
sampling to balance the different bootstraps.
EasyEnsembleClassifier : Ensemble of AdaBoost classifier trained on
balanced bootstraps.
    RUSBoostClassifier : AdaBoost classifier where each bootstrap is balanced
using random-under sampling at each round of boosting.
Notes
-----
    It is possible to turn this classifier into a balanced random forest [5]_
by passing a :class:`~sklearn.tree.DecisionTreeClassifier` with
`max_features='auto'` as a base estimator.
See
:ref:`sphx_glr_auto_examples_ensemble_plot_comparison_ensemble_classifier.py`.
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
.. [5] Chen, Chao, Andy Liaw, and Leo Breiman. "Using random forest to
learn imbalanced data." University of California, Berkeley 110,
2004.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.metrics import confusion_matrix
>>> from imblearn.ensemble import \
BalancedBaggingClassifier # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> bbc = BalancedBaggingClassifier(random_state=42)
>>> bbc.fit(X_train, y_train) # doctest: +ELLIPSIS
BalancedBaggingClassifier(...)
>>> y_pred = bbc.predict(X_test)
>>> print(confusion_matrix(y_test, y_pred))
[[ 23 0]
[ 2 225]]
"""
@_deprecate_positional_args
def __init__(
self,
base_estimator=None,
n_estimators=10,
*,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
sampling_strategy="auto",
replacement=False,
n_jobs=None,
random_state=None,
verbose=0,
):
super().__init__(
base_estimator,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
bootstrap_features=bootstrap_features,
oob_score=oob_score,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
)
self.sampling_strategy = sampling_strategy
self.replacement = replacement
def _validate_y(self, y):
y_encoded = super()._validate_y(y)
if isinstance(self.sampling_strategy, dict):
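            # remap a dict keyed by original class labels to the corresponding
            # encoded class indices expected by the underlying sampler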
self._sampling_strategy = {
np.where(self.classes_ == key)[0][0]: value
for key, value in check_sampling_strategy(
self.sampling_strategy,
y,
"under-sampling",
).items()
}
else:
self._sampling_strategy = self.sampling_strategy
return y_encoded
def _validate_estimator(self, default=DecisionTreeClassifier()):
"""Check the estimator and the n_estimator attribute, set the
`base_estimator_` attribute."""
if not isinstance(self.n_estimators, (numbers.Integral, np.integer)):
raise ValueError(
f"n_estimators must be an integer, " f"got {type(self.n_estimators)}."
)
if self.n_estimators <= 0:
raise ValueError(
f"n_estimators must be greater than zero, " f"got {self.n_estimators}."
)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = clone(default)
self.base_estimator_ = Pipeline(
[
(
"sampler",
RandomUnderSampler(
sampling_strategy=self._sampling_strategy,
replacement=self.replacement,
),
),
("classifier", base_estimator),
]
)
def fit(self, X, y):
"""Build a Bagging ensemble of estimators from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values.
Returns
-------
self : object
Returns self.
"""
check_target_type(y)
# RandomUnderSampler is not supporting sample_weight. We need to pass
# None.
return self._fit(X, y, self.max_samples, sample_weight=None)
def _more_tags(self):
tags = super()._more_tags()
tags_key = "_xfail_checks"
failing_test = "check_estimators_nan_inf"
reason = "Fails because the sampler removed infinity and NaN values"
if tags_key in tags:
tags[tags_key][failing_test] = reason
else:
tags[tags_key] = {failing_test: reason}
return tags
|
python
|
# import theano.sandbox.cuda
# theano.sandbox.cuda.use('gpu0')
import numpy as np
import cPickle as cP
import theano as TH
import theano.tensor as T
import scipy.misc as sm
import nnet.lasagnenetsCFCNN as LN
import lasagne as L
import datetime
def unpickle(file):
import cPickle
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
trainX1 = unpickle('./datasets/cifar/cifar-10-batches-py/data_batch_1')
trainX2 = unpickle('./datasets/cifar/cifar-10-batches-py/data_batch_2')
trainX3 = unpickle('./datasets/cifar/cifar-10-batches-py/data_batch_3')
trainX4 = unpickle('./datasets/cifar/cifar-10-batches-py/data_batch_4')
trainX5 = unpickle('./datasets/cifar/cifar-10-batches-py/data_batch_5')
testX = unpickle('./datasets/cifar/cifar-10-batches-py/test_batch')
labelname = unpickle('./datasets/cifar/cifar-10-batches-py/batches.meta')
trainX = np.vstack((trainX1['data'], trainX2['data'], trainX3[
'data'], trainX4['data'], trainX5['data']))
trainY = np.hstack((trainX1['labels'], trainX2['labels'], trainX3[
'labels'], trainX4['labels'], trainX5['labels']))
testX, testY = testX['data'], testX['labels']
testY = np.asarray(testY)
temp = np.zeros((trainY.shape[0], 10), dtype=np.float32)
for i in xrange(trainY.shape[0]):
temp[i][trainY[i]] = 1
trainY = temp
temp = np.zeros((testY.shape[0], 10), dtype=np.float32)
for i in xrange(testY.shape[0]):
temp[i][testY[i]] = 1
testY = temp
trainX = np.asarray(trainX / 256.0, dtype=np.float32)
testX = np.asarray(testX / 256.0, dtype=np.float32)
trainX = trainX.reshape(-1, 3, 32, 32)
testX = testX.reshape(-1, 3, 32, 32)
def main():
'''
Creates neural networks with various parameters and trains them.
'''
'''
n_out: output size
    h_layers: hidden layer sizes
    l_drops: dropout rates of hidden layers.
        Set to None if dropout is not to be used.
nonlinearity: activation function to be used.
lam: weight of the L2 regularizer.
        Set to None if the L2 regularizer is not to be used.
clam: weight of VR regularizer.
Input size has been hardcoded to (3, 32, 32).
'''
####################################################
# VR + L2
nnet = LN.nnet(
n_out=10,
h_layers=[1000, 1000],
lam=500,
Temp=1,
nonlinearity=L.nonlinearities.rectify,
clam=20
)
nnet.train(x=trainX, y=trainY, testx=testX, testy=testY,
lrate=0.01, gamma=0.9, batch_size=100, iters=200,
thresh=70, filename='runs/Cifar_Vr_L2')
####################################################
# L2
nnet = LN.nnet(
n_out=10,
h_layers=[1000, 1000],
lam=500,
Temp=1,
nonlinearity=L.nonlinearities.rectify,
clam=None
)
nnet.train(x=trainX, y=trainY, testx=testX, testy=testY,
lrate=0.01, gamma=0.9, batch_size=100, iters=200,
thresh=70, filename='runs/Cifar_L2')
####################################################
# Vr + L2 + Dr
nnet = LN.nnet(
n_out=10,
h_layers=[1000, 1000],
l_drops=[0.3, 0.3],
lam=10,
Temp=1,
nonlinearity=L.nonlinearities.rectify,
clam=300
)
nnet.train(x=trainX, y=trainY, testx=testX, testy=testY,
lrate=0.005, gamma=0.9, batch_size=100, iters=200,
thresh=70, filename='runs/Cifar_Vr_L2_Dr')
####################################################
# L2 + Dr
nnet = LN.nnet(
n_out=10,
h_layers=[1000, 1000],
l_drops=[0.3, 0.3],
lam=10,
Temp=1,
nonlinearity=L.nonlinearities.rectify,
clam=None
)
nnet.train(x=trainX, y=trainY, testx=testX, testy=testY,
lrate=0.001, gamma=0.9, batch_size=100, iters=200,
thresh=70, filename='runs/Cifar_L2_Dr')
####################################################
# Dr
nnet = LN.nnet(
n_out=10,
h_layers=[1000, 1000],
l_drops=[0.3, 0.3],
lam=None,
Temp=1,
nonlinearity=L.nonlinearities.rectify,
clam=None
)
nnet.train(x=trainX, y=trainY, testx=testX, testy=testY,
lrate=0.001, gamma=0.9, batch_size=100, iters=200,
thresh=70, filename='runs/Cifar_Dr')
####################################################
# Vr + Dr
nnet = LN.nnet(
n_out=10,
h_layers=[1000, 1000],
l_drops=[0.3, 0.3],
lam=None,
Temp=1,
nonlinearity=L.nonlinearities.rectify,
clam=100
)
nnet.train(x=trainX, y=trainY, testx=testX, testy=testY,
lrate=0.005, gamma=0.9, batch_size=100, iters=200,
thresh=70, filename='runs/Cifar_Vr_Dr')
####################################################
if __name__ == '__main__':
main()
|
python
|
import tequila as tq
class QMBackend():
"""
A class that holds information about a backend and can be given as a parameter to an algorithm.
Attributes
----------
backend : str
The name of the used backend (simulator).
"""
def __init__(self, backend: str = None):
"""
Creates a QMBackend object.
Parameters
----------
backend : str, optional
The name of the backend (simulator) to be used.
Supported simulators are 'qulacs_gpu', 'qulacs','qibo', 'qiskit', 'cirq', 'pyquil'
and 'symbolic'.
"""
self.backend = backend
@property
def backend(self):
"""The name of the used backend (simulator)."""
return self._backend
@backend.setter
def backend(self, backend: str = None):
self._backend = tq.pick_backend(backend)
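# Usage sketch (illustrative only; assumes at least one supported simulator,
# e.g. 'qulacs', is installed):
#
#   backend = QMBackend("qulacs")
#   print(backend.backend)   # name resolved by tq.pick_backend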
|
python
|
#!/usr/bin/env python3.7
"""
The copyrights of this software are owned by Duke University.
Please refer to the LICENSE and README.md files for licensing instructions.
The source code can be found on the following GitHub repository: https://github.com/wmglab-duke/ascent
"""
import os
import sys
sys.path.append(os.path.sep.join([os.getcwd(), '']))
import numpy as np
import matplotlib.pyplot as plt
from src.core.query import Query
# set default fig size
plt.rcParams['figure.figsize'] = list(np.array([16.8, 10.14*2]) / 2)
q = Query({
'partial_matches': False,
'include_downstream': True,
'indices': {
'sample': [3008],
'model': [0,1,2,11],
'sim': [3001]
}
}).run()
q.ap_time_and_location(
delta_V=60,
plot=False,
absolute_voltage=False,
# n_sim_label_override='7.3 µm MRG Fiber',
# model_labels=[
# '5000 µm model radius',
# '7500 µm model radius',
# '10000 µm model radius',
# ],
# n_sim_filter=[0, 1, 2],
save=True,
subplots = True,
nodes_only = True)
|
python
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.smv
~~~~~~~~~~~~~~~~~~~
Lexers for the SMV languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Generic, Keyword, Name, Number, \
Operator, Punctuation, Text
__all__ = ['NuSMVLexer']
class NuSMVLexer(RegexLexer):
"""
Lexer for the NuSMV language.
"""
name = 'NuSMV'
aliases = ['nusmv']
filenames = ['*.smv']
mimetypes = []
tokens = {
'root': [
# Comments
(r'(?s)\/\-\-.*?\-\-/', Comment),
(r'--.*\n', Comment),
# Reserved
(words(('MODULE','DEFINE','MDEFINE','CONSTANTS','VAR','IVAR',
'FROZENVAR','INIT','TRANS','INVAR','SPEC','CTLSPEC','LTLSPEC',
'PSLSPEC','COMPUTE','NAME','INVARSPEC','FAIRNESS','JUSTICE',
'COMPASSION','ISA','ASSIGN','CONSTRAINT','SIMPWFF','CTLWFF',
'LTLWFF','PSLWFF','COMPWFF','IN','MIN','MAX','MIRROR','PRED',
'PREDICATES'), suffix=r'(?![\w$#-])'), Keyword.Declaration),
(r'process(?![\w$#-])', Keyword),
(words(('array','of','boolean','integer','real','word'),
suffix=r'(?![\w$#-])'), Keyword.Type),
(words(('case','esac'), suffix=r'(?![\w$#-])'), Keyword),
(words(('word1','bool','signed','unsigned','extend','resize',
'sizeof','uwconst','swconst','init','self','count','abs','max',
'min'), suffix=r'(?![\w$#-])'), Name.Builtin),
(words(('EX','AX','EF','AF','EG','AG','E','F','O','G','H','X','Y',
'Z','A','U','S','V','T','BU','EBF','ABF','EBG','ABG','next',
'mod','union','in','xor','xnor'), suffix=r'(?![\w$#-])'),
Operator.Word),
(words(('TRUE','FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant),
# Names
(r'[a-zA-Z_][\w$#-]*', Name.Variable),
# Operators
(r':=', Operator),
(r'[&\|\+\-\*/<>!=]', Operator),
# Literals
(r'\-?\d+\b', Number.Integer),
(r'0[su][bB]\d*_[01_]+', Number.Bin),
(r'0[su][oO]\d*_[01234567_]+', Number.Oct),
(r'0[su][dD]\d*_[\d_]+', Number.Dec),
(r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex),
# Whitespace, punctuation and the rest
(r'\s+', Text.Whitespace),
(r'[\(\)\[\]\{\};\?:\.,]', Punctuation),
(r'.', Generic.Error),
]
}
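# Quick sanity check (illustrative; assumes pygments is installed):
#
#   from pygments import highlight
#   from pygments.formatters import NullFormatter
#   print(highlight("MODULE main\nVAR b : boolean;", NuSMVLexer(), NullFormatter()))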
|
python
|
fahrenheit = float(input("Enter temperature in Fahrenheit: "))
print("The temperature in Celsius: ", (fahrenheit-32)*(5/9))
|
python
|
""" Tool for extending the label information with the chess game state, i.e. the camera piece
color and square content.
"""
import argparse
import cv2 as cv
from rookowl.global_var import PIECE_DISPLAY_SHAPE
from rookowl.label import load_labels, save_label
# =≡=-=♔=-=≡=-=♕=-=≡=-=♖=-=≡=-=♗=-=≡=-=♘=-=≡=-=♙=-=≡=-=♚=-=≡=-=♛=-=≡=-=♜=-=≡=-=♝=-=≡=-=♞=-=≡=-=♟︎=-
def parse_state_text(state_text: str, near_color: str):
state = 64 * ["."]
def put(piece: str, file: int, rank: int):
assert piece in ["P", "p", "B", "b", "N",
"n", "R", "r", "Q", "q", "K", "k"]
assert file >= 1 and file <= 8
assert rank >= 1 and rank <= 8
assert near_color in ["b", "w"]
index = 8 * (rank-1) + (file-1)
if near_color == "b":
index = 63 - index
state[index] = piece
file, rank = 1, 8
for c in state_text:
if c == " ":
break
elif c == "/":
file = 1
rank -= 1
elif c.isdigit():
file += int(c)
else:
put(c, file, rank)
file += 1
return "".join(state)
# =≡=-=♔=-=≡=-=♕=-=≡=-=♖=-=≡=-=♗=-=≡=-=♘=-=≡=-=♙=-=≡=-=♚=-=≡=-=♛=-=≡=-=♜=-=≡=-=♝=-=≡=-=♞=-=≡=-=♟︎=-
class App:
MAIN_WIN_NAME = "Piece Labelling Tool"
def __init__(self, dirpath):
self.labels = load_labels(dirpath)
def run(self):
self.make_window()
for label in self.labels:
if "state" in label:
continue
image = cv.imread(label["image_filepath"])
image = cv.resize(
image, (PIECE_DISPLAY_SHAPE[1], PIECE_DISPLAY_SHAPE[0]), interpolation=cv.INTER_CUBIC)
cv.imshow(self.MAIN_WIN_NAME, image)
cv.waitKey(1)
print(80 * "_")
print(f"Now presenting: {label['image_filename']}")
near_color = input(
"Which player holds the camera and it is a known state? [b/w/bs/ws/be/we] ")
if not near_color in ["b", "w", "bs", "ws", "be", "we"]:
print("Skipping to the next photo")
continue
if near_color in ["b", "w"]:
state_text = input("What is this state? ")
elif near_color == "bs":
near_color = "b"
state_text = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
elif near_color == "ws":
near_color = "w"
state_text = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
elif near_color == "be":
near_color = "b"
state_text = ""
elif near_color == "we":
near_color = "w"
state_text = ""
cv.waitKey(1)
state = parse_state_text(state_text, near_color)
print(f"Parsed state: '{state}'")
label_path = label["source_path"]
label["state"] = state
label["side"] = near_color
save_label(label, copylabel=False)
print(f"Label '{label_path}' updated.")
cv.waitKey(1)
cv.destroyAllWindows()
def make_window(self):
cv.namedWindow(self.MAIN_WIN_NAME)
def update_view():
pass
# =≡=-=♔=-=≡=-=♕=-=≡=-=♖=-=≡=-=♗=-=≡=-=♘=-=≡=-=♙=-=≡=-=♚=-=≡=-=♛=-=≡=-=♜=-=≡=-=♝=-=≡=-=♞=-=≡=-=♟︎=-
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__)
parser.add_argument("-d", dest="dataset_dir", default="dataset",
help="dataset directory path, where chessboard photographs are placed")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
app = App(args.dataset_dir)
app.run()
# =≡=-=♔=-=≡=-=♕=-=≡=-=♖=-=≡=-=♗=-=≡=-=♘=-=≡=-=♙=-=≡=-=♚=-=≡=-=♛=-=≡=-=♜=-=≡=-=♝=-=≡=-=♞=-=≡=-=♟︎=-
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# Loss value for a single batch
def batch_loss(loss, images):
return loss.item() * images.size(0)
class FocalLoss(nn.Module):
def __init__(self, weight=None, gamma=2.0, reduction="mean"):
nn.Module.__init__(self)
self.weight = weight
self.gamma = gamma
self.reduction = reduction
def forward(self, input_tensor, target_tensor):
log_prob = F.log_softmax(input_tensor, dim=-1)
prob = torch.exp(log_prob)
return F.nll_loss(
((1 - prob) ** self.gamma) * log_prob,
target_tensor,
weight=self.weight,
reduction=self.reduction,
)
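# Usage sketch (illustrative; assumes class logits of shape [N, C] and integer targets):
#
#   criterion = FocalLoss(gamma=2.0)
#   logits = torch.randn(8, 5)
#   targets = torch.randint(0, 5, (8,))
#   loss = criterion(logits, targets)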
|
python
|
def spread_pixels(Nside_low, Nside_high, ID):
from math import log
Llow = int(log(Nside_low, 2))
Lhigh = int(log(Nside_high, 2))
print(Llow, Lhigh)
b = bin(ID)
DN = Lhigh-Llow
    # each refinement step adds 2 bits, so a child index occupies 2*DN bits
    a = [bin(i)[2:].zfill(2 * DN) for i in range(4**DN)]
pix_IDs = []
for i in a:
x = (b[2:].zfill(Llow) + i)
pix_IDs.append(int(x, 2))
return(pix_IDs)
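# Worked example: going from Nside=1 to Nside=2 appends one 2-bit child index, so
# spread_pixels(1, 2, 3) returns [12, 13, 14, 15].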
|
python
|
#!/usr/bin/env python
"""Play a fixed frequency sound."""
from __future__ import division
import math
from pyaudio import PyAudio # sudo apt-get install python{,3}-pyaudio
try:
from itertools import izip
except ImportError: # Python 3
izip = zip
xrange = range
def sine_tone(stream, frequency, duration, volume=1, sample_rate=22050):
n_samples = int(sample_rate * duration)
restframes = n_samples % sample_rate
s = lambda t: volume * math.sin(2 * math.pi * frequency * t / sample_rate)
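    # map the [-volume, volume] sine samples to unsigned 8-bit values centered on 0x80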
samples = (int(s(t) * 0x7f + 0x80) for t in xrange(n_samples))
for buf in izip(*[samples]*sample_rate): # write several samples at a time
#print(buf)
stream.write(bytes(bytearray(buf)))
# fill remainder of frameset with silence
stream.write(b'\x80' * restframes)
def get_target_pitch_freq():
target_pitch = [
1046.5, 987.77, 698.46, # bar 06
880.00, 783.99, 698.46, 587.33, # bar 07
523.25, 587.33, 392.00, 587.33, # bar 08
659.26, 261.63, 329.63, 392.00, 523.25, 659.26, 783.99, # bar 09
1046.5, 987.77, 698.46, # bar 10
880.00, 783.99, 698.46, 587.33, # bar 11
523.25, 659.26, 587.33, 659.26, # bar 12
587.33, 523.25 # bar 13
]
return target_pitch
p = PyAudio()
stream = p.open(format=p.get_format_from_width(1), # 8bit
channels=1, # mono
rate=44100,
output=True)
for f in get_target_pitch_freq():
sine_tone(stream, frequency=f, duration=1, volume=0.6,sample_rate=44100)
stream.stop_stream()
stream.close()
p.terminate()
|
python
|
#!/usr/bin/env python3
"""Forward kinematic example using pinocchio
Sends zero-torque commands to the robot and prints finger tip position.
"""
import os
import numpy as np
from ament_index_python.packages import get_package_share_directory
import pinocchio
import robot_interfaces
import robot_fingers
if __name__ == "__main__":
urdf_pkg_path = get_package_share_directory("robot_properties_manipulator")
urdf_path = os.path.join(urdf_pkg_path, "urdf", "finger.urdf")
model = pinocchio.buildModelFromUrdf(urdf_path)
data = model.createData()
tip_link_id = model.getFrameId("finger_tip_link")
robot = robot_fingers.Robot(
robot_interfaces.finger,
robot_fingers.create_real_finger_backend,
"finger.yml",
)
robot.initialize()
action = robot.Action()
while True:
t = robot.frontend.append_desired_action(action)
joint_positions = robot.frontend.get_observation(t).position
# compute the forward kinematics
pinocchio.framesForwardKinematics(model, data, joint_positions)
# get the position of the tip link
pos = data.oMf[tip_link_id].translation
# convert from np.matrix to a flat array (for easy printing)
pos = np.asarray(pos).reshape(-1)
n_joints = len(pos)
format_string = "\r" + ", ".join(["{: 6.3f}"] * n_joints)
print(format_string.format(*pos), end="")
|
python
|
if __name__ == '__main__' and __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ))
import os
import torch
from utils.model_serialization import strip_prefix_if_present
from utils import zipreader
import argparse
from tqdm import tqdm
import pickle
import cv2
import numpy as np
parser = argparse.ArgumentParser(description="PyTorch Keypoints Training")
parser.add_argument(
"--src",
default="~/datasets",
help="source model",
type=str,
)
parser.add_argument(
"--dst",
default="~/local/datasets/h36m/undistortedimages",
help="dst model",
type=str,
)
parser.add_argument(
"--anno",
default="~/datasets/h36m/annot/h36m_validation.pkl",
type=str,
)
args = parser.parse_args()
src = os.path.expanduser(args.src)
dst = os.path.expanduser(args.dst)
with open(os.path.expanduser(args.anno), 'rb') as f:
data = pickle.load(f)
for db_rec in tqdm(data):
path = db_rec['image']
image_dir = 'images.zip@'
image_file = os.path.join(src, db_rec['source'], image_dir, 'images', db_rec['image'])
output_path = os.path.join(dst, path)
if os.path.exists(output_path):
continue
output_dir = os.path.dirname(output_path)
os.makedirs(output_dir, exist_ok=True)
data_numpy = zipreader.imread(
image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
camera = db_rec['camera']
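    # intrinsic matrix K and distortion coefficients in OpenCV order (k1, k2, p1, p2, k3)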
K = np.array([
[float(camera['fx']), 0, float(camera['cx'])],
[0, float(camera['fy']), float(camera['cy'])],
[0, 0, 1.],
])
distCoeffs = np.array([float(i) for i in [camera['k'][0], camera['k'][1], camera['p'][0], camera['p'][1], camera['k'][2]]])
data_numpy = cv2.undistort(data_numpy, K, distCoeffs)
#cv2.imwrite(output_path, data_numpy, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
#cv2.imwrite(output_path, data_numpy)
cv2.imwrite(output_path, data_numpy, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
|
python
|
import datetime
from termcolor import cprint
import get_all_caches as caches
def calculate_all_prices(fiat='EUR', coin='XMR', start='04-02-2020', high_low='high', coin_amount=int(42), fiat_amount_spent=int(0)):
coin_amount = int(coin_amount) # double casting to int here prevents crashes later on
historical_prices = caches.get_historical_price_cache(fiat, coin, start)
# to mitigate a ZeroDivisionError when the start date requested precedes the earliest data for coin in API database:
if len(historical_prices[0]['data']['quotes']) == 0:
historical_coin_price = 0
cprint(f" No coin data found for this date for {coin}, try a later date.", 'red', 'on_white', attrs=['bold'])
else:
historical_coin_price = float(historical_prices[0]['data']['quotes'][0]['quote'][fiat][high_low])
fiat_amount_spent = int(coin_amount) * historical_coin_price
cprint(
f" On {start}, at its {high_low}est price, 1 {coin} would have cost {historical_coin_price:,.2f} {fiat} &"
f" to buy your desired amount of {coin_amount:,} {coin} would have cost {fiat_amount_spent:,.2f} {fiat}.",
'yellow', 'on_grey', attrs=['bold', 'dark'])
todays_prices = caches.get_todays_price_cache(fiat=fiat)
for k, v in enumerate(todays_prices['data']):
if todays_prices['data'][k]['symbol'] == coin:
todays_coin_price = todays_prices['data'][k]['quote'][fiat]['price']
new_fiat_amount_spent = f"{round(coin_amount * todays_coin_price)}"
new_fiat_amount_spent = float(new_fiat_amount_spent)
current_coin_amount = fiat_amount_spent / todays_coin_price
cprint(f" Whereas today, {fiat_amount_spent:,.2f} {fiat} would buy you {current_coin_amount:,} {coin}.", 'yellow',
'on_grey', attrs=['bold', 'dark'])
return fiat_amount_spent, fiat, todays_coin_price, coin, coin_amount, new_fiat_amount_spent, start, \
current_coin_amount, historical_coin_price
def calculate_single_profit_loss(fiat_amount_spent, fiat, coin, coin_amount, new_fiat_amount, start):
cprint(f" Had you bought {coin_amount} {coin} for {fiat_amount_spent:,.2f} {fiat} on {start} and held on to "
f"it, today {coin_amount} {coin} would be worth {new_fiat_amount:,.2f} {fiat}.\n", 'yellow', 'on_grey',
attrs=['bold', 'dark'])
cprint(f" Pulling the trigger on {start} would have caused your fiat holdings to:", 'yellow', 'on_grey',
attrs=['bold', 'dark'])
if fiat_amount_spent > new_fiat_amount:
loss = fiat_amount_spent - new_fiat_amount
state = 'loss'
cprint(f' Decrease by {loss:,.2f} {fiat}, so sad, quelle dommage!\n', 'yellow', 'on_grey', attrs=['bold',
'dark'])
return loss, state
elif fiat_amount_spent < new_fiat_amount:
profit = new_fiat_amount - fiat_amount_spent
state = 'profit'
cprint(f' Increase by {profit:,.2f} {fiat}, what joy, rich as Croesus you could have been!\n', 'yellow',
'on_grey', attrs=['bold', 'dark'])
return profit, state
elif fiat_amount_spent == new_fiat_amount:
stasis = fiat_amount_spent == new_fiat_amount
state = 'stasis'
cprint(f" My my, how curious, your new and old {fiat} holdings remain identical - welcome to stasis!\n",
'yellow', 'on_grey', attrs=['bold', 'dark'])
return stasis, state
def calculate_multiple_profit_loss(total_fiat_amount_spent, portfolio, start, fiat_amount_spent, fiat,
sum_fiat_profit_amount, sum_fiat_loss_amount, new_fiat_amount):
cprint(' Total of all wallets combined:', 'grey', 'on_yellow', attrs=['dark', 'bold'])
cprint(f" All of your wallets purchased on {start} would have cost {total_fiat_amount_spent:,.2f} {fiat}.",
'yellow', 'on_grey', attrs=['dark', 'bold'])
total_profits_losses = sum_fiat_profit_amount - sum_fiat_loss_amount
cprint(f" Had you held onto all them coins, today the total profits of your portfolio would have been"
f" {total_profits_losses:,.2f} {fiat}.\n", 'yellow', 'on_grey', attrs=['dark', 'bold'])
cprint(f" Pulling the trigger on {start} would have caused your fiat holdings to:", 'yellow', 'on_grey',
attrs=['bold', 'dark'])
if total_fiat_amount_spent > total_profits_losses:
loss = total_fiat_amount_spent - total_profits_losses
state = 'loss'
cprint(f' Decrease by {loss:,.2f} {fiat}, so sad, quelle dommage!\n', 'yellow', 'on_grey', attrs=['bold',
'dark'])
return loss, state
elif total_fiat_amount_spent < total_profits_losses:
profit = total_profits_losses - total_fiat_amount_spent
state = 'profit'
cprint(f' Increase by {profit:,.2f} {fiat}, what joy, rich as Croesus you could have been!\n', 'yellow',
'on_grey', attrs=['bold', 'dark'])
return profit, state
elif fiat_amount_spent == new_fiat_amount:
stasis = fiat_amount_spent - new_fiat_amount
state = 'stasis'
cprint(f" My my, how curious, your new and old {fiat} holdings remain identical - welcome to stasis! \n",
'yellow', 'on_grey', attrs=['bold', 'dark'])
return stasis, state
def output_coin_histdata(coin_cache, high_or_low, coin, start_date, end_date, fiat):
# coin price at start date based on user's high/low request
cprint(f" This was the {high_or_low}est price point for 1 {coin} on "
f"{datetime.datetime.utcfromtimestamp(start_date).strftime('%d-%m-%Y')} in {fiat}: "
f"{coin_cache['data']['quotes'][0]['quote'][fiat][high_or_low]:,.2f}", 'red', 'on_grey', attrs=['bold',
'dark'])
# coin price at end date based on user's high/low request
length = range(len(coin_cache['data']['quotes']))
cprint(f" This was the {high_or_low}est price point for 1 {coin} on "
f"{datetime.datetime.utcfromtimestamp(end_date).strftime('%d-%m-%Y')} in {fiat}: "
f"{coin_cache['data']['quotes'][length[-1]]['quote'][fiat][high_or_low]:,.2f}", 'red', 'on_grey',
attrs=['bold', 'dark'])
cprint(f" Here is your requested output for {coin_cache['data']['name']}'s historical data from "
f"{datetime.datetime.utcfromtimestamp(start_date).strftime('%d-%m-%Y')} to "
f"{datetime.datetime.utcfromtimestamp(end_date).strftime('%d-%m-%Y')}\ncoin id: {coin_cache['data']['id']}"
f" // coin name: {coin_cache['data']['name']} // coin symbol: {coin_cache['data']['symbol']}", 'red',
'on_grey', attrs=['bold', 'dark'])
for k, v in enumerate(coin_cache['data']['quotes']):
cprint(v, 'yellow', 'on_grey', attrs=['bold', 'dark'])
return
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, James Cammarata <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: accelerate
short_description: Enable accelerated mode on remote node
deprecated:
removed_in: "2.4"
why: Replaced by ControlPersist
alternative: Use SSH with ControlPersist instead.
removed: True
description:
- This module has been removed, this file is kept for historical documentation purposes.
- This modules launches an ephemeral I(accelerate) daemon on the remote node which
Ansible can use to communicate with nodes at high speed.
- The daemon listens on a configurable port for a configurable amount of time.
- Fireball mode is AES encrypted
version_added: "1.3"
options:
port:
description:
- TCP port for the socket connection
required: false
default: 5099
aliases: []
timeout:
description:
- The number of seconds the socket will wait for data. If none is received when the timeout value is reached, the connection will be closed.
required: false
default: 300
aliases: []
minutes:
description:
- The I(accelerate) listener daemon is started on nodes and will stay around for
this number of minutes before turning itself off.
required: false
default: 30
ipv6:
description:
- The listener daemon on the remote host will bind to the ipv6 localhost socket
if this parameter is set to true.
required: false
default: false
multi_key:
description:
- When enabled, the daemon will open a local socket file which can be used by future daemon executions to
upload a new key to the already running daemon, so that multiple users can connect using different keys.
This access still requires an ssh connection as the uid for which the daemon is currently running.
required: false
default: no
version_added: "1.6"
notes:
- See the advanced playbooks chapter for more about using accelerated mode.
requirements:
- "python >= 2.4"
- "python-keyczar"
author: "James Cammarata (@jimi-c)"
'''
EXAMPLES = '''
# To use accelerate mode, simply add "accelerate: true" to your play. The initial
# key exchange and starting up of the daemon will occur over SSH, but all commands and
# subsequent actions will be conducted over the raw socket connection using AES encryption
- hosts: devservers
accelerate: true
tasks:
- command: /usr/bin/anything
'''
from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
removed_module()
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# make it possible to run as standalone program
import sys
import re
import random
import string
sys.path.append('/srv/chemminetools')
sys.path.append('/srv/chemminetools/sdftools') # allow tools.py to be imported
from django.core.management import setup_environ
import chemminetools.settings
import argparse
setup_environ(chemminetools.settings)
from django.contrib.auth.models import User
from django.conf import settings
from compounddb.tools import parse_annotation, insert_single_compound
from compounddb.models import Compound, SDFFile
MAX_COMPOUND_LIMIT = settings.MAX_COMPOUND_LIMIT
MAX_SDF_LENGTH = settings.MAX_SDF_LENGTH
parser = \
argparse.ArgumentParser(description='Add new compounds to a users workbench'
)
parser.add_argument('-u', '--user', help='numeric user id',
required=True)
parser.add_argument('-o', '--outfile', help='output file',
required=True)
args = vars(parser.parse_args())
def addMyCompounds(sdf, user):
sdffile = u''
counter = 0
linecounter = 0
namekey = 'PUBCHEM_IUPAC_NAME'
message = 'ERROR: bad input data.'
added_ids = []
try:
if not isinstance(sdf, unicode):
sdf = unicode(sdf, 'utf-8')
sdf = sdf.encode('ascii', 'ignore')
sdf = sdf.split('\n')
for line in sdf:
linecounter += 1
if linecounter > MAX_SDF_LENGTH:
message = 'ERROR: an input sdf exceeds ' \
+ str(MAX_SDF_LENGTH) + ' lines.'
raise Exception
if linecounter == 1:
# clean up cid with regexes
line = re.match(r"^\W*(.*?)\W*$", line).group(1)
if line == '':
line = 'unspecified_' \
+ ''.join(random.sample(string.digits, 6))
line = re.sub(r"[^a-zA-Z_0-9-]", '_', line, count=0)
# loop adding postfix numbers to the cid until we find a unique cid in the database
appendNumber = 1
oldCid = line
while len(Compound.objects.filter(cid=line, user=user)) \
> 0:
appendNumber += 1
line = oldCid + '_' + str(appendNumber)
sdffile += line
sdffile += '\n'
if line.startswith('$$$$'):
try:
moldata = parse_annotation(sdffile, namekey)
except:
message = 'ERROR: invalid input format.'
raise Exception
counter += 1
linecounter = 0
if counter > MAX_COMPOUND_LIMIT:
message = 'ERROR: upload exceeds ' \
+ str(MAX_COMPOUND_LIMIT) + ' compounds.'
raise Exception
try:
newid = insert_single_compound(moldata, sdffile,
namekey, 'id', user)
except:
message = \
'ERROR: Database error, possibly excessively large compound?'
raise Exception
added_ids.append(newid)
sdffile = u''
if counter > 0:
return 'Success: Added ' + str(counter) + ' compounds.'
else:
return 'ERROR: No valid input found.'
except:
for id in added_ids:
try:
Compound.objects.get(id=id).delete()
except:
pass
return message
def main():
sdf = sys.stdin.read()
user = User.objects.get(id=args['user'])
output = addMyCompounds(sdf, user)
f = open(args['outfile'], 'w')
f.write(output)
f.close()
if __name__ == '__main__':
main()
|
python
|
import os
import argparse
import numpy as np
class SceneGraphNode(object):
def __init__(self):
pass
def set_attribute(self, attr, value):
if attr not in self.__dict__.keys():
raise ValueError(f"Unknown attribute: {attr}")
self.__dict__[attr] = value
def get_attribute(self, attr):
if attr not in self.__dict__.keys():
raise ValueError(f"Unknown attribute: {attr}")
return self.__dict__[attr]
class Building(SceneGraphNode):
def __init__(self):
# 2D floor area (sq. meters)
self.floor_area = None
# Functionality of the building
self.function = None
# Gibson split (tiny, medium, large)
self.gibson_split = None
# Unique building id
self.id = None
# Name of the Gibson model
self.name = None
# Number of panoramic cameras in the model
self.num_cameras = None
# Number of floors in the building
self.num_floors = None
# Number of objects in the building
self.num_objects = None
# Number of rooms in the building
self.num_rooms = None
# Building reference point
self.reference_point = None
# 3D size of building
self.size = None
# 3D volume of building (in cubic meters, computed from the 3D convex hull)
self.volume = None
# Size of each voxel
self.voxel_size = None
# 3D coordinates of voxel centers (N x 3)
self.voxel_centers = None
# Number of voxels per axis (k x l x m)
self.voxel_resolution = None
# Minimum Spanning Tree
self.MST = None
# Instantiate other layers in the graph
self.room = {}
self.camera = {}
self.object = {}
def print_attributes(self):
print(f'--- Building ID: {self.id} ---')
for key in self.__dict__.keys():
if key not in ['room', 'camera', 'object', 'voxel_centers']:
print(f"Key: {key} | Value: {self.get_attribute(key)}")
class Room(SceneGraphNode):
def __init__(self):
# 2D floor area (in square meters)
self.floor_area = None
# Index of the floor that contains this room
self.floor_number = None
# Unique space id per building
self.id = None
# 3D coordinates of room center
self.location = None
# Building face indices that correspond to this room
self.inst_segmentation = None
# Functionality of the room
self.scene_category = None
# 3D size of the room
self.size = None
        # Building's voxel indices that correspond to this space
self.voxel_occupancy = None
# 3D volume of the room (in cubic meters, computed from the 3D convex hull)
self.volume = None
# Parent building that contains this room
self.parent_building = None
# Connected Rooms
self.connected_rooms = set()
def print_attributes(self):
print(f'--- Room ID: {self.id} ---')
for key in self.__dict__.keys():
print(f"Key: {key} | Value: {self.get_attribute(key)}")
class SceneObject(SceneGraphNode):
def __init__(self):
# List of possible actions
self.action_affordance = None
# 2D floor area (in square meters)
self.floor_area = None
# Total surface coverage (in square meters)
self.surface_coverage = None
# Object label
self.class_ = None
# Unique object id per building
self.id = None
# 3D coordinates of object center
self.location = None
# List of main object materials
self.material = None
# 3D object size
self.size = None
# Building face indices that correspond to this object
self.inst_segmentation = None
# Main tactile texture (may be None)
self.tactile_texture = None
# Main visible texture (may be None)
self.visual_texture = None
# 3D volume of object (in cubic meters, computed from the 3D convex hull)
self.volume = None
# Building voxel indices corresponding to this object
self.voxel_occupancy = None
# Parent room that contains this object
self.parent_room = None
def print_attributes(self):
print(f'--- Object ID: {self.id} ---')
for key in self.__dict__.keys():
print(f"Key: {key} | Value: {self.get_attribute(key)}")
class Camera(SceneGraphNode):
def __init__(self):
# Name of the camera
self.name = None
# Unique camera id
self.id = None
# Camera field of view
self.FOV = None
# 3D location of camera in the model
self.location = None
# 3D orientation of camera (quaternion)
self.rotation = None
# Camera modality (e.g., RGB, grayscale, depth, etc.)
self.modality = None
# Camera resolution
self.resolution = None
# Parent room that contains this camera
self.parent_room = None
def load_scenegraph(datapath):
data = np.load(datapath, allow_pickle=True)["output"].item()
building = Building()
# Set building attributes
for key in data["building"].keys():
if key in [
"object_inst_segmentation",
"room_inst_segmentation",
"object_voxel_occupancy",
"room_voxel_occupancy",
]:
continue
building.set_attribute(key, data["building"][key])
res = building.voxel_resolution
voxel_centers = np.reshape(building.voxel_centers, (res[0], res[1], res[2], 3))
building.set_attribute("voxel_centers", voxel_centers)
# Set room attributes
unique_rooms = np.unique(data["building"]["room_inst_segmentation"])
for room_id in unique_rooms:
if room_id == 0:
continue
building.room[room_id] = Room()
room_faces = np.where(data["building"]["room_inst_segmentation"] == room_id)[0]
building.room[room_id].set_attribute("inst_segmentation", room_faces)
room_voxels = np.where(data["building"]["room_voxel_occupancy"] == room_id)[0]
building.room[room_id].set_attribute("voxel_occupancy", room_voxels)
for key in data["room"][room_id].keys():
building.room[room_id].set_attribute(key, data["room"][room_id][key])
# Set object attributes
unique_objects = np.unique(data["building"]["object_inst_segmentation"])
for object_id in unique_objects:
if object_id == 0:
continue
building.object[object_id] = SceneObject()
object_faces = np.where(data["building"]["object_inst_segmentation"] == object_id)[0]
building.object[object_id].set_attribute("inst_segmentation", object_faces)
object_voxels = np.where(data["building"]["object_voxel_occupancy"] == object_id)[0]
building.object[object_id].set_attribute("voxel_occupancy", object_voxels)
for key in data["object"][object_id].keys():
building.object[object_id].set_attribute(key, data["object"][object_id][key])
# Set camera attributes
for cam_id in data["camera"].keys():
if cam_id == 0:
continue
building.camera[cam_id] = Camera()
for key in data["camera"][cam_id].keys():
building.camera[cam_id].set_attribute(key, data["camera"][cam_id][key])
kruskals_mst(building)
return building
def kruskals_mst(building):
"""Apply Kruskal's algorithm to find the minimum spanning tree of room connectivities, where edge
weights are determined by the distance between rooms' centroids.
"""
room_map = dict()
floor_map = dict()
floor_map_inv = dict()
location = dict()
floor = dict()
floor_count = 0
for i, room_id in enumerate(building.room):
room_map[i] = room_id
location[i] = building.room[room_id].location
floor_name = building.room[room_id].floor_number
if floor_name not in floor_map_inv:
floor_map_inv[floor_name] = floor_count
floor_map[floor_count] = floor_name
floor_count += 1
floor[i] = floor_map_inv[floor_name]
floor_to_room_map = dict()
for room_id, floor_id in floor.items():
if floor_id not in floor_to_room_map:
floor_to_room_map[floor_id] = set()
floor_to_room_map[floor_id].add(room_id)
if building.num_rooms is None:
building.num_rooms = len(building.room)
assert(len(building.room) == building.num_rooms)
num_floors_with_rooms = len(floor_map_inv)
# compute room-room distances
adj_rooms = np.zeros((building.num_rooms, building.num_rooms))
for i in range(building.num_rooms):
for j in range(i+1, building.num_rooms):
dist = np.linalg.norm(location[i] - location[j], 2)
adj_rooms[i, j] = dist
adj_rooms[j, i] = dist
# compute minimum spanning tree for all rooms
room_graph = Graph(building.num_rooms)
# find average-minimum distances of rooms between floors
if num_floors_with_rooms > 1:
adj_floors = np.zeros((num_floors_with_rooms, num_floors_with_rooms))
adj_floors_count = np.ones((num_floors_with_rooms, num_floors_with_rooms))
for room_id_a, floor_id_a in floor.items():
for floor_id_b in floor_to_room_map:
if floor_id_a == floor_id_b:
continue
# compute minimum room-room distance between different floors
room_id_bs = np.array(list(floor_to_room_map[floor_id_b]), dtype=int)
room_id_as = np.ones_like(room_id_bs, dtype=int) * room_id_a
min_dist = adj_rooms[room_id_as, room_id_bs].min()
# compute running average
n = adj_floors_count[floor_id_a, floor_id_b]
adj_floors[floor_id_a, floor_id_b] += (1/n) * (min_dist - adj_floors[floor_id_a, floor_id_b])
adj_floors_count[floor_id_a, floor_id_b] += 1
# compute minimum spanning floor tree
floor_graph = Graph(num_floors_with_rooms)
for floor_id_a in range(num_floors_with_rooms):
for floor_id_b in range(floor_id_a, num_floors_with_rooms):
floor_graph.addEdge(floor_id_a, floor_id_b, adj_floors[floor_id_a, floor_id_b])
floor_mst = floor_graph.KruskalMST()
# add minimum edge across floors
for floor_id_a, floor_id_b, w in floor_mst:
room_id_as = np.array(list(floor_to_room_map[floor_id_a]), dtype=int)
room_id_bs = np.array(list(floor_to_room_map[floor_id_b]), dtype=int)
ones_b = np.ones_like(room_id_bs, dtype=int)
distances = np.zeros(len(room_id_as) * len(room_id_bs))
room_coords = np.empty((2, len(room_id_as) * len(room_id_bs)), dtype=int)
i = 0
for room_a in room_id_as:
room_idx_as = ones_b.copy() * room_a
distances[i:i+len(ones_b)] = adj_rooms[room_idx_as, room_id_bs]
room_coords[:, i:i+len(ones_b)] = np.stack((room_idx_as, room_id_bs))
i += len(ones_b)
min_edge = np.min(distances)
min_room_a, min_room_b = room_coords[:, np.argmin(distances)]
room_graph.addEdge(min_room_a, min_room_b, min_edge)
for floor_id in floor_to_room_map:
room_ids = np.array(list(floor_to_room_map[floor_id]), dtype=int)
for i, room_i in enumerate(room_ids):
            for room_j in room_ids[i+1:]:
room_graph.addEdge(room_i, room_j, adj_rooms[room_i, room_j])
# compute room MST
room_mst = room_graph.KruskalMST()
building.MST = room_mst
connected_rooms = set()
for i, j, w in room_mst:
connected_rooms.add(room_map[i])
connected_rooms.add(room_map[j])
building.room[room_map[i]].connected_rooms.add(room_map[j])
building.room[room_map[j]].connected_rooms.add(room_map[i])
assert(len(connected_rooms) == building.num_rooms)
assert(len(building.MST) == building.num_rooms - 1)
class Graph:
def __init__(self, vertices):
self.V = vertices # No. of vertices
self.graph = [] # default dictionary
# to store graph
def addEdge(self, u, v, w):
self.graph.append([u, v, w])
def find(self, parent, i):
if parent[i] == i:
return i
return self.find(parent, parent[i])
def union(self, parent, rank, x, y):
xroot = self.find(parent, x)
yroot = self.find(parent, y)
if rank[xroot] < rank[yroot]:
parent[xroot] = yroot
elif rank[xroot] > rank[yroot]:
parent[yroot] = xroot
else:
parent[yroot] = xroot
rank[xroot] += 1
def KruskalMST(self):
result = []
i = 0
e = 0
self.graph = sorted(self.graph, key=lambda item: item[2])
parent = []
rank = []
# Create V subsets with single elements
for node in range(self.V):
parent.append(node)
rank.append(0)
# Number of edges to be taken is equal to V-1
while e < self.V - 1:
u, v, w = self.graph[i]
i = i + 1
x = self.find(parent, u)
y = self.find(parent, v)
if x != y:
e = e + 1
result.append([u, v, w])
self.union(parent, rank, x, y)
minimumCost = 0
for u, v, weight in result:
minimumCost += weight
return result
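# A minimal usage sketch for the Graph helper above (illustrative values only,
# not taken from this module): build a small weighted graph and extract its
# minimum spanning tree with Kruskal's algorithm.
#
#   g = Graph(3)
#   g.addEdge(0, 1, 1.0)
#   g.addEdge(1, 2, 2.0)
#   g.addEdge(0, 2, 5.0)
#   mst = g.KruskalMST()   # -> [[0, 1, 1.0], [1, 2, 2.0]]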
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', type=str, default="/home/agiachris/data/3dscenegraph/tiny")
parser.add_argument('--model', type=str, default='Marstons')
args = parser.parse_args()
model_type = "verified_graph" if os.path.basename(args.data_path) == 'tiny' else "automated_graph"
datapath = os.path.join(args.data_path, model_type, "3DSceneGraph_" + args.model + ".npz")
scenegraph = {}
scenegraph[args.model] = {}
scenegraph[args.model]["graph"] = load_scenegraph(datapath)
|
python
|
c = {'z': '\033[m'}
l = ['w', 'r', 'g', 'y', 'b', 'm', 'c', 'k']
i = j = 0
for lin in range(30, 38):
c[l[i]] = f'\033[{lin}m'
i += 1
i = j = 0
for lin in range(30, 38):
for col in range(40, 48):
c[l[i] + l[j]] = f'\033[{lin};{col}m'
j += 1
i += 1
j = 0
def clr(text='', value='z'):
return c[value] + str(text) + c['z']
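# Example usage (keys come from the colour tables built above; for instance
# 'r' maps to ANSI code 31 and 'yb' to '33;44'):
#   print(clr('error', 'r'))    # single-letter key -> foreground colour
#   print(clr('note', 'yb'))    # two-letter key -> foreground + background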
|
python
|
# Generated by Django 3.2.9 on 2022-01-15 21:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20220115_1425'),
]
operations = [
migrations.AlterField(
model_name='comments',
name='description',
field=models.CharField(blank=True, max_length=500),
),
migrations.AlterField(
model_name='course',
name='comments',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.comments'),
),
]
|
python
|
import sys
import os
import configparser
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import messagebox
from functools import partial
from PIL import ImageTk, Image
from tkinter import filedialog
class Gui:
"""
This class represents a GUI for reading, changing and writing purposes.
The input is read from the parameters.db file.
In the GUI the values can be edited and finally saved in beispiel.cfg
The GUI is configured by a grid layout using themed widgets (tkinter.ttk).
The section must be set as the parameter on the command line.
    command line: python3 <path-to-mini_topsim>/gui.py <section-name>
At the beginning the default parameters of the section are shown.
Use the OK-Button for saving the config and the Cancel-Button for closing
the session.
    If you try to close the window, you will be asked how to close the session
    if you still have unsaved parameters compared to beispiel.cfg.
"""
def __init__(self, section, parameter_file, button_file, cfg_file):
"""
This function sets all needed configurations of the Tkinter Window.
At the beginning the GUI is set in the middle of the screen.
Attributes
----------
section : input from user in the command line
parameter_file : path to the parameter file
button_file : path to the button file
cfg_file : path to the config file
"""
self.section = section
self.parameter_file = parameter_file
self.button_file = button_file
self.cfg_file = cfg_file
self.data = self.get_data()
self.root = tk.Tk()
screen_width = self.root.winfo_screenwidth()
screen_height = self.root.winfo_screenheight()
gui_width = 500
gui_height = 100 + 30 * len(self.data)
if gui_width > screen_width:
gui_width = screen_width
if gui_height > screen_height:
gui_height = screen_height
self.root.geometry('{}x{}'.format(gui_width, gui_height))
x_position = int(screen_width/2 - gui_width/2)
y_position = int(screen_height/2 - gui_height/2)
self.root.geometry('+{}+{}'.format(x_position, y_position))
self.root.columnconfigure(0, weight=0)
self.root.columnconfigure(1, weight=1)
self.root.columnconfigure(2, weight=0)
self.display()
def start(self):
"""
This function starts the GUI.
"""
self.root.title('MiniTopSim-Project')
self.root.mainloop()
def get_data(self):
"""
        This function reads parameters.db and copies all data into a dictionary.
Returns
-------
data_from_file : dict
"""
data_from_file = {}
possible_sections = []
cp = configparser.ConfigParser()
cp.optionxform = str
cp.read(self.parameter_file)
for possible_section in cp.sections():
possible_sections.append(possible_section)
if self.section not in possible_sections:
print('The given section \"{}\" is not available!'.format(
self.section))
print('Try to use one of these next time and type in without '
'quotes:')
print(possible_sections)
sys.exit()
for option in cp[self.section]:
value = list(eval(cp[self.section][option]))
data_from_file[option] = value
globals()[option] = value[0]
if len(data_from_file) == 0:
print('The given section \"{}\" doesn\'t contain any data!'.format(
self.section))
sys.exit()
return data_from_file
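    # A hedged illustration of the parameters.db layout this method expects
    # (the section and option names below are assumptions, not taken from the
    # real file). Each option stores a tuple of (default, condition, help):
    #
    #   [Setup]
    #   XMAX = (300.0, 'XMAX > 0', 'Right boundary of the surface')
    #   ETCHING = (True, None, 'Enable the etching step')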
def display(self):
"""
This function sets the grid layout and the basic visualisation.
If the value is type int or double -> spinbox
Else if the value is type bool -> checkbutton
else -> entry
        If the user enters new content in the GUI, the new value is
        automatically validated as well.
        On the right-hand side every parameter has a help button that shows
        further information on the parameter and its condition.
        At the bottom there are two buttons, 'OK' and 'Cancel'.
        If you press 'OK' and the data isn't already saved in beispiel.cfg,
        a filedialog will appear to get the right directory.
        If you press 'Cancel' the GUI will just close.
        If you press the 'X' button in the right-hand corner, the behaviour
        depends on the current state. If the current config is already saved
        in beispiel.cfg, it closes like the 'Cancel' button. But if there are
        unsaved parameters, you will be asked whether you want to save the
        current state or discard it.
"""
self.entries = {}
self.root.protocol('WM_DELETE_WINDOW', self.exit_window)
info_image = Image.open(self.button_file)
self.button_image = ImageTk.PhotoImage(info_image)
label = ttk.Label(self.root, relief='sunken', text='PARAMETER')
label.grid(row=0, column=0, padx='5', pady='5', sticky='ew')
label = ttk.Label(self.root, relief='sunken', text='VALUE')
label.grid(row=0, column=1, padx='5', pady='5', sticky='ew')
label = ttk.Label(self.root, relief='sunken', text='HELP')
label.grid(row=0, column=2, padx='5', pady='5', sticky='ew')
current_row = 0
for key, value in self.data.items():
current_row = current_row + 1
label = ttk.Label(self.root, text=key)
label.grid(row=current_row, column=0, padx='5', sticky='ew')
self.defaultvalue = value[0]
defaulttype = type(self.defaultvalue)
if defaulttype is int or defaulttype is float:
content = tk.DoubleVar()
entry = ttk.Spinbox(self.root, textvariable=content,
validate='focusout', from_=-1000.0, to=1000.0,
validatecommand=partial(self.change_update, key))
elif defaulttype is bool:
content = tk.BooleanVar()
entry = ttk.Checkbutton(self.root, variable=content,
command=partial(self.change_update, key))
else:
content = tk.StringVar()
entry = ttk.Entry(self.root, textvariable=content,
validate='focusout', validatecommand=partial(
self.change_update, key))
content.set(self.defaultvalue)
entry.grid(row=current_row, column=1, padx='5', sticky='ew')
help_button = ttk.Button(self.root, image=self.button_image,
command=partial(self.info, key))
help_button.grid(row=current_row, column=2, padx='5', sticky='ew')
self.entries[key] = {'content': content, 'entry': entry,
'default': self.defaultvalue}
save_button = ttk.Button(self.root, text='OK',
command=self.save)
save_button.grid(column=1, row=current_row+1, padx='5', pady='5',
sticky='nsw')
cancel_button = ttk.Button(self.root, text='Cancel',
command=self.close)
cancel_button.grid(column=1, row=current_row+1, padx='5', pady='5',
sticky='nse')
def info(self, key):
"""
This function shows the information on user defined parameter.
"""
help_text = str(self.data[key][2])
if self.data[key][1] is None:
value_text = 'None'
else:
value_text = str(self.data[key][1])
info_text = 'Info:\n' + help_text + '\n' + 'Condition:\n' + value_text
messagebox.showinfo(key, info_text)
def change_update(self, key):
"""
This function processes the user input of the GUI.
        If the new value doesn't match the associated condition, the old one
        is restored and the user gets an error message.
Returns
-------
        Bool : whether an error occurred
"""
old_value = globals()[key]
globals()[key] = self.entries[key]['content'].get()
if self.data[key][1] is not None and not eval(self.data[key][1]):
error = True
else:
error = False
if error:
self.entries[key]['content'].set(old_value)
globals()[key] = old_value
messagebox.showerror(key, 'The new value isn\'t compatible with '\
'the existing condition!')
self.info(key)
return False
else:
self.defaultvalue = self.data[key][0]
self.data[key][0] = self.entries[key]['content'].get()
return True
def check_data(self):
"""
        This function checks whether all values satisfy their conditions.
This function will be called before saving the data.
Returns
-------
        Bool : whether warnings occurred
"""
warnings = 0
for key, condition in self.data.items():
if condition[1] is not None:
if not eval(condition[1]):
warnings = warnings + 1
messagebox.showwarning(key, key + ' doesn\'t '\
'match the requirements!\n\n' + 'Value: ' +
str(self.data[key][0]) + '\nCondition: ' +
str(self.data[key][1]))
if warnings > 0:
return False
else:
return True
def save(self):
"""
        This function calls the CreateConfigFile class if everything is valid.
        This function will be called by pressing the 'OK' button or the 'X'.
        If the data already exists in beispiel.cfg, a message will be shown
        on the screen.
Returns
-------
        Bool : whether the saving procedure was successful
"""
result = self.data_already_in_file()
if result:
messagebox.showinfo('Save', 'Data exists already in beispiel.cfg')
else:
valid_data = self.check_data()
if valid_data:
config = CreateConfigFile(self.data)
success = config.save_file(self.section, self.cfg_file)
if success:
messagebox.showinfo('Save', 'Save of config successfully!')
return True
else:
messagebox.showerror('Save', 'Please correct the error(s) as '\
'displayed!')
return False
def exit_window(self):
"""
This function will be called when you try to leave the GUI by the 'X'.
        If there is no unsaved work, it will just close like the 'Cancel'
        button. If there is something new, you will be asked whether to save it.
"""
equal = self.data_already_in_file()
answer = False
success = True
if not equal:
messagebox.showwarning('Unsaved Data', 'The data isn\'t equal to '\
'the file beispiel.cfg')
answer = messagebox.askyesno('Create Config file?', 'Do you want '\
'to save the parameters?')
if answer:
success = self.save()
if success:
self.close()
def data_already_in_file(self):
"""
        This function evaluates whether the data of the current state already
        exists in beispiel.cfg.
        Returns
        -------
        Bool : True if the current data already exists in beispiel.cfg
"""
path_to_file = os.path.join(self.cfg_file, 'beispiel.cfg')
if os.path.exists(path_to_file):
data_file = {}
cp = configparser.ConfigParser()
cp.optionxform = str
cp.read(path_to_file)
if self.section in cp.sections():
for option in cp[self.section]:
value = list(eval(cp[self.section][option]))
data_file[option] = value
if self.data == data_file:
return True
return False
def close(self):
"""
This function closes the GUI.
"""
self.root.destroy()
class CreateConfigFile:
"""
This class represents the procedure of saving your config.
The data is given to the class in the __init__ function, which will be
used to create a config file.
Default is beispiel.cfg in the directory .../work/Aufgabe13_gui/
"""
def __init__(self, data):
"""
This function enables the usage of the config data.
Attributes
----------
data : dict of the given section
"""
self.data = data
def save_file(self, section, cfg_file):
"""
This function saves the data in a config file.
        Therefore a filedialog is opened to help you to get the right
directory.
Attributes
----------
section : input from user in the command line
cfg_file : path to the config file
Returns
-------
        Bool : whether the data was saved successfully
"""
cp = configparser.ConfigParser()
cp.optionxform = str
file_name = filedialog.asksaveasfilename(initialdir=cfg_file,
title='Save config', filetypes=(('cfg-files', '*.cfg'),
('all files', '*.*')), defaultextension='.cfg',
initialfile='beispiel.cfg')
if file_name:
cp.add_section(section)
for key, value in self.data.items():
cp.set(section, key, str(tuple(value)))
with open(file_name, 'w') as file:
cp.write(file)
return True
return False
def main(command_line_input):
"""
This function calls and starts the GUI.
    The function checks that parameters.db exists and that the command line
    input is valid.
If this is the case then the GUI is called.
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
path_to_parameter_file = os.path.join(current_dir, 'parameters.db')
if not os.path.exists(path_to_parameter_file):
print('File parameters.db cannot be found in directory {}'.format(
path_to_parameter_file))
sys.exit()
parent_dir = os.path.dirname(current_dir)
path_to_button_file = os.path.join(current_dir, 'info.png')
path_to_cfg_file = os.path.join(parent_dir, 'work', 'Aufgabe13_gui')
if len(command_line_input) == 2:
section_name = command_line_input[1]
elif len(command_line_input) > 2:
section_name = ' '.join(command_line_input[1:])
else:
print('Syntax from command line is not valid!')
sys.exit()
gui = Gui(section_name, path_to_parameter_file, path_to_button_file,
path_to_cfg_file)
gui.start()
if __name__ == "__main__":
main(sys.argv)
|
python
|
"""
Tests CoreML Imputer converter.
"""
import numpy as np
import unittest
try:
from sklearn.impute import SimpleImputer as Imputer
import sklearn.preprocessing
if not hasattr(sklearn.preprocessing, 'Imputer'):
# coremltools 3.1 does not work with scikit-learn 0.22
setattr(sklearn.preprocessing, 'Imputer', Imputer)
except ImportError:
from sklearn.preprocessing import Imputer
import sklearn.preprocessing
from onnxmltools.utils import dump_data_and_model
class TestCoreMLImputerConverter(unittest.TestCase):
def test_imputer(self):
try:
model = Imputer(missing_values='NaN', strategy='mean', axis=0)
except TypeError:
model = Imputer(missing_values=np.nan, strategy='mean')
model.axis = 0
data = [[1, 2], [np.nan, 3], [7, 6]]
model.fit(data)
from onnxmltools.convert.coreml.convert import convert
import coremltools # noqa
try:
model_coreml = coremltools.converters.sklearn.convert(model)
except ValueError as e:
if 'not supported' in str(e):
# Python 2.7 + scikit-learn 0.22
return
model_onnx = convert(model_coreml.get_spec())
self.assertTrue(model_onnx is not None)
dump_data_and_model(np.array(data, dtype=np.float32),
model, model_onnx, basename="CmlImputerMeanFloat32")
if __name__ == "__main__":
unittest.main()
|
python
|
from django.core.exceptions import ValidationError
def following_changed(sender, action, instance, *args, **kwargs):
"""Raise an error if admin tries to assign User to the Users follow list"""
# m2mchanged.connect specified in apps.py
following = instance.following.all()
creator = instance.user
if creator in following:
raise ValidationError ("can't like own post")
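# A hedged sketch of the wiring mentioned in the comment above (the app config
# class, app name, and Profile model are assumptions, not taken from this
# project):
#
#   # apps.py
#   from django.apps import AppConfig
#   from django.db.models.signals import m2m_changed
#
#   class NetworkConfig(AppConfig):
#       name = 'network'
#
#       def ready(self):
#           from .models import Profile
#           from .signals import following_changed
#           m2m_changed.connect(following_changed,
#                               sender=Profile.following.through)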
|
python
|
from arcutils.settings import PrefixedSettings
DEFAULTS = {
'default': {
# Notes:
# - There are two LDAP hosts, ldap-bulk and ldap-login; Elliot has
# said that ldap-login "has more servers in the pool" and he
# recommends using it over ldap-bulk (RT ticket #580686); note that
# despite the name, ldap-login does not actually require auth
# - It's possible to do unauthenticated requests over SSL
# - It's also possible to do authenticated requests over non-SSL
# - To use SSL, set ``use_ssl`` to ``true``
# - A project will need an LDAP service account if it does LDAP
# requests that return more than 2,000 results
# - The defaults here support a typical Odin autocomplete scenario
'host': 'ldap-login.oit.pdx.edu',
'port': None,
'use_ssl': False,
'search_base': 'ou=people,dc=pdx,dc=edu',
'username': None,
'password': None,
'strategy': 'SYNC',
'tls': {
'ca_certs_file': 'certifi:cacert.pem',
'validate': 'CERT_REQUIRED',
# This can be set to one of the protocol versions in the ssl module
# (e.g., "PROTOCOL_SSLv23"); if it's not set, the ldap3 library will
# choose a default value (which is "PROTOCOL_SSLv23" currently)
'version': None,
}
},
# Active Directory
# To connect to AD, a service account is required; request it from
# cis-windows.
'ad': {
'hosts': ['oitdcpsu01.psu.ds.pdx.edu', 'oitdcpsu02.psu.ds.pdx.edu'],
'use_ssl': True,
'strategy': 'SYNC',
'search_base': 'ou=people,dc=psu,dc=ds,dc=pdx,dc=edu',
# These are required for AD and must be in the project's local settings:
# 'username': None,
# 'password': None,
}
}
class Settings(PrefixedSettings):
def get(self, key, default=None, using='default'):
using_key = '{using}.{key}'.format(using=using, key=key)
return super().get(using_key, default)
settings = Settings('LDAP', DEFAULTS)
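# A minimal usage sketch (the import path is an assumption; the values shown
# are the defaults defined above):
#
#   from myproject.ldap_settings import settings
#   host = settings.get('host')                        # 'ldap-login.oit.pdx.edu'
#   ad_base = settings.get('search_base', using='ad')  # AD search base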
|
python
|
from django.contrib import admin
from ledger.accounts.models import EmailUser
from disturbance.components.proposals import models
from disturbance.components.proposals import forms
from disturbance.components.main.models import ActivityMatrix, SystemMaintenance, ApplicationType
#from disturbance.components.main.models import Activity, SubActivityLevel1, SubActivityLevel2, SubCategory
from reversion.admin import VersionAdmin
from django.conf.urls import url
from django.template.response import TemplateResponse
from django.http import HttpResponse, HttpResponseRedirect
from disturbance.utils import create_helppage_object
# Register your models here.
@admin.register(models.ProposalType)
class ProposalTypeAdmin(admin.ModelAdmin):
list_display = ['name','description', 'version']
ordering = ('name', '-version')
list_filter = ('name',)
#exclude=("site",)
class ProposalDocumentInline(admin.TabularInline):
model = models.ProposalDocument
extra = 0
@admin.register(models.AmendmentReason)
class AmendmentReasonAdmin(admin.ModelAdmin):
list_display = ['reason']
@admin.register(models.Proposal)
class ProposalAdmin(VersionAdmin):
inlines =[ProposalDocumentInline,]
@admin.register(models.ProposalAssessorGroup)
class ProposalAssessorGroupAdmin(admin.ModelAdmin):
list_display = ['name','default']
filter_horizontal = ('members',)
form = forms.ProposalAssessorGroupAdminForm
readonly_fields = ['default']
#readonly_fields = ['regions', 'activities']
def has_delete_permission(self, request, obj=None):
if obj and obj.default:
return False
return super(ProposalAssessorGroupAdmin, self).has_delete_permission(request, obj)
@admin.register(models.ProposalApproverGroup)
class ProposalApproverGroupAdmin(admin.ModelAdmin):
list_display = ['name','default']
filter_horizontal = ('members',)
form = forms.ProposalApproverGroupAdminForm
readonly_fields = ['default']
#readonly_fields = ['default', 'regions', 'activities']
def has_delete_permission(self, request, obj=None):
if obj and obj.default:
return False
return super(ProposalApproverGroupAdmin, self).has_delete_permission(request, obj)
@admin.register(models.ProposalStandardRequirement)
class ProposalStandardRequirementAdmin(admin.ModelAdmin):
list_display = ['code','text','obsolete']
@admin.register(models.HelpPage)
class HelpPageAdmin(admin.ModelAdmin):
list_display = ['application_type','help_type', 'description', 'version']
form = forms.DisturbanceHelpPageAdminForm
change_list_template = "disturbance/help_page_changelist.html"
ordering = ('application_type', 'help_type', '-version')
list_filter = ('application_type', 'help_type')
def get_urls(self):
urls = super(HelpPageAdmin, self).get_urls()
my_urls = [
url('create_disturbance_help/', self.admin_site.admin_view(self.create_disturbance_help)),
url('create_apiary_help/', self.admin_site.admin_view(self.create_apiary_help)),
url('create_disturbance_help_assessor/', self.admin_site.admin_view(self.create_disturbance_help_assessor)),
url('create_apiary_help_assessor/', self.admin_site.admin_view(self.create_apiary_help_assessor)),
]
return my_urls + urls
def create_disturbance_help(self, request):
create_helppage_object(application_type='Disturbance', help_type=models.HelpPage.HELP_TEXT_EXTERNAL)
return HttpResponseRedirect("../")
def create_apiary_help(self, request):
create_helppage_object(application_type='Apiary', help_type=models.HelpPage.HELP_TEXT_EXTERNAL)
return HttpResponseRedirect("../")
def create_disturbance_help_assessor(self, request):
create_helppage_object(application_type='Disturbance', help_type=models.HelpPage.HELP_TEXT_INTERNAL)
return HttpResponseRedirect("../")
def create_apiary_help_assessor(self, request):
create_helppage_object(application_type='Apiary', help_type=models.HelpPage.HELP_TEXT_INTERNAL)
return HttpResponseRedirect("../")
@admin.register(ActivityMatrix)
class ActivityMatrixAdmin(admin.ModelAdmin):
list_display = ['name', 'description', 'version']
ordering = ('name', '-version')
@admin.register(SystemMaintenance)
class SystemMaintenanceAdmin(admin.ModelAdmin):
list_display = ['name', 'description', 'start_date', 'end_date', 'duration']
ordering = ('start_date',)
readonly_fields = ('duration',)
form = forms.SystemMaintenanceAdminForm
@admin.register(ApplicationType)
class ApplicationTypeAdmin(admin.ModelAdmin):
list_display = ['name', 'order', 'visible']
ordering = ('order',)
|
python
|
# Copyright 2019 Belma Turkovic
# TU Delft Embedded and Networked Systems Group.
# NOTICE: THIS FILE IS BASED ON https://github.com/p4lang/behavioral-model/blob/master/mininet/p4_mininet.py, BUT WAS MODIFIED UNDER COMPLIANCE
# WITH THE APACHE 2.0 LICENCE FROM THE ORIGINAL WORK.
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mininet.net import Mininet
from mininet.node import Switch, Host
from mininet.log import setLogLevel, info
class P4Host(Host):
def config(self, **params):
r = super(Host, self).config(**params)
self.defaultIntf().rename("eth0")
for off in ["rx", "tx", "sg"]:
cmd = "/sbin/ethtool --offload eth0 %s off" % off
self.cmd(cmd)
# disable IPv6
self.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
return r
def describe(self):
print "**********"
print self.name
print "default interface: %s\t%s\t%s" %(
self.defaultIntf().name,
self.defaultIntf().IP(),
self.defaultIntf().MAC()
)
print "**********"
class P4Switch(Switch):
"""P4 virtual switch"""
device_id = 0
def __init__( self, name, sw_path = None, json_path = None, grpc_port = None,
thrift_port = None,
pcap_dump = False,
verbose = False,
device_id = None,
enable_debugger = False,
cpu_port = None,
**kwargs ):
Switch.__init__( self, name, **kwargs )
assert(sw_path)
self.sw_path = sw_path
self.json_path = json_path
self.verbose = verbose
self.pcap_dump = pcap_dump
self.enable_debugger = enable_debugger
self.cpu_port = cpu_port
if device_id is not None:
self.device_id = device_id
P4Switch.device_id = max(P4Switch.device_id, device_id)
else:
self.device_id = P4Switch.device_id
P4Switch.device_id += 1
self.nanomsg = "ipc:///tmp/bm-%d-log.ipc" % self.device_id
@classmethod
def setup( cls ):
pass
def start( self, controllers ):
"Start up a new P4 switch"
print "Starting P4 switch", self.name
args = [self.sw_path]
for port, intf in self.intfs.items():
if not intf.IP():
args.extend( ['-i', str(port) + "@" + intf.name] )
if self.pcap_dump:
args.append("--pcap")
args.extend( ['--device-id', str(self.device_id)] )
P4Switch.device_id += 1
notificationAddr = 'ipc:///tmp/bmv2-'+str(self.device_id)+'-notifications.ipc'
args.extend( ['--notifications-addr', str(notificationAddr)] )
if self.json_path:
args.append(self.json_path)
else:
args.append("--no-p4")
if self.enable_debugger:
args.append("--debugger")
args.append("-- --enable-swap")
logfile = 'p4s.%s.log' % self.name
print ' '.join(args)
self.cmd( ' '.join(args) + ' >' + logfile + ' 2>&1 &' )
print "switch has been started"
def stop( self ):
"Terminate IVS switch."
self.output.flush()
self.cmd( 'kill %' + self.sw_path )
self.cmd( 'wait' )
self.deleteIntfs()
def attach( self, intf ):
"Connect a data port"
assert(0)
def detach( self, intf ):
"Disconnect a data port"
assert(0)
class P4GrpcSwitch(Switch):
"""P4 virtual switch"""
device_id = 0
def __init__( self, name, sw_path = None, json_path = None,
thrift_port = None,
grpc_port = None,
pcap_dump = False,
verbose = False,
device_id = None,
enable_debugger = False,
cpu_port = None,
**kwargs ):
Switch.__init__( self, name, **kwargs )
assert(sw_path)
self.sw_path = sw_path
self.json_path = json_path
self.verbose = verbose
self.thrift_port = thrift_port
self.grpc_port = grpc_port
self.enable_debugger = enable_debugger
self.cpu_port = cpu_port
if device_id is not None:
self.device_id = device_id
P4Switch.device_id = max(P4Switch.device_id, device_id)
else:
self.device_id = P4Switch.device_id
P4Switch.device_id += 1
@classmethod
def setup( cls ):
pass
def start( self, controllers ):
"Start up a new P4 switch"
print "Starting P4 switch", self.name
args = [self.sw_path]
for port, intf in self.intfs.items():
if not intf.IP():
args.extend( ['-i', str(port) + "@" + intf.name] )
if self.thrift_port:
args.extend( ['--thrift-port', str(self.thrift_port)] )
args.extend( ['--device-id', str(self.device_id)] )
P4Switch.device_id += 1
if self.json_path:
args.append(self.json_path)
else:
args.append("--no-p4")
args.append("--log-flush --log-level trace --log-file %s.log" % self.name)
if self.grpc_port:
args.append("-- --grpc-server-addr 0.0.0.0:"+str(self.grpc_port)+" --cpu-port "+self.cpu_port)
print ' '.join(args)
self.cmd( ' '.join(args) + ' > %s.log 2>&1 &' % self.name)
print "switch has been started"
def stop( self ):
"Terminate IVS switch."
self.cmd( 'kill %' + self.sw_path )
self.cmd( 'wait' )
self.deleteIntfs()
def attach( self, intf ):
"Connect a data port"
assert(0)
def detach( self, intf ):
"Disconnect a data port"
assert(0)
|
python
|
from rl.agent import DiscreteAgent
from rl.environment import DiscreteEnvironment
from rl.experience_tuple import ExperienceTupleSerializer
class ExperimentConfiguration:
"""Configuration to run an Experiment."""
def __init__(self, agent: DiscreteAgent, environment: DiscreteEnvironment,
num_of_episodes: int, results_directory: str,
experience_tuple_serializer: ExperienceTupleSerializer):
self._validate_input(agent, environment, num_of_episodes,
results_directory, experience_tuple_serializer)
self.results_directory = results_directory
self.environment = environment
self.num_of_episodes = num_of_episodes
self.agent = agent
self.experience_tuple_serializer = experience_tuple_serializer
@staticmethod
def _validate_input(agent, environment, num_of_episodes,
results_directory, experience_tuple_serializer):
assert agent is not None
assert environment is not None
assert num_of_episodes is not None
assert results_directory is not None
assert experience_tuple_serializer is not None
|
python
|
"""
DREAprep.py - Allows for inputs to be passed to multiple components
"""
from openmdao.main.api import Component, Slot
from openmdao.lib.datatypes.api import Float
from MEflows import MEflows
class DREAprep(Component):
""" Passes input variables to output variables so they can be passed to multiple components. """
# Variables from MEflows variable tree
# -------------------------
flow_out = Slot(MEflows, iotype='out')
# Input variables
# -----------------------------
alt = Float(0.0, iotype='in', units='ft', desc='Altitude')
Mach = Float(0.0, iotype='in', desc='Freestream Mach number')
dTs = Float(0.0, iotype='in', units='degR', desc='Delta in total temperature from standard day')
PC = Float(100.0, iotype='in', desc='Engine power code')
deltaPt = Float(0.0, iotype='in', units='lbf/ft**2', desc='Change in primary total pressure from baseline')
deltaTt = Float(0.0, iotype='in', units='degR', desc='Change in primary total temperature from baseline')
deltaM = Float(0.0, iotype='in', desc='Change in primary Mach number from baseline')
gamma = Float(1.4, iotype='in', desc='Ratio of specific heats')
# Output variables
# -----------------------------
#MPRI = Float(iotype='out', desc='Design Mach number of the primary nozzle')
#PPRI = Float(iotype='out', units='lbf/ft**2', desc='Primary jet total pressure')
#PSEC = Float(iotype='out', units='lbf/ft**2', desc='Secondary jet total pressure')
#TPRI = Float(iotype='out', units='degR', desc='Primary jet total temperature')
#TSEC = Float(iotype='out', units='degR', desc='Secondary jet total temperature')
#Pstatic = Float(iotype='out', units='lbf/ft**2', desc='Freestream static pressure')
def __init__(self):
super(DREAprep,self).__init__()
self.add('flow_out', MEflows())
def execute(self):
""" Pass variables from inputs to outputs """
# Calculate freestream flow conditions based on regression of
# NPSS Ambient element and isentropic relationships
# (regressions valid to 15,000 ft altitude)
# --------------------------------------------------------------
self.flow_out.Pstatic = (0.00000000655102*self.alt**2-0.000524157*self.alt+14.6889)*144
Tstatic = (-0.00356559*self.alt+518.668)+self.dTs
self.flow_out.sec.Pt = self.flow_out.Pstatic*(1+(self.gamma-1)/2*self.Mach**2)**(self.gamma/(self.gamma-1))
self.flow_out.sec.Tt = Tstatic*(1+(self.gamma-1)/2*self.Mach**2)
self.flow_out.sec.Mach = 0.5
# Calculate primary flow conditions based on regression of an
# NPSS model provided by Jon Seidel
# (deltas can be applied to each regression)
# --------------------------------------------------------------
self.flow_out.pri.Pt = (1890.56567716063+1159.46218542673*self.Mach-0.0723607918787817*self.alt+20.1498381047601*self.PC+self.Mach**2*733.160417217374+self.Mach*self.alt*-0.0429594524792568+self.alt**2*0.0000013101980809133+self.Mach*self.PC*4.15624348452779+self.alt*self.PC*-0.00074491162440819+self.PC**2*0.0544282167543358)+self.deltaPt
self.flow_out.pri.Tt = (650.074441611223+48.9268880551154*self.Mach+0.000461490510019391*self.alt+4.80356170478831*self.PC+self.Mach**2*195.739109744029+self.Mach*self.alt*0.000946845170794506+self.alt**2*-0.0000004886019754503+self.Mach*self.PC*-0.820921508833817+self.alt*self.PC*-0.0000319239880056947+self.PC**2*-0.00610646669887009)+self.deltaTt
self.flow_out.pri.Mach = (0.0278253956300395+1.08027846320383*self.Mach-0.0000001216209649966*self.alt+0.0160466635733684*self.PC+self.Mach**2*-0.0746368646020014+self.Mach*self.alt*0.0000025326149842305+self.alt**2*-4.55813373388211e-11+self.Mach*self.PC*-0.00857252256331783+self.alt*self.PC*0.0000000077897757696+self.PC**2*-0.0000520093017579281)+self.deltaM
if __name__ == "__main__":
MyComp = DREAprep()
MyComp.dTs = 0.0
MyComp.APRI = 12.0
MyComp.AsAp = 1.75
MyComp.AR = 3.0
MyComp.XMAR = 0.95
MyComp.Mach = 0.0
MyComp.alt = 0.0
MyComp.PC = 90
MyComp.deltaPt = 0.0
MyComp.deltaTt = 0.0
MyComp.deltaM = 0.0
MyComp.run()
print '\n\n'
print '----------OUTPUTS----------'
print 'APRI_DREA : ',MyComp.APRI_DREA
print 'ASEC_DREA : ',MyComp.ASEC_DREA
print 'AEXIT_DREA : ',MyComp.AEXIT_DREA
print 'Width : ',MyComp.width
print 'Pstatic : ',MyComp.Pstatic
print 'PSEC : ',MyComp.PSEC
print 'TSEC : ',MyComp.TSEC
print 'PPRI : ',MyComp.PPRI
print 'TPRI : ',MyComp.TPRI
print 'MPRI : ',MyComp.MPRI
|
python
|
from multiprocessing import current_process, Value, Process
from time import sleep
from random import random
from reader_writer_lock import MultiprocessingFactory
from reader_writer_lock.MpFactory import Integer
def test(option):
test_name = ["No priority", "Read priority", "Write priority"]
print(f"Testing [{test_name[option]}]")
rw = MultiprocessingFactory(option)
r_lock = rw.get_read_lock()
w_lock = rw.get_write_lock()
val = Integer(0)
def read_val():
nonlocal val
with r_lock:
sleep(random())
print(f"Reading from {current_process().name}, val = {val.value()}")
def write_val():
nonlocal val
with w_lock:
sleep(random())
val.incre()
print(f"Writing from {current_process().name}, val = {val.value()}")
r = []
w = []
for i in range(10):
t_r = Process(target=read_val, name=f"read_{i}")
t_w = Process(target=write_val, name=f"write_{i}")
w.append(t_w)
r.append(t_r)
if option == 1:
# start write first, but then the read thread will be prioritized
t_w.start()
t_r.start()
elif option == 2:
# start read first, but then the write thread will be prioritized
t_r.start()
t_w.start()
else:
# random case
t_w.start()
t_r.start()
for ele in r:
ele.join()
for ele in w:
ele.join()
assert val.value() == 10
print("===============================")
if __name__ == "__main__":
print("Starting test for multiprocessing ....")
print("####################################")
test(0)
test(1)
test(2)
|
python
|
import numpy as np
class FCM:
"""Fuzzy C-means
Parameters
----------
n_clusters: int, optional (default=10)
The number of clusters to form as well as the number of
centroids to generate
max_iter: int, optional (default=150)
Hard limit on iterations within solver.
m: float, optional (default=2.0)
Exponent for the fuzzy partition matrix, specified as a
scalar greater than 1.0. This option controls the amount of
fuzzy overlap between clusters, with larger values indicating
a greater degree of overlap.
error: float, optional (default=1e-5)
Tolerance for stopping criterion.
random_state: int, optional (default=42)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
Attributes
----------
n_samples: int
Number of examples in the data set
n_features: int
Number of features in samples of the data set
u: array, shape = [n_samples, n_clusters]
Fuzzy partition array, returned as an array with n_samples rows
        and n_clusters columns. Element u[i,j] indicates the degree of
        membership of the ith data point in the jth cluster. For a given
        data point, the sum of the membership values for all clusters is one.
    centers: array, shape = [n_clusters, n_features]
Final cluster centers, returned as an array with n_clusters rows
containing the coordinates of each cluster center. The number of
columns in centers is equal to the dimensionality of the data being
clustered.
Methods
-------
fit(X)
fit the data
_predict(X)
use fitted model and output cluster memberships
predict(X)
use fitted model and output 1 cluster for each sample
References
----------
.. [1] `Pattern Recognition with Fuzzy Objective Function Algorithms
<https://doi.org/10.1007/978-1-4757-0450-1>`_
.. [2] `FCM: The fuzzy c-means clustering algorithm
<https://doi.org/10.1016/0098-3004(84)90020-7>`_
"""
def __init__(
self, n_clusters=10, max_iter=150, m=2, error=1e-5, random_state=42
):
assert m > 1
self.u, self.centers = None, None
self.n_clusters = n_clusters
self.max_iter = max_iter
self.m = m
self.error = error
self.rng = np.random.default_rng(random_state)
def fit(self, X):
"""Compute fuzzy C-means clustering.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training instances to cluster.
"""
self.n_samples = X.shape[0]
self.u = self.rng.uniform(size=(self.n_samples, self.n_clusters))
        self.u = self.u / np.tile(
            self.u.sum(axis=1)[np.newaxis].T, self.n_clusters
        )
for iteration in range(self.max_iter):
u_old = self.u.copy()
self.centers = FCM._next_centers(X, self.u, self.m)
self.u = self.__predict(X)
# Stopping rule
if np.linalg.norm(self.u - u_old) < self.error:
break
def __predict(self, X):
"""
Parameters
----------
X : array, shape = [n_samples, n_features]
New data to predict.
Returns
-------
u: array, shape = [n_samples, n_clusters]
Fuzzy partition array, returned as an array with n_samples rows
and n_clusters columns.
"""
temp = FCM._dist(X, self.centers) ** float(2 / (self.m - 1))
denominator_ = temp.reshape(
(X.shape[0], 1, -1)).repeat(temp.shape[-1], axis=1)
denominator_ = temp[:, :, np.newaxis] / denominator_
return 1 / denominator_.sum(2)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : array, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape = [n_samples,]
Index of the cluster each sample belongs to.
"""
X = np.expand_dims(X, axis=0) if len(X.shape) == 1 else X
return self.__predict(X).argmax(axis=-1)
@staticmethod
def _dist(A, B):
"""Compute the euclidean distance two matrices"""
return np.sqrt(np.einsum("ijk->ij", (A[:, None, :] - B) ** 2))
@staticmethod
def _next_centers(X, u, m):
"""Update cluster centers"""
um = u ** m
return (X.T @ um / np.sum(um, axis=0)).T
# partition coefficient (Equation 12a of https://doi.org/10.1016/0098-3004(84)90020-7)
@property
def partition_coefficient(self):
if hasattr(self, "u"):
return np.sum(self.u ** 2) / self.n_samples
else:
raise ReferenceError(
"You need to train the model first. You can use `.fit()` "
"method to this."
)
@property
def partition_entropy_coefficient(self):
if hasattr(self, "u"):
return -np.sum(self.u * np.log2(self.u)) / self.n_samples
else:
raise ReferenceError(
"You need to train the model first. You can use `.fit()` "
"method to this."
)
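

# A minimal usage sketch (not part of the original module): cluster a small
# synthetic data set with the FCM class above. The sample data is illustrative
# only.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(5, 1, (50, 2))])
    fcm = FCM(n_clusters=2, random_state=0)
    fcm.fit(X)
    labels = fcm.predict(X)                # hard cluster index per sample
    print(labels[:5], fcm.partition_coefficient)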
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 19-1-24 下午4:04
# @Author : Hubery
# @File : helper.py
# @Software: PyCharm
import json
import requests
from django.core.cache import cache
from HuberyBlog.settings import BASE_HEADERS
def page_cache(timeout):
def wrapper1(view_func):
def wrapper2(request):
key = 'PageCache-%s' % request.get_full_path()
response = cache.get(key)
if response is None:
response = view_func(request)
cache.set(key, response, timeout)
return response
return wrapper2
return wrapper1
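# A hedged usage sketch (the view name is an assumption): cache a view's full
# response for five minutes.
#
#   @page_cache(60 * 5)
#   def index(request):
#       ...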
def get_ip_address(ip):
"""
    Look up the geographic location of an IP address.
:param ip:
:return:
"""
url = 'http://ip.taobao.com//service/getIpInfo.php?ip={}'.format(ip)
headers = BASE_HEADERS
try:
res = requests.get(url, headers=headers, timeout=5)
if res.status_code == 200:
res_dict_data = json.loads(res.text).get('data', '')
country = res_dict_data.get('country', '')
region = res_dict_data.get('region', '')
city = res_dict_data.get('city', '')
isp = res_dict_data.get('isp', '')
ip_address = '/'.join([country, region, city, isp])
return ip_address
else:
return ''
except Exception as e:
        print('Request to the Taobao IP lookup service failed, reason: {}'.format(e))
return None
|
python
|
#!/usr/bin/env python3
import re


def getStarSystems(content):
    """Extract the 'Stars Visited' values from a journal/log string."""
    # The original pattern was not a valid regular expression; this is a
    # best-guess reconstruction that matches entries such as
    #   "Stars Visited": 123   or   "Stars Visited": 123.0
    starsfound = re.compile(r'"Stars Visited": (\d+(?:\.\d+)?)')
    result = starsfound.findall(content)
    stars = []
    if result:
        for r in result:
            stars.append(r)
        print(result)
    return stars
|
python
|
import os
import sys
import tempfile
import subprocess
import contextlib
import json
import time
# https://stackoverflow.com/questions/6194499/pushd-through-os-system
@contextlib.contextmanager
def pushd(new_dir):
previous_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(previous_dir)
heartbeat_file = sys.argv[1] + ".json"
with tempfile.TemporaryDirectory() as tmpdir, pushd(tmpdir):
subprocess.run(
["git", "clone", "https://github.com/regro/autotick-bot.git"],
check=True,
)
with pushd("autotick-bot"):
subprocess.run("git checkout heartbeats", check=True, shell=True)
heartbeat = int(time.time())
with open(heartbeat_file, "w") as fp:
json.dump({"heartbeat": heartbeat}, fp)
subprocess.run(
["git", "add", heartbeat_file],
check=True,
)
subprocess.run(
"git commit --allow-empty -am '[ci skip] heartbeat %s'" % sys.argv[1],
check=True,
shell=True,
)
subprocess.run(
"git remote set-url --push origin "
"https://${PASSWORD}@github.com/regro/autotick-bot.git",
shell=True,
check=True,
)
i = 0
pushed = False
while not pushed and i < 10:
try:
subprocess.run(
"git push",
check=True,
shell=True,
)
pushed = True
except subprocess.CalledProcessError:
subprocess.run("git pull --rebase", shell=True)
i += 1
if not pushed:
sys.exit(1)
|
python
|
import pandas as pd
import shutil
from shareplum import Site
from shareplum.site import Version
from shareplum import Office365
# SharePoint
user_name = '@eafit.edu.co'
password = ''
authcookie = Office365('https://eafit.sharepoint.com', username=user_name, password=password).GetCookies()
site = Site('https://eafit.sharepoint.com/sites/Proyectoinformedecoyunturaeconomica/', version=Version.v2016, authcookie=authcookie)
folder = site.Folder('Documentos Compartidos/General')
allfiles = folder.files
# From here we take the relative_url (server relative url) of the file we need; in this case it is the "last" one,
# since it was the first one uploaded to SharePoint.
# This is only needed to identify the relative url.
vistazo_file_info = allfiles[-1]
relative_url = vistazo_file_info['ServerRelativeUrl']
nombre_actual_archivo = vistazo_file_info['Name']
data = folder.get_file('NData vistazo 1.09.2020 .xlsx')  # or use nombre_actual_archivo here
with open('holu.xlsx', 'wb') as f:
    f.write(data)

print(relative_url)
print(nombre_actual_archivo)
# Organizing files
# Preprocessing: Production
# Preprocessing: Prices
# Preprocessing: Labor
# Preprocessing: External sector
# Preprocessing: Fiscal
# Preprocessing: Finance
# Export data
|
python
|
# # Copyright (c) 2019, Corey Smith
# # Distributed under the MIT License.
# # See LICENCE file for full terms.
# """
# Tests for the data examples.
# """
# # import sys
# import inspect
# # import os
# from pathlib import Path
# import numpy as np
# # import paramiko
# # import pytest
# # import sqlalchemy
# # sys.path.append(("../"))
# # from ingestion.sql import SQLDatabase, psql_engine
# # from ingestion.helper_functions import ssh_tunnel
# from instacart_dataset import create_adjancency_matrix
# def test_instacart_definition(postgres_db):
# postgres_db.define_instacart_db()
# ignored_attributes = [
# "conn",
# "engine",
# "tables",
# "db_string",
# "metadata",
# "table_names",
# ]
# attributes = inspect.getmembers(postgres_db, lambda x: not (inspect.isroutine(x)))
# instacart_table_attributes = [
# a[0]
# for a in attributes
# if not (
# a[0].startswith("__") and a[0].endswith("__") or a[0] in ignored_attributes
# )
# ]
# postgres_db.close()
# if not np.all([x in instacart_table_attributes for x in postgres_db.table_names]):
# raise AssertionError()
# def test_adjancency_matrix_creation(postgres_db, test_data_dir=""):
# num_orders = 10
# create_adjancency_matrix(
# postgres_db, save_folder=test_data_dir, num_orders=num_orders
# )
# full_info_filename = Path(
# test_data_dir + "full_info_{}_prior_orders.csv".format(num_orders)
# )
# weighted_matrix_filename = Path(
# test_data_dir + "weighted_adjacency_matrix_{}_orders.csv".format(num_orders)
# )
# if full_info_filename.exists():
# full_info_filename.unlink()
# else:
# raise AssertionError()
# if weighted_matrix_filename.exists():
# weighted_matrix_filename.unlink()
# else:
# raise AssertionError()
|
python
|
"""
Helios Signals
Effectively callbacks that other apps can wait and be notified about
"""
import django.dispatch
# when an election is created
election_created = django.dispatch.Signal(providing_args=["election"])
# when a vote is cast
vote_cast = django.dispatch.Signal(
providing_args=["user", "voter", "election", "cast_vote"]
)
# when an election is tallied
election_tallied = django.dispatch.Signal(providing_args=["election"])
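# A hedged usage sketch (receiver name and import path are assumptions): other
# apps can subscribe to these signals with a standard Django receiver.
#
#   from django.dispatch import receiver
#   from helios import signals
#
#   @receiver(signals.vote_cast)
#   def on_vote_cast(sender, user, voter, election, cast_vote, **kwargs):
#       ...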
|
python
|
from setuptools import setup, find_packages
import sys
import os
import re
org = 'PEtab-dev'
repo = 'petab_select'
def read(fname):
"""Read a file."""
return open(fname).read()
def absolute_links(txt):
"""Replace relative petab github links by absolute links."""
raw_base = \
f"(https://raw.githubusercontent.com/{org}/{repo}/main/"
embedded_base = \
f"(https://github.com/{org}/{repo}/tree/main/"
# iterate over links
for var in re.findall(r'\[.*?\]\((?!http).*?\)', txt):
if re.match(r'.*?.(png|svg)\)', var):
# link to raw file
rep = var.replace("(", raw_base)
else:
# link to github embedded file
rep = var.replace("(", embedded_base)
txt = txt.replace(var, rep)
return txt
# 3.7.1 for NumPy
minimum_python_version = '3.7.1'
if sys.version_info < tuple(map(int, minimum_python_version.split('.'))):
sys.exit(f'PEtab Select requires Python >= {minimum_python_version}')
# read version from file
__version__ = ''
version_file = os.path.join('petab_select', 'version.py')
# sets __version__
exec(read(version_file)) # pylint: disable=W0122 # nosec
ENTRY_POINTS = {
'console_scripts': [
'petab_select = petab_select.cli:cli',
]
}
# project metadata
# noinspection PyUnresolvedReferences
setup(
name='petab_select',
version=__version__,
description='PEtab Select: an extension to PEtab for model selection.',
long_description=absolute_links(read('README.md')),
long_description_content_type="text/markdown",
#author='The PEtab Select developers',
#author_email='[email protected]',
url=f'https://github.com/{org}/{repo}',
packages=find_packages(exclude=['doc*', 'test*']),
install_requires=[
# TODO fix versions
'more-itertools',
'numpy',
'pyyaml',
#'numpy>=1.15.1',
#'pandas>=1.2.0',
#'matplotlib>=2.2.3',
#'python-libsbml>=5.17.0',
#'sympy',
#'colorama',
#'seaborn',
#'pyyaml',
#'jsonschema',
# required for CLI
'click',
'dill',
],
include_package_data=True,
tests_require=[
#'flake8',
'pytest',
#'python-libcombine',
'amici',
'fides',
'pypesto',
],
python_requires=f'>={minimum_python_version}',
entry_points=ENTRY_POINTS,
extras_require={
#'reports': ['Jinja2'],
#'combine': ['python-libcombine>=0.2.6'],
#'doc': [
# 'sphinx>=3.5.3',
# 'sphinxcontrib-napoleon>=0.7',
# 'sphinx-markdown-tables>=0.0.15',
# 'sphinx-rtd-theme>=0.5.1',
# 'recommonmark>=0.7.1',
# 'nbsphinx>=0.8.2',
# 'm2r>=0.2.1',
# 'ipython>=7.21.0',
#]
}
)
|
python
|
REGISTRY = {}
from .episode_runner import EpisodeRunner
REGISTRY["episode"] = EpisodeRunner
from .parallel_runner import ParallelRunner
REGISTRY["parallel"] = ParallelRunner
from .episode_cross_runner import EpisodeCrossRunner
REGISTRY["cross"] = EpisodeCrossRunner
|
python
|
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import euclidean_distances
from scipy.io import loadmat
from matplotlib import pyplot as plt
from sklearn.metrics import normalized_mutual_info_score
from sklearn.metrics import adjusted_rand_score
def autoselect_dc(max_id, max_dis, min_dis, distances):
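    # Binary-search dc so that the average fraction of neighbours within dc
    # falls roughly in [1%, 2%] of all points, the rule of thumb from the
    # density-peak clustering paper (Rodriguez & Laio, 2014).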
dc = (max_dis + min_dis) / 2
while True:
nneighs = sum([1 for v in distances if v < dc]) / max_id ** 2
if nneighs >= 0.01 and nneighs <= 0.02:
break
# binary search
if nneighs < 0.01:
min_dis = dc
else:
max_dis = dc
dc = (max_dis + min_dis) / 2
if max_dis - min_dis < 0.0001:
break
return dc
class DPC(object):
def __init__(self,center_rate=0.6,noise_rate=0.1,dc=None,auto_filt=False):
self.distance = None
self.dc = None
self.rho = None
self.sigma = None
self.cluster = None
self.center = None
self.tup = None
self.center_rate = center_rate
self.noise_rate = noise_rate
self.auto_filt = auto_filt
        if dc is not None:
self.dc = dc
def info_cal(self,X):
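        # For every point compute rho (number of points within dc, i.e. the
        # local density) and sigma (distance to the nearest point of higher
        # density; for the densest point, the distance to the farthest point).
        # map_idx records that nearest higher-density neighbour and is later
        # used to propagate cluster labels down from the density peaks.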
distance = euclidean_distances(X)
        if self.dc is None:
self.dc = autoselect_dc(distance.shape[0],np.max(distance),np.min(distance),distance.flatten())
rdistance = distance - self.dc
rho = np.array([rdistance[rdistance[idx] < 0].shape[0] for idx in range(rdistance.shape[0])])
sigma = np.array([np.min(distance[idx,(rho>value)]) if value < np.max(rho) else np.max(distance[idx]) for idx,value in enumerate(rho)])
map_idx = np.array([np.argwhere(distance[idx] == np.min(distance[idx,(rho>value)])).flatten() if value < np.max(rho) else np.argmax(distance[idx]).flatten() for idx,value in enumerate(rho)])
map_idx = np.array([arr[0] for arr in map_idx])
idx = np.argsort(rho)
tup = np.array([i for i in zip(rho[idx],sigma[idx],idx)])
self.distance, self.dc, self.rho, self.sigma = distance, self.dc, rho, sigma
return tup,map_idx
def remove_noise(self,cluster,tup,map_idx):
y_pred = np.array([cluster[idx] for idx in range(map_idx.shape[0])])
distance = self.distance
rho = self.rho
group = {}
for i in list(set(y_pred)):
group[i] = np.argwhere(y_pred == i).flatten()
for label in group.keys():
max_rho = -1
for i in group[label]:
for idx,dis in enumerate(distance[i]):
if idx not in group[label] and distance[i,idx] < self.dc and max_rho < rho[i]:
max_rho = rho[idx]
break
if max_rho != -1:
for i in group[label]:
if self.rho[i] < max_rho:
cluster[i] = -1
return cluster
def fit_transform(self,X):
tup,map_idx = self.info_cal(X)
self.tup = tup
origin_center_idx = tup[(tup[:,0]>np.max(tup[:,0])*self.center_rate) & (tup[:,1]>np.max(tup[:,1])*self.center_rate)][:,2].astype("int64")
self.center = origin_center_idx
cluster = {}
for idx,center in enumerate(origin_center_idx):
cluster[center] = idx
if not self.auto_filt:
origin_noise_idx = tup[(tup[:,0]<np.max(tup[:,0])*self.noise_rate)][:,2].astype("int64")
for center in origin_noise_idx:
cluster[center] = -1
for density,distance,idx in tup[::-1]:
idx = int(idx)
if idx in cluster.keys():
continue
if map_idx[idx] in cluster.keys():
cluster[idx] = cluster[map_idx[idx]]
else:
cluster[idx] = -1
if self.auto_filt:
cluster = self.remove_noise(cluster,tup,map_idx)
self.cluster = cluster
y_pred = np.array([cluster[idx] for idx in range(X.shape[0])])
return y_pred
if __name__ == '__main__':
data = loadmat(r"D:\jupyter root\data\multi-shape.mat")
X = data['X']
y = data['y'][0]
# plt.plot(X[y == 0,0],X[y == 0,1],'.',color='red')
# plt.plot(X[y == 1,0],X[y == 1,1],'.',color='blue')
# plt.show()
dpc = DPC(0.55,0.1)
y_pred = dpc.fit_transform(X)
cluster = dpc.cluster
# dpc = DPC(center_rate=0.5,auto_filt=True)
# y_pred = dpc.fit_transform(X)
# cluster = dpc.cluster
for i in set(y_pred):
plt.plot(X[y_pred == i,0],X[y_pred == i,1],'.',label=i)
plt.legend(loc='best')
plt.show()
# tup = dpc.tup
# origin_center_idx = dpc.center
# print(origin_center_idx)
# plt.plot(tup[:,0],tup[:,1],'.',color='blue')
# plt.show()
# plt.plot(X[:,0],X[:,1],'.',color='red')
# for i in origin_center_idx:
# plt.plot(X[i,0],X[i,1],'.',color='blue')
# plt.show()
# for i in origin_center_idx:
# print(X[i])
nmi = normalized_mutual_info_score(y_pred,y)
print(round(nmi,4))
ari = adjusted_rand_score(y_pred,y)
print(round(ari,4))
|
python
|
from __future__ import print_function
import math, types
import numpy as N
import matplotlib.pyplot as P
def plotres(psr,deleted=False,group=None,**kwargs):
"""Plot residuals, compute unweighted rms residual."""
res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs
if (not deleted) and N.any(psr.deleted != 0):
res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
print("Plotting {0}/{1} nondeleted points.".format(len(res),psr.nobs))
meanres = math.sqrt(N.mean(res**2)) / 1e-6
if group is None:
i = N.argsort(t)
P.errorbar(t[i],res[i]/1e-6,yerr=errs[i],fmt='x',**kwargs)
else:
if (not deleted) and N.any(psr.deleted):
flagmask = psr.flagvals(group)[~psr.deleted]
else:
flagmask = psr.flagvals(group)
unique = list(set(flagmask))
for flagval in unique:
f = (flagmask == flagval)
flagres, flagt, flagerrs = res[f], t[f], errs[f]
i = N.argsort(flagt)
P.errorbar(flagt[i],flagres[i]/1e-6,yerr=flagerrs[i],fmt='x',**kwargs)
P.legend(unique,numpoints=1,bbox_to_anchor=(1.1,1.1))
P.xlabel('MJD'); P.ylabel('res [us]')
P.title("{0} - rms res = {1:.2f} us".format(psr.name,meanres))
# select parameters by name or number, omit non-existing
def _select(p,pars,select):
sel = []
for s in select:
if isinstance(s,str) and s in pars:
sel.append(pars.index(s))
elif isinstance(s,int) and s < p:
sel.append(s)
return len(sel), sel
def plothist(data,pars=[],offsets=[],norms=[],select=[],weights={},ranges={},labels={},skip=[],append=False,
bins=50,color='k',linestyle=None,linewidth=1,title=None):
if hasattr(data,'data') and not isinstance(data,N.ndarray):
# parse a multinestdata structure
if not pars and hasattr(data,'parnames'):
pars = data.parnames
data = data.data
p = data.shape[-1]
if not pars:
pars = map('p{0}'.format,range(p))
if offsets:
data = data.copy()
if isinstance(offsets,dict):
for i,par in enumerate(pars):
if par in offsets:
data[:,i] = data[:,i] - offsets[par]
else:
if len(offsets) < p:
offsets = offsets + [0.0] * (p - len(offsets))
data = data - N.array(offsets)
if norms:
if len(norms) < p:
norms = norms + [1.0] * (p - len(norms))
data = data / norms
if select:
p, sel = _select(p,pars,select)
data, pars = data[:,sel], [pars[s] for s in sel]
if weights:
weight = 1
for i,par in enumerate(pars):
if par in weights:
if isinstance(weights[par],types.FunctionType):
weight = weight * N.vectorize(weights[par])(data[:,i])
else:
weight = weight * weights[par]
else:
weight = None
# only need lines for multiple plots
# lines = ['dotted','dashdot','dashed','solid']
if not append:
P.figure(figsize=(16*(min(p,4)/4.0),3*(int((p-1)/4)+1)))
for i in range(p):
# figure out how big the multiplot needs to be
if type(append) == int: # need this since isinstance(False,int) == True
q = append
elif isinstance(append,(list,tuple)):
q = len(append)
else:
q = p
# increment subplot index if we're skipping
sp = i + 1
for s in skip:
if i >= s:
sp = sp + 1
# if we're given the actual parnames of an existing plot, figure out where we fall
if isinstance(append,(list,tuple)):
try:
sp = append.index(pars[i]) + 1
except ValueError:
continue
P.subplot(int((q-1)/4)+1,min(q,4),sp)
if append:
P.hold(True)
if pars[i] in ranges:
dx = ranges[pars[i]]
P.hist(data[:,i],bins=int(bins * (N.max(data[:,i]) - N.min(data[:,i])) / (dx[1] - dx[0])),
weights=weight,normed=True,histtype='step',color=color,linestyle=linestyle,linewidth=linewidth)
P.xlim(dx)
else:
P.hist(data[:,i],bins=bins,
weights=weight,normed=True,histtype='step',color=color,linestyle=linestyle,linewidth=linewidth)
P.xlabel(labels[pars[i]] if pars[i] in labels else pars[i])
# P.ticklabel_format(style='sci',axis='both',scilimits=(-3,4),useoffset='True')
P.locator_params(axis='both',nbins=6)
P.minorticks_on()
fx = P.ScalarFormatter(useOffset=True,useMathText=True)
fx.set_powerlimits((-3,4)); fx.set_scientific(True)
fy = P.ScalarFormatter(useOffset=True,useMathText=True)
fy.set_powerlimits((-3,4)); fy.set_scientific(True)
P.gca().xaxis.set_major_formatter(fx)
P.gca().yaxis.set_major_formatter(fy)
P.hold(False)
if title and not append:
P.suptitle(title)
P.tight_layout()
# to do: should fix this histogram so that the contours are correct
# even for restricted ranges...
def _plotonehist2(x,y,parx,pary,smooth=False,colormap=True,ranges={},labels={},bins=50,levels=3,weights=None,
color='k',linewidth=1):
hold = P.ishold()
hrange = [ranges[parx] if parx in ranges else [N.min(x),N.max(x)],
ranges[pary] if pary in ranges else [N.min(y),N.max(y)]]
[h,xs,ys] = N.histogram2d(x,y,bins=bins,normed=True,range=hrange,weights=weights)
if colormap:
P.contourf(0.5*(xs[1:]+xs[:-1]),0.5*(ys[1:]+ys[:-1]),h.T,cmap=P.get_cmap('YlOrBr')); P.hold(True)
H,tmp1,tmp2 = N.histogram2d(x,y,bins=bins,range=hrange,weights=weights)
if smooth:
# only need scipy if we're smoothing
import scipy.ndimage.filters as SNF
H = SNF.gaussian_filter(H,sigma=1.5 if smooth is True else smooth)
if weights is None:
H = H / len(x)
else:
H = H / N.sum(H) # I think this is right...
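    # Choose contour levels that enclose 68.3/95.5/99.7% of the (weighted)
    # probability mass, i.e. the 1/2/3-sigma credible regions.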
Hflat = -N.sort(-H.flatten()) # sort highest to lowest
cumprob = N.cumsum(Hflat) # sum cumulative probability
levels = [N.interp(level,cumprob,Hflat) for level in [0.6826,0.9547,0.9973][:levels]]
xs = N.linspace(hrange[0][0],hrange[0][1],bins)
ys = N.linspace(hrange[1][0],hrange[1][1],bins)
P.contour(xs,ys,H.T,levels,
colors=color,linestyles=['-','--','-.'][:len(levels)],linewidths=linewidth)
P.hold(hold)
if parx in ranges:
P.xlim(ranges[parx])
if pary in ranges:
P.ylim(ranges[pary])
P.xlabel(labels[parx] if parx in labels else parx)
P.ylabel(labels[pary] if pary in labels else pary)
P.locator_params(axis='both',nbins=6)
P.minorticks_on()
fx = P.ScalarFormatter(useOffset=True,useMathText=True)
fx.set_powerlimits((-3,4)); fx.set_scientific(True)
fy = P.ScalarFormatter(useOffset=True,useMathText=True)
fy.set_powerlimits((-3,4)); fy.set_scientific(True)
P.gca().xaxis.set_major_formatter(fx)
P.gca().yaxis.set_major_formatter(fy)
def plothist2(data,pars=[],offsets=[],smooth=False,colormap=True,select=[],ranges={},labels={},bins=50,levels=3,weights=None,cuts=None,
diagonal=True,title=None,color='k',linewidth=1,append=False):
if hasattr(data,'data') and not isinstance(data,N.ndarray):
# parse a multinestdata structure
if not pars and hasattr(data,'parnames'):
pars = data.parnames
data = data.data
m = data.shape[-1]
if not pars:
pars = map('p{0}'.format,range(m))
if offsets:
if len(offsets) < m:
offsets = offsets + [0.0] * (m - len(offsets))
data = data - N.array(offsets)
if cuts:
for i,par in enumerate(pars):
if par in cuts:
data = data[data[:,i] > cuts[par][0],:]
data = data[data[:,i] < cuts[par][1],:]
if weights:
weight = 1
for i,par in enumerate(pars):
if par in weights:
if isinstance(weights[par],types.FunctionType):
weight = weight * N.vectorize(weights[par])(data[:,i])
else:
weight = weight * weights[par]
else:
weight = None
if select:
m, sel = _select(m,pars,select)
data, pars = data[:,sel], [pars[s] for s in sel]
if not append:
fs = min((m if diagonal else m-1)*4,16)
P.figure(figsize=(fs,fs))
data = data.T
if diagonal:
for i in range(m):
if not append:
P.subplot(m,m,i*(m+1)+1)
if pars[i] in ranges:
dx = ranges[pars[i]]
P.hist(data[i],bins=int(50 * (N.max(data[i]) - N.min(data[i])) / (dx[1] - dx[0])),
weights=weight,normed=True,histtype='step',color='k')
P.xlim(dx)
else:
P.hist(data[i],bins=50,weights=weight,normed=True,histtype='step',color='k')
P.xlabel(labels[pars[i]] if pars[i] in labels else pars[i])
P.ticklabel_format(style='sci',axis='both',scilimits=(-2,2),useoffset='True')
# P.tick_params(labelsize=12)
for j in range(0,i):
if not append:
P.subplot(m,m,i*m+j+1)
_plotonehist2(data[j],data[i],pars[j],pars[i],smooth,colormap,ranges,labels,bins,levels,weights=weight,
color=color,linewidth=linewidth)
else:
for i in range(m-1):
for j in range(i+1,m):
if not append:
P.subplot(m-1,m-1,(m-1)*i+j)
_plotonehist2(data[j],data[i],pars[j],pars[i],smooth,colormap,ranges,labels,bins,levels,weights=weight,
color=color,linewidth=linewidth)
P.tight_layout()
if title and not append:
P.suptitle(title)
elif title:
P.title(title)
# if save:
# P.savefig('figs/{0}-{1}-2.png'.format(psr,flms[0]))
def plotgwsrc(gwb):
"""
Plot a GWB source population as a mollweide projection.
"""
theta, phi, omega, polarization = gwb.gw_dist()
rho = phi-N.pi
eta = 0.5*N.pi - theta
# I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014:
# /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485:
# RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2))
#old_settings = N.seterr(invalid='ignore')
P.title("GWB source population")
ax = P.axes(projection='mollweide')
foo = P.scatter(rho, eta, marker='.', s=1)
#bar = N.seterr(**old_settings)
return foo
|
python
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
'''Tests for mghformat reading writing'''
from __future__ import with_statement
import os
from io import StringIO, BytesIO
import numpy as np
from .. import load, save, MGHImage
from ..mghformat import MGHError
from ...tmpdirs import InTemporaryDirectory, TemporaryDirectory
from ...py3k import unicode
from ...fileholders import FileHolder
from ...testing import data_path
from numpy.testing import assert_equal, assert_array_equal, \
assert_array_almost_equal, assert_almost_equal, assert_raises
# sample voxel to ras matrix (mri_info --vox2ras)
v2r = np.array([[1, 2, 3, -13], [2, 3, 1, -11.5],
[3, 1, 2, -11.5], [0, 0, 0, 1]], dtype=np.float32)
# sample voxel to ras - tkr matrix (mri_info --vox2ras-tkr)
v2rtkr = np.array([[-1.0, 0.0, 0.0, 1.5],
[0.0, 0.0, 1.0, -2.5],
[0.0, -1.0, 0.0, 2.0],
[0.0, 0.0, 0.0, 1.0]], dtype=np.float32)
def test_read_mgh():
# test.mgz was generated by the following command
# mri_volsynth --dim 3 4 5 2 --vol test.mgz
# --cdircos 1 2 3 --rdircos 2 3 1 --sdircos 3 1 2
# mri_volsynth is a FreeSurfer command
mgz_path = os.path.join(data_path, 'test.mgz')
mgz = load(mgz_path)
# header
h = mgz.get_header()
assert_equal(h['version'], 1)
assert_equal(h['type'], 3)
assert_equal(h['dof'], 0)
assert_equal(h['goodRASFlag'], 1)
assert_array_equal(h['dims'], [3, 4, 5, 2])
assert_array_almost_equal(h['mrparms'], [2.0, 0.0, 0.0, 0.0])
assert_array_almost_equal(h.get_zooms(), 1)
assert_array_almost_equal(h.get_vox2ras(), v2r)
assert_array_almost_equal(h.get_vox2ras_tkr(), v2rtkr)
# data. will be different for your own mri_volsynth invocation
v = mgz.get_data()
assert_almost_equal(v[1, 2, 3, 0], -0.3047, 4)
assert_almost_equal(v[1, 2, 3, 1], 0.0018, 4)
def test_write_mgh():
# write our data to a tmp file
v = np.arange(120)
v = v.reshape((5, 4, 3, 2)).astype(np.float32)
# form a MGHImage object using data and vox2ras matrix
img = MGHImage(v, v2r)
with InTemporaryDirectory():
save(img, 'tmpsave.mgz')
# read from the tmp file and see if it checks out
mgz = load('tmpsave.mgz')
h = mgz.get_header()
dat = mgz.get_data()
# Delete loaded image to allow file deletion by windows
del mgz
# header
assert_equal(h['version'], 1)
assert_equal(h['type'], 3)
assert_equal(h['dof'], 0)
assert_equal(h['goodRASFlag'], 1)
assert_array_equal(h['dims'], [5, 4, 3, 2])
assert_array_almost_equal(h['mrparms'], [0.0, 0.0, 0.0, 0.0])
assert_array_almost_equal(h.get_vox2ras(), v2r)
# data
assert_almost_equal(dat, v, 7)
def test_write_noaffine_mgh():
# now just save the image without the vox2ras transform
# and see if it uses the default values to save
v = np.ones((7, 13, 3, 22)).astype(np.uint8)
# form a MGHImage object using data
# and the default affine matrix (Note the "None")
img = MGHImage(v, None)
with InTemporaryDirectory():
save(img, 'tmpsave.mgz')
# read from the tmp file and see if it checks out
mgz = load('tmpsave.mgz')
h = mgz.get_header()
# Delete loaded image to allow file deletion by windows
del mgz
# header
assert_equal(h['version'], 1)
assert_equal(h['type'], 0) # uint8 for mgh
assert_equal(h['dof'], 0)
assert_equal(h['goodRASFlag'], 1)
assert_array_equal(h['dims'], [7, 13, 3, 22])
assert_array_almost_equal(h['mrparms'], [0.0, 0.0, 0.0, 0.0])
# important part -- whether default affine info is stored
ex_mdc = np.array([[-1, 0, 0],
[0, 0, -1],
[0, 1, 0]], dtype=np.float32)
assert_array_almost_equal(h['Mdc'], ex_mdc)
ex_pxyzc = np.array([0, 0, 0], dtype=np.float32)
assert_array_almost_equal(h['Pxyz_c'], ex_pxyzc)
def bad_dtype_mgh():
''' This function raises an MGHError exception because
uint16 is not a valid MGH datatype.
'''
# try to write an unsigned short and make sure it
# raises MGHError
v = np.ones((7, 13, 3, 22)).astype(np.uint16)
# form a MGHImage object using data
# and the default affine matrix (Note the "None")
img = MGHImage(v, None)
with TemporaryDirectory() as tmpdir:
save(img, os.path.join(tmpdir, 'tmpsave.mgz'))
# read from the tmp file and see if it checks out
mgz = load(os.path.join(tmpdir, 'tmpsave.mgz'))
def test_bad_dtype_mgh():
# Now test the above function
assert_raises(MGHError, bad_dtype_mgh)
def test_filename_exts():
# Test acceptable filename extensions
v = np.ones((7, 13, 3, 22)).astype(np.uint8)
# form a MGHImage object using data
# and the default affine matrix (Note the "None")
img = MGHImage(v, None)
# Check if these extensions allow round trip
for ext in ('.mgh', '.mgz', '.mgh.gz'):
with InTemporaryDirectory():
fname = 'tmpname' + ext
save(img, fname)
# read from the tmp file and see if it checks out
img_back = load(fname)
assert_array_equal(img_back.get_data(), v)
del img_back
def _mgh_rt(img, fobj):
file_map = {'image': FileHolder(fileobj=fobj)}
img.to_file_map(file_map)
return MGHImage.from_file_map(file_map)
def test_header_updating():
# Don't update the header information if the affine doesn't change.
# Luckily the test.mgz dataset had a bad set of cosine vectors, so these
# will be changed if the affine gets updated
mgz_path = os.path.join(data_path, 'test.mgz')
mgz = load(mgz_path)
hdr = mgz.get_header()
# Test against mri_info output
exp_aff = np.loadtxt(StringIO(unicode("""
1.0000 2.0000 3.0000 -13.0000
2.0000 3.0000 1.0000 -11.5000
3.0000 1.0000 2.0000 -11.5000
0.0000 0.0000 0.0000 1.0000""")))
assert_almost_equal(mgz.get_affine(), exp_aff, 6)
assert_almost_equal(hdr.get_affine(), exp_aff, 6)
# Test that initial wonky header elements have not changed
assert_equal(hdr['delta'], 1)
assert_almost_equal(hdr['Mdc'], exp_aff[:3, :3].T)
# Save, reload, same thing
img_fobj = BytesIO()
mgz2 = _mgh_rt(mgz, img_fobj)
hdr2 = mgz2.get_header()
assert_almost_equal(hdr2.get_affine(), exp_aff, 6)
assert_equal(hdr2['delta'], 1)
# Change affine, change underlying header info
exp_aff_d = exp_aff.copy()
exp_aff_d[0, -1] = -14
# This will (probably) become part of the official API
mgz2._affine[:] = exp_aff_d
mgz2.update_header()
assert_almost_equal(hdr2.get_affine(), exp_aff_d, 6)
RZS = exp_aff_d[:3, :3]
assert_almost_equal(hdr2['delta'], np.sqrt(np.sum(RZS ** 2, axis=0)))
assert_almost_equal(hdr2['Mdc'], (RZS / hdr2['delta']).T)
def test_cosine_order():
# Test we are interpreting the cosine order right
data = np.arange(60).reshape((3, 4, 5)).astype(np.int32)
aff = np.diag([2., 3, 4, 1])
aff[0] = [2, 1, 0, 10]
img = MGHImage(data, aff)
assert_almost_equal(img.get_affine(), aff, 6)
img_fobj = BytesIO()
img2 = _mgh_rt(img, img_fobj)
hdr2 = img2.get_header()
RZS = aff[:3, :3]
zooms = np.sqrt(np.sum(RZS ** 2, axis=0))
assert_almost_equal(hdr2['Mdc'], (RZS / zooms).T)
assert_almost_equal(hdr2['delta'], zooms)
|
python
|
import pathlib
import shutil
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--supplementary_pdf", type=str)
args = parser.parse_args()
supplementary = pathlib.Path(args.supplementary_pdf) if args.supplementary_pdf else None
assert supplementary is None or (supplementary.exists() and supplementary.is_file())
root = (pathlib.Path(__file__).parent / "..")
ignore_file_extensions = ["png", "gif"]
forbidden_words = ["jb", "cordonnier", "loukas", "jaggi", "epfml", "epfl", "mlbench", "epfml", "github", "arxiv", "eprint"]
filter_files = [".git", "tools", "__pycache__", ".DS_Store", ".zip"]
def copy_anonymized(source_file, target_file):
with open(source_file, "r") as inf, open(target_file, "w") as outf:
for line in inf:
if any(forbidden.lower() in line.lower() for forbidden in forbidden_words):
outf.write("#- anonymized\n")
else:
outf.write(line)
try:
tmp_directory = root / "tmp"
if tmp_directory.exists():
shutil.rmtree(tmp_directory)
tmp_directory.mkdir()
files = [f for f in root.glob("**/*") if not any(s in str(f) for s in filter_files)]
print(root, files)
for source_file in files:
target_file = tmp_directory / str(source_file)[3:]
print(f"{source_file} -> {target_file}")
if source_file.is_dir():
continue
target_file.parent.mkdir(parents=True, exist_ok=True)
# Simply copy if not a text file
if any(str(source_file).endswith(ext) for ext in ignore_file_extensions):
shutil.copy(source_file, target_file)
# Otherwise anonymise the file line by line
else:
copy_anonymized(source_file, target_file)
# If we provide a PDF, we move the content of tmp/ into tmp/code/ and add the PDF
if supplementary:
tmp_directory.rename(root / "code")
tmp_directory.mkdir()
(root / "code").rename(tmp_directory / "code")
shutil.copy(supplementary, tmp_directory / "supplementary.pdf")
print(f"Added PDF file '{supplementary}'.")
# Make the zip archive.
zip_file = (root / "supplementary").absolute().resolve()
shutil.make_archive(zip_file, 'zip', tmp_directory)
print(f"Anonymous supplementary archive saved in '{zip_file}.zip'")
finally:
# Clean up
shutil.rmtree(tmp_directory)
|
python
|
import datetime
import json
from datetime import date,timedelta
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from operis.log import log
from ws4redis.redis_store import RedisMessage
from ws4redis.publisher import RedisPublisher
from selenium_tests.webdriver import PpfaWebDriver
#from selenium_tests.models import PpfaTest, PpfaTestAssertion
class BaseTests( PpfaWebDriver ):
def runTest(self):
self.assert_failed_requests = False
self.broadcast("Loading http://www.python.org")
self.browser.get("http://www.python.org")
self.runassertion( "Welcome to Python.org", "equals", "title" )
self.runassertion( "Python", "in", "title" )
|
python
|
from flask import Flask
from flask_assets import Bundle, Environment
from flask_compress import Compress
from flask_talisman import Talisman
from flask_wtf.csrf import CSRFProtect
from govuk_frontend_wtf.main import WTFormsHelpers
from jinja2 import ChoiceLoader, PackageLoader, PrefixLoader
from config import Config
csrf = CSRFProtect()
compress = Compress()
talisman = Talisman()
assets = Environment()
def create_app(config_class=Config):
app = Flask(__name__, static_url_path="/assets")
app.config.from_object(config_class)
app.jinja_env.lstrip_blocks = True
app.jinja_env.trim_blocks = True
app.jinja_loader = ChoiceLoader(
[
PackageLoader("app"),
PrefixLoader(
{
"govuk_frontend_jinja": PackageLoader("govuk_frontend_jinja"),
"govuk_frontend_wtf": PackageLoader("govuk_frontend_wtf"),
}
),
]
)
assets.init_app(app)
csrf.init_app(app)
compress.init_app(app)
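    # Content Security Policy: the sha256-... entries whitelist specific inline
    # scripts by hash instead of enabling 'unsafe-inline'.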
csp = {
"default-src": "'self'",
"script-src": [
"'self'",
"'sha256-+6WnXIl4mbFTCARd8N3COQmT3bJJmo32N8q8ZSQAIcU='",
"'sha256-l1eTVSK8DTnK8+yloud7wZUqFrI0atVo6VlC6PJvYaQ='",
],
"img-src": ["data:", "'self'"],
}
talisman.init_app(app, content_security_policy=csp)
js = Bundle("src/js/*.js", filters="jsmin", output="dist/js/custom-%(version)s.js")
if "js" not in assets:
assets.register("js", js)
WTFormsHelpers(app)
# Register blueprints
from app.demo import bp as demo_bp
from app.main import bp as main_bp
app.register_blueprint(main_bp)
app.register_blueprint(demo_bp)
return app
|
python
|
def operate(operator, *args):
return eval(operator.join([str(x) for x in args]))
print(operate("+", 1, 2, 3))
print(operate("*", 3, 4))
|
python
|
# Generated by Django 2.0.8 on 2019-06-05 09:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('protein', '0007_proteingproteinpair_references'),
('structure', '0017_structure_contact_representative_score'),
('residue', '0002_auto_20180504_1417'),
('contactnetwork', '0009_auto_20190605_1131'),
]
operations = [
migrations.RenameModel(
old_name='ConsensusInteractions',
new_name='ConsensusInteraction',
),
]
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Utility functions."""
import fnmatch
import logging
import os
import sys
import tarfile
from distutils.version import LooseVersion
import numpy as np
import torch
import yaml
def find_files(root_dir, query="*.wav", include_root_dir=True):
"""Find files recursively.
Args:
root_dir (str): Root directory to search.
query (str): Query to find.
include_root_dir (bool): If False, root_dir name is not included.
Returns:
list: List of found filenames.
"""
files = []
for root, dirnames, filenames in os.walk(root_dir, followlinks=True):
for filename in fnmatch.filter(filenames, query):
files.append(os.path.join(root, filename))
if not include_root_dir:
files = [file_.replace(root_dir + "/", "") for file_ in files]
return files
class NpyScpLoader(object):
"""Loader class for a fests.scp file of npy file.
Examples:
key1 /some/path/a.npy
key2 /some/path/b.npy
key3 /some/path/c.npy
key4 /some/path/d.npy
...
>>> loader = NpyScpLoader("feats.scp")
>>> array = loader["key1"]
"""
def __init__(self, feats_scp):
"""Initialize npy scp loader.
Args:
feats_scp (str): Kaldi-style feats.scp file with npy format.
"""
with open(feats_scp) as f:
lines = [line.replace("\n", "") for line in f.readlines()]
self.data = {}
for line in lines:
key, value = line.split()
self.data[key] = value
def get_path(self, key):
"""Get npy file path for a given key."""
return self.data[key]
def __getitem__(self, key):
"""Get ndarray for a given key."""
return np.load(self.data[key])
def __len__(self):
"""Return the length of the scp file."""
return len(self.data)
def __iter__(self):
"""Return the iterator of the scp file."""
return iter(self.data)
def keys(self):
"""Return the keys of the scp file."""
return self.data.keys()
def values(self):
"""Return the values of the scp file."""
for key in self.keys():
yield self[key]
def load_model(checkpoint, config=None):
"""Load trained model.
Args:
checkpoint (str): Checkpoint path.
config (dict): Configuration dict.
Return:
torch.nn.Module: Model instance.
"""
# load config if not provided
if config is None:
dirname = os.path.dirname(checkpoint)
config = os.path.join(dirname, "config.yml")
with open(config) as f:
config = yaml.load(f, Loader=yaml.Loader)
# lazy load for circular error
import parallel_wavegan.models
# get model and load parameters
model_class = getattr(
parallel_wavegan.models,
config.get("generator_type", "ParallelWaveGANGenerator")
)
model = model_class(**config["generator_params"])
model.load_state_dict(
torch.load(checkpoint, map_location="cpu")["model"]["generator"]
)
# add pqmf if needed
if config["generator_params"]["out_channels"] > 1:
# lazy load for circular error
from parallel_wavegan.layers import PQMF
pqmf_params = {}
# if LooseVersion(config.get("version", "0.1.0")) <= LooseVersion("0.4.2"):
# For compatibility, here we set default values in version <= 0.4.2
# pqmf_params.update(taps=62, cutoff_ratio=0.15, beta=9.0)
model.pqmf = PQMF(
subbands=config["generator_params"]["out_channels"],
**config.get("pqmf_params", pqmf_params),
)
return model
def download_pretrained_model(tag, download_dir=None):
"""Download pretrained model form google drive.
Args:
tag (str): Pretrained model tag.
download_dir (str): Directory to save downloaded files.
Returns:
str: Path of downloaded model checkpoint.
"""
    assert tag in PRETRAINED_MODEL_LIST, f"{tag} does not exist."
id_ = PRETRAINED_MODEL_LIST[tag]
if download_dir is None:
download_dir = os.path.expanduser("~/.cache/parallel_wavegan")
output_path = f"{download_dir}/{tag}.tar.gz"
os.makedirs(f"{download_dir}", exist_ok=True)
if not os.path.exists(output_path):
# lazy load for compatibility
import gdown
gdown.download(f"https://drive.google.com/uc?id={id_}", output_path, quiet=False)
with tarfile.open(output_path, 'r:*') as tar:
for member in tar.getmembers():
if member.isreg():
member.name = os.path.basename(member.name)
tar.extract(member, f"{download_dir}/{tag}")
checkpoint_path = find_files(f"{download_dir}/{tag}", "checkpoint*.pkl")
return checkpoint_path[0]
|
python
|
from Attribute import Attribute
from Distribution.DistributionFactory import DistributionFactory
class AttributeFactory:
"""
Class to generate and deploy specific attributes that
will share a distribution factory. Attribute types are:
- custom
- boolean
- string
- int or int64
- float or float64
Implements:
- create(name -> str, dtype -> str) --> Attribute()
Raises:
- AttributeError - dtype is not valid
"""
def __init__(self):
self.central_distribution_factory = DistributionFactory()
def create(self, name, dtype, drange = None):
attribute_class = None
if dtype == 'boolean':
attribute_class = Attribute.BooleanAttribute(name, self.central_distribution_factory)
elif dtype == 'string':
attribute_class = Attribute.StringAttribute(name, self.central_distribution_factory)
elif dtype == 'int64' or dtype == 'int':
attribute_class = Attribute.IntegerAttribute(name, self.central_distribution_factory)
elif dtype == 'float64' or dtype == 'float':
attribute_class = Attribute.FloatAttribute(name, self.central_distribution_factory)
elif dtype == 'custom':
attribute_class = Attribute.Attribute(name, self.central_distribution_factory)
else:
raise AttributeError('dtype is not valid (must be one of [boolean, string, int, int64, float, float64, custom])')
return attribute_class
def __str__(self):
return 'AttributeFactory()'
    def __repr__(self):
return self.__str__()
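
# Minimal usage sketch (illustrative only; assumes DistributionFactory needs no
# extra configuration for these dtypes):
#   factory = AttributeFactory()
#   age = factory.create('age', 'int')
#   label = factory.create('label', 'string')
#   factory.create('x', 'datetime')  # raises AttributeError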
|
python
|
from django.core.management.base import BaseCommand, CommandError
from charts.models import ChartConfig
class Command(BaseCommand):
help = "Delete all ChartConfig objects"
def handle(self, *args, **options):
all = ChartConfig.objects.all()
self.stdout.write("{} ChartConfig Type objects will be deleted".format(all.count()))
all.delete()
self.stdout.write("All ChartConfig objects are gone")
|
python
|
#!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
'''
Dirac Hartree-Fock
'''
import ctypes
import time
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import hf
from pyscf.scf import _vhf
import pyscf.scf.chkfile
def kernel(mf, conv_tol=1e-9, conv_tol_grad=None,
dump_chk=True, dm0=None, callback=None):
    '''The modified SCF kernel for Dirac-Hartree-Fock. The SCF is carried out
    in three steps: first, the two-electron part is approximated by the
    large-component integrals (LL|LL); next, the (SS|LL) interaction between
    the large and small components is added; finally, the SCF is converged
    with the small-component contributions (SS|SS).
    '''
if conv_tol_grad is None:
conv_tol_grad = numpy.sqrt(conv_tol)
logger.info(mf, 'Set gradient conv threshold to %g', conv_tol_grad)
if dm0 is None:
dm = mf.get_init_guess()
else:
dm = dm0
mf._coulomb_now = 'LLLL'
if dm0 is None and mf._coulomb_now.upper() == 'LLLL':
scf_conv, e_tot, mo_energy, mo_coeff, mo_occ \
= hf.kernel(mf, 1e-2, 1e-1,
dump_chk, dm0=dm, callback=callback)
dm = mf.make_rdm1(mo_coeff, mo_occ)
mf._coulomb_now = 'SSLL'
if dm0 is None and (mf._coulomb_now.upper() == 'SSLL' or
mf._coulomb_now.upper() == 'LLSS'):
scf_conv, e_tot, mo_energy, mo_coeff, mo_occ \
= hf.kernel(mf, 1e-3, 1e-1,
dump_chk, dm0=dm, callback=callback)
dm = mf.make_rdm1(mo_coeff, mo_occ)
mf._coulomb_now = 'SSSS'
if mf.with_ssss:
mf._coulomb_now = 'SSSS'
else:
mf._coulomb_now = 'SSLL'
return hf.kernel(mf, conv_tol, conv_tol_grad, dump_chk, dm0=dm,
callback=callback)
def get_jk_coulomb(mol, dm, hermi=1, coulomb_allow='SSSS',
opt_llll=None, opt_ssll=None, opt_ssss=None):
if coulomb_allow.upper() == 'LLLL':
logger.info(mol, 'Coulomb integral: (LL|LL)')
j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
n2c = j1.shape[1]
vj = numpy.zeros_like(dm)
vk = numpy.zeros_like(dm)
vj[...,:n2c,:n2c] = j1
vk[...,:n2c,:n2c] = k1
elif coulomb_allow.upper() == 'SSLL' \
or coulomb_allow.upper() == 'LLSS':
logger.info(mol, 'Coulomb integral: (LL|LL) + (SS|LL)')
vj, vk = _call_veff_ssll(mol, dm, hermi, opt_ssll)
j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
n2c = j1.shape[1]
vj[...,:n2c,:n2c] += j1
vk[...,:n2c,:n2c] += k1
else: # coulomb_allow == 'SSSS'
logger.info(mol, 'Coulomb integral: (LL|LL) + (SS|LL) + (SS|SS)')
vj, vk = _call_veff_ssll(mol, dm, hermi, opt_ssll)
j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
n2c = j1.shape[1]
vj[...,:n2c,:n2c] += j1
vk[...,:n2c,:n2c] += k1
j1, k1 = _call_veff_ssss(mol, dm, hermi, opt_ssss)
vj[...,n2c:,n2c:] += j1
vk[...,n2c:,n2c:] += k1
return vj, vk
def get_jk(mol, dm, hermi=1, coulomb_allow='SSSS'):
return get_jk_coulomb(mol, dm, hermi=hermi, coulomb_allow=coulomb_allow)
def get_hcore(mol):
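    # One-electron Dirac Hamiltonian in the kinetically balanced 2-spinor
    # basis, assembled block-wise as
    #   [[ V,  T            ],
    #    [ T,  W/(4c^2) - T ]]
    # where V is the nuclear attraction, T = <sigma.p sigma.p>/2 the kinetic
    # term and W = <sigma.p V sigma.p> the small-component nuclear term.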
n2c = mol.nao_2c()
n4c = n2c * 2
c = lib.param.LIGHT_SPEED
t = mol.intor_symmetric('cint1e_spsp') * .5
vn = mol.intor_symmetric('cint1e_nuc')
wn = mol.intor_symmetric('cint1e_spnucsp')
h1e = numpy.empty((n4c, n4c), numpy.complex)
h1e[:n2c,:n2c] = vn
h1e[n2c:,:n2c] = t
h1e[:n2c,n2c:] = t
h1e[n2c:,n2c:] = wn * (.25/c**2) - t
return h1e
def get_ovlp(mol):
n2c = mol.nao_2c()
n4c = n2c * 2
c = lib.param.LIGHT_SPEED
s = mol.intor_symmetric('cint1e_ovlp')
t = mol.intor_symmetric('cint1e_spsp')
s1e = numpy.zeros((n4c, n4c), numpy.complex)
s1e[:n2c,:n2c] = s
s1e[n2c:,n2c:] = t * (.5/c)**2
return s1e
def make_rdm1(mo_coeff, mo_occ):
return numpy.dot(mo_coeff*mo_occ, mo_coeff.T.conj())
def init_guess_by_minao(mol):
'''Initial guess in terms of the overlap to minimal basis.'''
dm = hf.init_guess_by_minao(mol)
return _proj_dmll(mol, dm, mol)
def init_guess_by_1e(mol):
'''Initial guess from one electron system.'''
mf = UHF(mol)
return mf.init_guess_by_1e(mol)
def init_guess_by_atom(mol):
'''Initial guess from atom calculation.'''
dm = hf.init_guess_by_atom(mol)
return _proj_dmll(mol, dm, mol)
def init_guess_by_chkfile(mol, chkfile_name, project=True):
from pyscf.scf import addons
chk_mol, scf_rec = pyscf.scf.chkfile.load_scf(chkfile_name)
if numpy.iscomplexobj(scf_rec['mo_coeff']):
mo = scf_rec['mo_coeff']
mo_occ = scf_rec['mo_occ']
if project:
dm = make_rdm1(addons.project_mo_r2r(chk_mol, mo, mol), mo_occ)
else:
dm = make_rdm1(mo, mo_occ)
else:
if scf_rec['mo_coeff'].ndim == 2: # nr-RHF
mo = scf_rec['mo_coeff']
mo_occ = scf_rec['mo_occ']
dm = reduce(numpy.dot, (mo*mo_occ, mo.T))
else: # nr-UHF
mo = scf_rec['mo_coeff']
mo_occ = scf_rec['mo_occ']
dm = reduce(numpy.dot, (mo[0]*mo_occ[0], mo[0].T)) \
+ reduce(numpy.dot, (mo[1]*mo_occ[1], mo[1].T))
dm = _proj_dmll(chk_mol, dm, mol)
return dm
def get_init_guess(mol, key='minao'):
if callable(key):
return key(mol)
elif key.lower() == '1e':
return init_guess_by_1e(mol)
elif key.lower() == 'atom':
return init_guess_by_atom(mol)
elif key.lower() == 'chkfile':
raise RuntimeError('Call pyscf.scf.hf.init_guess_by_chkfile instead')
else:
return init_guess_by_minao(mol)
def time_reversal_matrix(mol, mat):
''' T(A_ij) = A[T(i),T(j)]^*
'''
n2c = mol.nao_2c()
tao = numpy.asarray(mol.time_reversal_map())
# tao(i) = -j means T(f_i) = -f_j
# tao(i) = j means T(f_i) = f_j
idx = abs(tao)-1 # -1 for C indexing convention
#:signL = [(1 if x>0 else -1) for x in tao]
#:sign = numpy.hstack((signL, signL))
#:tmat = numpy.empty_like(mat)
#:for j in range(mat.__len__()):
#: for i in range(mat.__len__()):
#: tmat[idx[i],idx[j]] = mat[i,j] * sign[i]*sign[j]
#:return tmat.conjugate()
sign_mask = tao<0
if mat.shape[0] == n2c*2:
idx = numpy.hstack((idx, idx+n2c))
sign_mask = numpy.hstack((sign_mask, sign_mask))
tmat = mat.take(idx,axis=0).take(idx,axis=1)
tmat[sign_mask,:] *= -1
tmat[:,sign_mask] *= -1
return tmat.T
def analyze(mf, verbose=logger.DEBUG, **kwargs):
#from pyscf.tools import dump_mat
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(mf.stdout, verbose)
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
#mo_coeff = mf.mo_coeff
log.info('**** MO energy ****')
for i in range(len(mo_energy)):
if mo_occ[i] > 0:
log.info('occupied MO #%d energy= %.15g occ= %g', \
i+1, mo_energy[i], mo_occ[i])
else:
log.info('virtual MO #%d energy= %.15g occ= %g', \
i+1, mo_energy[i], mo_occ[i])
#TODO if mf.verbose >= logger.DEBUG:
#TODO log.debug(' ** MO coefficients **')
#TODO label = mf.mol.spinor_labels(True)
#TODO dump_mat.dump_rec(mf.stdout, mo_coeff, label, start=1)
#TODO dm = mf.make_rdm1(mo_coeff, mo_occ)
#TODO return mf.mulliken_pop(mf.mol, dm, mf.get_ovlp(), log)
def get_grad(mo_coeff, mo_occ, fock_ao):
'''DHF Gradients'''
occidx = numpy.where(mo_occ> 0)[0]
viridx = numpy.where(mo_occ==0)[0]
nocc = len(occidx)
nvir = len(viridx)
fock = reduce(numpy.dot, (mo_coeff.T.conj(), fock_ao, mo_coeff))
g = fock[occidx[:,None],viridx].T
return g.reshape(-1)
class UHF(hf.SCF):
__doc__ = hf.SCF.__doc__ + '''
Attributes for Dirac-Hartree-Fock
with_ssss : bool, for Dirac-Hartree-Fock only
If False, ignore small component integrals (SS|SS). Default is True.
with_gaunt : bool, for Dirac-Hartree-Fock only
Default is False.
with_breit : bool, for Dirac-Hartree-Fock only
Gaunt + gauge term. Default is False.
Examples:
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = scf.RHF(mol)
>>> e0 = mf.scf()
>>> mf = scf.DHF(mol)
>>> e1 = mf.scf()
>>> print('Relativistic effects = %.12f' % (e1-e0))
Relativistic effects = -0.000008854205
'''
def __init__(self, mol):
hf.SCF.__init__(self, mol)
self.conv_tol = 1e-8
self.with_ssss = True
self._coulomb_now = 'SSSS' # 'SSSS' ~ LLLL+LLSS+SSSS
self.with_gaunt = False
self.with_breit = False
self.opt = (None, None, None, None) # (opt_llll, opt_ssll, opt_ssss, opt_gaunt)
self._keys = set(self.__dict__.keys())
def dump_flags(self):
hf.SCF.dump_flags(self)
logger.info(self, 'with_ssss %s, with_gaunt %s, with_breit %s',
self.with_ssss, self.with_gaunt, self.with_breit)
logger.info(self, 'light speed = %s', lib.param.LIGHT_SPEED)
return self
def get_hcore(self, mol=None):
if mol is None:
mol = self.mol
return get_hcore(mol)
def get_ovlp(self, mol=None):
if mol is None:
mol = self.mol
return get_ovlp(mol)
def get_grad(self, mo_coeff, mo_occ, fock=None):
if fock is None:
dm1 = self.make_rdm1(mo_coeff, mo_occ)
fock = self.get_hcore(self.mol) + self.get_veff(self.mol, dm1)
return get_grad(mo_coeff, mo_occ, fock)
def init_guess_by_minao(self, mol=None):
'''Initial guess in terms of the overlap to minimal basis.'''
if mol is None: mol = self.mol
return init_guess_by_minao(mol)
def init_guess_by_atom(self, mol=None):
if mol is None: mol = self.mol
return init_guess_by_atom(mol)
def init_guess_by_chkfile(self, chkfile=None, project=True):
if chkfile is None: chkfile = self.chkfile
return init_guess_by_chkfile(self.mol, chkfile, project=project)
def build(self, mol=None):
if self.verbose >= logger.WARN:
self.check_sanity()
if self.direct_scf:
self.opt = self.init_direct_scf(self.mol)
def get_occ(self, mo_energy=None, mo_coeff=None):
if mo_energy is None: mo_energy = self.mo_energy
mol = self.mol
c = lib.param.LIGHT_SPEED
n4c = len(mo_energy)
n2c = n4c // 2
mo_occ = numpy.zeros(n2c * 2)
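        # Occupy only positive-energy spinors: the lowest n2c solutions are the
        # negative-energy states (with energies near -2c^2) and are skipped.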
if mo_energy[n2c] > -1.999 * c**2:
mo_occ[n2c:n2c+mol.nelectron] = 1
else:
n = 0
for i, e in enumerate(mo_energy):
if e > -1.999 * c**2 and n < mol.nelectron:
mo_occ[i] = 1
n += 1
if self.verbose >= logger.INFO:
logger.info(self, 'HOMO %d = %.12g LUMO %d = %.12g',
n2c+mol.nelectron, mo_energy[n2c+mol.nelectron-1],
n2c+mol.nelectron+1, mo_energy[n2c+mol.nelectron])
logger.debug(self, 'NES mo_energy = %s', mo_energy[:n2c])
logger.debug(self, 'PES mo_energy = %s', mo_energy[n2c:])
return mo_occ
# full density matrix for UHF
def make_rdm1(self, mo_coeff=None, mo_occ=None):
if mo_coeff is None: mo_coeff = self.mo_coeff
if mo_occ is None: mo_occ = self.mo_occ
return make_rdm1(mo_coeff, mo_occ)
def init_direct_scf(self, mol=None):
if mol is None: mol = self.mol
def set_vkscreen(opt, name):
opt._this.contents.r_vkscreen = _vhf._fpointer(name)
opt_llll = _vhf.VHFOpt(mol, 'cint2e', 'CVHFrkbllll_prescreen',
'CVHFrkbllll_direct_scf',
'CVHFrkbllll_direct_scf_dm')
opt_llll.direct_scf_tol = self.direct_scf_tol
set_vkscreen(opt_llll, 'CVHFrkbllll_vkscreen')
opt_ssss = _vhf.VHFOpt(mol, 'cint2e_spsp1spsp2',
'CVHFrkbllll_prescreen',
'CVHFrkbssss_direct_scf',
'CVHFrkbssss_direct_scf_dm')
opt_ssss.direct_scf_tol = self.direct_scf_tol
set_vkscreen(opt_ssss, 'CVHFrkbllll_vkscreen')
opt_ssll = _vhf.VHFOpt(mol, 'cint2e_spsp1',
'CVHFrkbssll_prescreen',
'CVHFrkbssll_direct_scf',
'CVHFrkbssll_direct_scf_dm')
opt_ssll.direct_scf_tol = self.direct_scf_tol
set_vkscreen(opt_ssll, 'CVHFrkbssll_vkscreen')
#TODO: prescreen for gaunt
opt_gaunt = None
return opt_llll, opt_ssll, opt_ssss, opt_gaunt
def get_jk(self, mol=None, dm=None, hermi=1):
if mol is None: mol = self.mol
if dm is None: dm = self.make_rdm1()
t0 = (time.clock(), time.time())
verbose_bak, mol.verbose = mol.verbose, self.verbose
stdout_bak, mol.stdout = mol.stdout , self.stdout
if self.direct_scf and self.opt[0] is None:
self.opt = self.init_direct_scf(mol)
opt_llll, opt_ssll, opt_ssss, opt_gaunt = self.opt
vj, vk = get_jk_coulomb(mol, dm, hermi, self._coulomb_now,
opt_llll, opt_ssll, opt_ssss)
if self.with_breit:
if 'SSSS' in self._coulomb_now.upper() or not self.with_ssss:
vj1, vk1 = _call_veff_gaunt_breit(mol, dm, hermi, opt_gaunt, True)
logger.info(self, 'Add Breit term')
vj += vj1
vk += vk1
elif self.with_gaunt and 'SS' in self._coulomb_now.upper():
logger.info(self, 'Add Gaunt term')
vj1, vk1 = _call_veff_gaunt_breit(mol, dm, hermi, opt_gaunt, False)
vj += vj1
vk += vk1
mol.verbose = verbose_bak
mol.stdout = stdout_bak
logger.timer(self, 'vj and vk', *t0)
return vj, vk
def get_veff(self, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
'''Dirac-Coulomb'''
if mol is None: mol = self.mol
if dm is None: dm = self.make_rdm1()
if self.direct_scf:
ddm = numpy.array(dm, copy=False) - numpy.array(dm_last, copy=False)
vj, vk = self.get_jk(mol, ddm, hermi=hermi)
return numpy.array(vhf_last, copy=False) + vj - vk
else:
vj, vk = self.get_jk(mol, dm, hermi=hermi)
return vj - vk
def scf(self, dm0=None):
cput0 = (time.clock(), time.time())
self.build()
self.dump_flags()
self.converged, self.e_tot, \
self.mo_energy, self.mo_coeff, self.mo_occ \
= kernel(self, self.conv_tol, self.conv_tol_grad,
dm0=dm0, callback=self.callback)
logger.timer(self, 'SCF', *cput0)
self._finalize()
return self.e_tot
def analyze(self, verbose=None):
if verbose is None: verbose = self.verbose
return analyze(self, verbose)
def x2c(self):
import pyscf.scf.x2c
x2chf = pyscf.scf.x2c.UHF(self.mol)
x2chf.__dict__.update(self.__dict__)
return x2chf
class HF1e(UHF):
def scf(self, *args):
logger.info(self, '\n')
logger.info(self, '******** 1 electron system ********')
self.converged = True
h1e = self.get_hcore(self.mol)
s1e = self.get_ovlp(self.mol)
self.mo_energy, self.mo_coeff = self.eig(h1e, s1e)
self.mo_occ = numpy.zeros_like(self.mo_energy)
n2c = len(self.mo_occ) // 2
self.mo_occ[n2c] = 1
self.e_tot = self.mo_energy[n2c] + self.mol.energy_nuc()
return self.e_tot
class RHF(UHF):
'''Dirac-RHF'''
def __init__(self, mol):
        if mol.nelectron % 2 != 0:
raise ValueError('Invalid electron number %i.' % mol.nelectron)
UHF.__init__(self, mol)
# full density matrix for RHF
def make_rdm1(self, mo_coeff=None, mo_occ=None):
r'''D/2 = \psi_i^\dag\psi_i = \psi_{Ti}^\dag\psi_{Ti}
D(UHF) = \psi_i^\dag\psi_i + \psi_{Ti}^\dag\psi_{Ti}
RHF average the density of spin up and spin down:
D(RHF) = (D(UHF) + T[D(UHF)])/2
'''
if mo_coeff is None: mo_coeff = self.mo_coeff
if mo_occ is None: mo_occ = self.mo_occ
dm = make_rdm1(mo_coeff, mo_occ)
return (dm + time_reversal_matrix(self.mol, dm)) * .5
def get_occ(self, mo_energy=None, mo_coeff=None):
if mo_energy is None: mo_energy = self.mo_energy
mol = self.mol
c = lib.param.LIGHT_SPEED
n4c = len(mo_energy)
n2c = n4c // 2
mo_occ = numpy.zeros(n2c * 2)
if mo_energy[n2c] > -1.999 * c**2:
mo_occ[n2c:n2c+mol.nelectron] = 1
else:
n = 0
for i, e in enumerate(mo_energy):
if e > -1.999 * c**2 and n < mol.nelectron:
mo_occ[i] = 1
n += 1
if self.verbose >= logger.INFO:
logger.info(self, 'HOMO %d = %.12g, LUMO %d = %.12g,',
(n2c+mol.nelectron)//2, mo_energy[n2c+mol.nelectron-1],
(n2c+mol.nelectron)//2+1, mo_energy[n2c+mol.nelectron])
logger.debug(self, 'NES mo_energy = %s', mo_energy[:n2c])
logger.debug(self, 'PES mo_energy = %s', mo_energy[n2c:])
return mo_occ
def _jk_triu_(vj, vk, hermi):
if hermi == 0:
if vj.ndim == 2:
vj = lib.hermi_triu(vj, 1)
else:
for i in range(vj.shape[0]):
vj[i] = lib.hermi_triu(vj[i], 1)
else:
if vj.ndim == 2:
vj = lib.hermi_triu(vj, hermi)
vk = lib.hermi_triu(vk, hermi)
else:
for i in range(vj.shape[0]):
vj[i] = lib.hermi_triu(vj[i], hermi)
vk[i] = lib.hermi_triu(vk[i], hermi)
return vj, vk
def _call_veff_llll(mol, dm, hermi=1, mf_opt=None):
if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
n2c = dm.shape[0] // 2
dms = dm[:n2c,:n2c].copy()
else:
n2c = dm[0].shape[0] // 2
dms = []
for dmi in dm:
dms.append(dmi[:n2c,:n2c].copy())
vj, vk = _vhf.rdirect_mapdm('cint2e', 's8',
('ji->s2kl', 'jk->s1il'), dms, 1,
mol._atm, mol._bas, mol._env, mf_opt)
return _jk_triu_(vj, vk, hermi)
def _call_veff_ssll(mol, dm, hermi=1, mf_opt=None):
if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
n_dm = 1
n2c = dm.shape[0] // 2
dmll = dm[:n2c,:n2c].copy()
dmsl = dm[n2c:,:n2c].copy()
dmss = dm[n2c:,n2c:].copy()
dms = (dmll, dmss, dmsl)
else:
n_dm = len(dm)
n2c = dm[0].shape[0] // 2
dms = [dmi[:n2c,:n2c].copy() for dmi in dm] \
+ [dmi[n2c:,n2c:].copy() for dmi in dm] \
+ [dmi[n2c:,:n2c].copy() for dmi in dm]
jks = ('lk->s2ij',) * n_dm \
+ ('ji->s2kl',) * n_dm \
+ ('jk->s1il',) * n_dm
c1 = .5 / lib.param.LIGHT_SPEED
vx = _vhf.rdirect_bindm('cint2e_spsp1', 's4', jks, dms, 1,
mol._atm, mol._bas, mol._env, mf_opt) * c1**2
vj = numpy.zeros((n_dm,n2c*2,n2c*2), dtype=numpy.complex)
vk = numpy.zeros((n_dm,n2c*2,n2c*2), dtype=numpy.complex)
vj[:,n2c:,n2c:] = vx[ :n_dm ,:,:]
vj[:,:n2c,:n2c] = vx[n_dm :n_dm*2,:,:]
vk[:,n2c:,:n2c] = vx[n_dm*2: ,:,:]
if n_dm == 1:
vj = vj.reshape(vj.shape[1:])
vk = vk.reshape(vk.shape[1:])
return _jk_triu_(vj, vk, hermi)
def _call_veff_ssss(mol, dm, hermi=1, mf_opt=None):
c1 = .5 / lib.param.LIGHT_SPEED
if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
n2c = dm.shape[0] // 2
dms = dm[n2c:,n2c:].copy()
else:
n2c = dm[0].shape[0] // 2
dms = []
for dmi in dm:
dms.append(dmi[n2c:,n2c:].copy())
vj, vk = _vhf.rdirect_mapdm('cint2e_spsp1spsp2', 's8',
('ji->s2kl', 'jk->s1il'), dms, 1,
mol._atm, mol._bas, mol._env, mf_opt) * c1**4
return _jk_triu_(vj, vk, hermi)
def _call_veff_gaunt_breit(mol, dm, hermi=1, mf_opt=None, with_breit=False):
if with_breit:
intor_prefix = 'cint2e_breit_'
else:
intor_prefix = 'cint2e_'
if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
n_dm = 1
n2c = dm.shape[0] // 2
dmls = dm[:n2c,n2c:].copy()
dmsl = dm[n2c:,:n2c].copy()
dmll = dm[:n2c,:n2c].copy()
dmss = dm[n2c:,n2c:].copy()
dms = [dmsl, dmsl, dmls, dmll, dmss]
else:
n_dm = len(dm)
n2c = dm[0].shape[0] // 2
dmll = [dmi[:n2c,:n2c].copy() for dmi in dm]
dmls = [dmi[:n2c,n2c:].copy() for dmi in dm]
dmsl = [dmi[n2c:,:n2c].copy() for dmi in dm]
dmss = [dmi[n2c:,n2c:].copy() for dmi in dm]
dms = dmsl + dmsl + dmls + dmll + dmss
vj = numpy.zeros((n_dm,n2c*2,n2c*2), dtype=numpy.complex)
vk = numpy.zeros((n_dm,n2c*2,n2c*2), dtype=numpy.complex)
jks = ('lk->s1ij',) * n_dm \
+ ('jk->s1il',) * n_dm
vx = _vhf.rdirect_bindm(intor_prefix+'ssp1ssp2', 's1', jks, dms[:n_dm*2], 1,
mol._atm, mol._bas, mol._env, mf_opt)
vj[:,:n2c,n2c:] = vx[:n_dm,:,:]
vk[:,:n2c,n2c:] = vx[n_dm:,:,:]
jks = ('lk->s1ij',) * n_dm \
+ ('li->s1kj',) * n_dm \
+ ('jk->s1il',) * n_dm
vx = _vhf.rdirect_bindm(intor_prefix+'ssp1sps2', 's1', jks, dms[n_dm*2:], 1,
mol._atm, mol._bas, mol._env, mf_opt)
vj[:,:n2c,n2c:]+= vx[ :n_dm ,:,:]
vk[:,n2c:,n2c:] = vx[n_dm :n_dm*2,:,:]
vk[:,:n2c,:n2c] = vx[n_dm*2: ,:,:]
if hermi == 1:
vj[:,n2c:,:n2c] = vj[:,:n2c,n2c:].transpose(0,2,1).conj()
vk[:,n2c:,:n2c] = vk[:,:n2c,n2c:].transpose(0,2,1).conj()
elif hermi == 2:
vj[:,n2c:,:n2c] = -vj[:,:n2c,n2c:].transpose(0,2,1).conj()
vk[:,n2c:,:n2c] = -vk[:,:n2c,n2c:].transpose(0,2,1).conj()
else:
raise NotImplementedError
if n_dm == 1:
vj = vj.reshape(n2c*2,n2c*2)
vk = vk.reshape(n2c*2,n2c*2)
c1 = .5 / lib.param.LIGHT_SPEED
if with_breit:
return vj*c1**2, vk*c1**2
else:
return -vj*c1**2, -vk*c1**2
def _proj_dmll(mol_nr, dm_nr, mol):
from pyscf.scf import addons
proj = addons.project_mo_nr2r(mol_nr, 1, mol)
n2c = proj.shape[0]
n4c = n2c * 2
dm = numpy.zeros((n4c,n4c), dtype=complex)
# *.5 because alpha and beta are summed in project_mo_nr2r
dm_ll = reduce(numpy.dot, (proj, dm_nr*.5, proj.T.conj()))
dm[:n2c,:n2c] = (dm_ll + time_reversal_matrix(mol, dm_ll)) * .5
return dm
if __name__ == '__main__':
import pyscf.gto
mol = pyscf.gto.Mole()
mol.verbose = 5
mol.output = 'out_dhf'
mol.atom.extend([['He', (0.,0.,0.)], ])
mol.basis = {
'He': [(0, 0, (1, 1)),
(0, 0, (3, 1)),
(1, 0, (1, 1)), ]}
mol.build()
##############
# SCF result
method = UHF(mol)
energy = method.scf() #-2.38146942868
print(energy)
method.with_gaunt = True
print(method.scf()) # -2.38138339005
method.with_breit = True
print(method.scf()) # -2.38138339005
|
python
|
import sys
import requests
import time
import datetime
# API URLS
URL_STATS_PROVIDER = 'https://api.nicehash.com/api?method=stats.provider&addr={}'
URL_BTC_PRICE = 'https://min-api.cryptocompare.com/data/pricemulti?fsyms=BTC&tsyms={}'
WALLET = '1P5PNW6Wd53QiZLdCs9EXNHmuPTX3rD6hW' #Dummy wallet. To be overwritten
###############################################################################
def req_url(url):
try:
response = requests.get(url).json()
except Exception as e:
print("Error using API: {0:s}".format(str(e)))
return None
return response
###############################################################################
def get_btc_price(fiat='USD'):
response = req_url(URL_BTC_PRICE.format(fiat))
if response is None:
return None
curr_price = response['BTC'][fiat]
return curr_price
def get_balances(wallet=WALLET):
response = req_url(URL_STATS_PROVIDER.format(wallet))
if response is None:
return None
if 'error' in response['result']:
print(response['result']['error'])
return None
curr_stats = response['result']['stats']
num_resp = len(curr_stats)
dictlist = [dict() for x in range(35)]
    # Map NiceHash algorithm indices to names; index 34 collects anything else.
    algo_names = ['Scrypt', 'SHA256', 'ScryptNf', 'X11', 'X13', 'Keccak', 'X15',
                  'Nist5', 'NeoScrypt', 'Lyra2RE', 'WhirlpoolX', 'Qubit', 'Quark',
                  'Axiom', 'Lyra2REv2', 'ScryptJaneNf16', 'Blake256r8',
                  'Blake256r14', 'Blake256r8vnl', 'Hodl', 'DaggerHashimoto',
                  'Decred', 'CryptoNight', 'Lbry', 'Equihash', 'Pascal',
                  'X11Gost', 'Sia', 'Blake2s', 'Skunk', 'CryptoNightV7',
                  'CryptoNightHeavy', 'Lyra2Z', 'X16R', 'other']
    for idx_algo in range(35):
        dictlist[idx_algo]['algo_str'] = algo_names[idx_algo]
        dictlist[idx_algo]['balance'] = float(0)
for idx_resp in range(num_resp):
current_resp = curr_stats[idx_resp]
if( (current_resp['algo']<=33) and (current_resp['algo']>=0) ):
dictlist[current_resp['algo']]['balance'] = float(current_resp['balance'])
else:
dictlist[34]['balance'] = dictlist[34]['balance']+float(current_resp['balance'])
return dictlist
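
###############################################################################
# Minimal usage sketch (illustrative only; WALLET above is a dummy address and
# should be replaced, e.g. via the command line, before querying real data).
if __name__ == '__main__':
    wallet = sys.argv[1] if len(sys.argv) > 1 else WALLET
    price = get_btc_price('USD')
    balances = get_balances(wallet)
    if price is not None and balances is not None:
        total_btc = sum(entry['balance'] for entry in balances)
        print("Total unpaid balance: {:.8f} BTC (~{:.2f} USD)".format(
            total_btc, total_btc * price))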
|
python
|
import copy
import argparse
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.models as models
from LossFunction import ContentLoss, StyleLoss
plt.switch_backend('agg')
def image_loader(image_name, transform, device):
image = Image.open(image_name)
image = transform(image).unsqueeze(0)
return image.to(device, torch.float)
class Normalization(nn.Module):
def __init__(self, mean, std):
super(Normalization, self).__init__()
self.mean = torch.tensor(mean).view(-1, 1, 1)
self.std = torch.tensor(std).view(-1, 1, 1)
def forward(self, img):
return (img - self.mean) / self.std
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
style_img, content_img,
content_layers, style_layers, device):
'''
    Add content-loss and style-loss layers after the selected convolution layers by building a new Sequential module.
'''
cnn = copy.deepcopy(cnn)
content_loss_list = []
style_loss_list = []
normalization = Normalization(normalization_mean, normalization_std).to(device)
model = nn.Sequential(normalization)
i = 0
for layer in cnn.children():
if isinstance(layer, nn.Conv2d):
i += 1
name = 'conv_{}'.format(i)
elif isinstance(layer, nn.ReLU):
name = 'relu_{}'.format(i)
layer = nn.ReLU(inplace=False)
elif isinstance(layer, nn.MaxPool2d):
name = 'pool_{}'.format(i)
elif isinstance(layer, nn.BatchNorm2d):
name = 'bn_{}'.format(i)
else:
raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
model.add_module(name, layer)
if name in content_layers:
# add content loss
target = model(content_img).detach()
content_loss = ContentLoss(target)
model.add_module("content_loss_{}".format(i), content_loss)
content_loss_list.append(content_loss)
if name in style_layers:
# add style loss
target_feature = model(style_img).detach()
style_loss = StyleLoss(target_feature)
model.add_module("style_loss_{}".format(i), style_loss)
style_loss_list.append(style_loss)
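    # Trim everything after the last content/style loss module; later layers do
    # not affect the losses being optimized.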
for i in range(len(model) - 1, -1, -1):
if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
break
model = model[:(i + 1)]
return model, style_loss_list, content_loss_list
def get_input_optimizer(input_img):
'''
    Use the L-BFGS optimizer to run gradient descent on the input image itself,
    minimising the combined content/style losses.
'''
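    # only the input image's pixels are optimised; the pretrained CNN weights stay frozen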
optimizer = optim.LBFGS([input_img.requires_grad_()])
return optimizer
def run(cnn, content_layers_default, style_layers_default, content_img, style_img, input_img, device,
num_steps=300, style_weight=10000, content_weight=1):
"""
    Run the neural style transfer optimisation loop for the given images.
"""
style_loss_list = []
content_loss_list = []
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
model, style_losses, content_losses = get_style_model_and_losses(
cnn, cnn_normalization_mean, cnn_normalization_std, style_img, content_img,
content_layers_default, style_layers_default, device)
optimizer = get_input_optimizer(input_img)
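    # a one-element list lets the closure below mutate the step counter in place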
epoch = [0]
while epoch[0] <= num_steps:
def closure():
input_img.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_img)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.loss
for cl in content_losses:
content_score += cl.loss
style_score *= style_weight
content_score *= content_weight
loss = style_score + content_score
loss.backward()
epoch[0] += 1
if epoch[0] % 10 == 0:
style_loss_list.append(style_score.item())
content_loss_list.append(content_score.item())
if epoch[0] % 50 == 0:
print("epoch {}: Style Loss : {:4f} Content Loss: {:4f}".format(epoch[0], style_score.item(), content_score.item()))
return style_score + content_score
optimizer.step(closure)
input_img.data.clamp_(0, 1)
return input_img, style_loss_list, content_loss_list
def style_transfer(style_img, content_img, outputpath='./result.png', num_steps=500, style_weight=100000, content_weight=1, name='test', loss_dir='losses'):
'''
    The main function for neural style transfer.
    :param style_img: path of the image whose style should be transferred
    :param content_img: path of the original image whose content is preserved
    :param outputpath: path where the stylised output image is saved
    :param num_steps: number of optimisation steps
    :param style_weight: weight of the style loss
    :param content_weight: weight of the content loss
    :param name: suffix appended to the saved loss-curve file name
    :param loss_dir: directory where the loss curve is saved
'''
imsize = 512
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
transform = transforms.Compose([
transforms.Resize(imsize),
transforms.CenterCrop(imsize),
transforms.ToTensor()
])
style_img = image_loader(style_img, transform, device)
content_img = image_loader(content_img, transform, device)
# use the features module of pretrained vgg19
# need the output of the individual convolution layers to measure content and style loss.
cnn = models.vgg19(pretrained=True).features.to(device).eval()
# desired depth layers to compute style/content losses :
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
input_img = content_img.clone()
output, style_loss, content_loss = run(cnn, content_layers_default, style_layers_default, content_img, style_img, input_img, device,
num_steps=num_steps, style_weight=style_weight, content_weight=content_weight)
output = output.detach().cpu().numpy().squeeze(0).transpose([1,2,0])
plt.imsave(outputpath, output)
plt.clf()
x = [i*10 for i in range(len(style_loss))]
plt.plot(x, style_loss, label='style_loss')
plt.plot(x, content_loss, label='content_loss')
plt.xlabel('steps')
plt.ylabel('loss')
plt.legend()
plt.savefig(os.path.join(loss_dir, "loss" + name))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--style_img_path',
default='./data/style/style3.jpg',
help='path of style image',
type=str)
parser.add_argument('--content_img_dir',
default='./data/content',
help='directory of content images',
type=str)
parser.add_argument('--result_dir',
default='./results',
help='directory to save results',
type=str)
parser.add_argument('--num_steps',
default=500,
help='number of steps to update',
type=int)
parser.add_argument('--style_weight',
default=100000,
help='weight of style',
type=int)
parser.add_argument('--content_weight',
default=1,
help='weight of content',
type=int)
args = parser.parse_args()
style_img_path = args.style_img_path
content_img_dir = args.content_img_dir
result_dir = args.result_dir
num_steps = args.num_steps
style_weight = args.style_weight
content_weight = args.content_weight
if not os.path.isdir(result_dir):
os.mkdir(result_dir)
loss_dir = os.path.join(result_dir, 'losses')
if not os.path.isdir(loss_dir):
os.mkdir(loss_dir)
for img in os.listdir(content_img_dir):
content_img_path = os.path.join(content_img_dir, img)
outputpath = os.path.join(result_dir, 'result-' + img)
style_transfer(style_img_path, content_img_path, outputpath=outputpath, num_steps=num_steps, style_weight=style_weight, content_weight=content_weight, name=img, loss_dir=loss_dir)
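# Example command line (hypothetical script name; the paths are just the
# argparse defaults above):
#   python style_transfer.py --style_img_path ./data/style/style3.jpg \
#       --content_img_dir ./data/content --result_dir ./results --num_steps 500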
|
python
|
import os
from dotenv import load_dotenv
dirname = os.path.dirname(__file__)
try:
#print(os.path.join(dirname, '.env'))
load_dotenv(dotenv_path=os.path.join(dirname, '.env'))
except FileNotFoundError:
pass
|
python
|
#!/home/alvaro/.miniconda3/envs/fazip/bin/python
from fazip import fazip
import sys
import getpass
def do_extraction(zipname):
host, user, pwd = fazip.get_mysql_info()
db = fazip.connect_to_db(user, pwd, host)
fazip.unzip_files(db, zipname)
db.close()
def do_compression(zipname, files):
host, user, pwd = fazip.get_mysql_info()
db = fazip.connect_to_db(user, pwd, host)
str_files = " ".join(files)
fazip.zip_files(db, zipname, str_files)
db.close()
def do_list(zipname):
print("TODO")
pass
def do_add_user(database):
print("\n[fazip] Enter the new user's name")
user = input("[fazip]> ")
if fazip.add_user_db(database, user):
print("[fazip] User successfully registered!")
else:
print("[fazip] User already registered :(")
def do_edit_user(database):
print("\n[fazip] Enter the user's name you want to edit")
user = input("[fazip]> ")
if fazip.modify_user_db(database, user):
print("[fazip] User successfully edited!")
else:
print("[fazip] User doesn't exists :(")
def do_remove_user(database):
print("\n[fazip] Enter the user's name you want to remove")
user = input("[fazip]> ")
if fazip.remove_user_db(database, user):
print("[fazip] User successfully removed!")
else:
print("[fazip] User doesn't exists :(")
def do_list_users(database):
users = fazip.get_users_db(database)
if users:
print("\n[fazip] Those are the registered users:")
for user in users:
print("=> {}".format(user))
else:
print("\n[fazip] There are no users registered :(")
def do_change_pass(database):
print("[fazip] WARNING: You will not be able to extract files" +
"from previously created zip files")
verify = input("[fazip] Proceed (y/[n])? ")
if verify == 'y' or verify == 'yes':
        oldp = getpass.getpass("[fazip] Enter your current password: ")
if fazip.get_password_zip(database) == oldp:
first = getpass.getpass("[fazip] Enter your new password: ")
second = getpass.getpass("[fazip] Enter your new password again: ")
if first == second:
fazip.set_password_zip(database, first)
print("\n[fazip] Password changed successfully!")
print("[fazip] Please log in again")
sys.exit()
else:
print("[fazip] Those passwords didn't match.")
else:
print("[fazip] Access denied")
sys.exit()
def print_menu(options):
print(" ====================================")
print("|| \t CONFIGURATION MENU\t ||")
print(" ====================================")
print("\t-> {}) Exit".format(options['exit_menu']))
print("\t-> {}) List users".format(options['list_users']))
print("\t-> {}) Add user".format(options['add_user']))
print("\t-> {}) Edit user".format(options['edit_user']))
print("\t-> {}) Remove user".format(options['remove_user']))
print("\t-> {}) Change password (CRITICAL)".format(options['change_pass']))
def menu(options, selection):
pass
def do_configuration(login=False):
host, user, pwd = fazip.get_mysql_info()
db = fazip.connect_to_db(user, pwd, host)
options = {'exit_menu': 0, 'list_users': 1, 'add_user': 2,
'edit_user': 3, 'remove_user': 4, 'change_pass': 5}
if not login:
pwd = getpass.getpass("Enter password: ")
if not fazip.get_password_zip(db) == pwd:
print("[fazip] Access denied")
sys.exit()
print_menu(options)
try:
selection = int(input("\n[fazip]> "))
except Exception:
selection = -1
print("[fazip] Seriously? That's not a number -.-")
if options['exit_menu'] <= selection <= options['change_pass']:
if selection == options['add_user']:
do_add_user(db)
elif selection == options['edit_user']:
do_edit_user(db)
elif selection == options['remove_user']:
do_remove_user(db)
elif selection == options['list_users']:
do_list_users(db)
elif selection == options['exit_menu']:
sys.exit()
elif selection == options['change_pass']:
do_change_pass(db)
db.close()
else:
print("[fazip] Wrong answer")
do_configuration(True)
def print_help(redirected=False):
if redirected:
print("[fazip] It looks like you missed something :(")
else:
print("[fazip] Help:")
print("\n- Extract an existing zip file with original structure:")
print("\n fazip x <zipfile>")
print("\n- List the contents of a zip file:")
print("\n fazip l <zipfile>")
print("\n- Archive a file or directory:")
print("\n fazip a <zipfile> <files>")
print("\n- Configuration menu:")
print("\n fazip config")
print("\n- Display this help:")
print("\n fazip h")
print()
def main():
    if len(sys.argv) == 3 and sys.argv[1] == 'x':
        do_extraction(sys.argv[2])
    elif len(sys.argv) == 3 and sys.argv[1] == 'l':
        do_list(sys.argv[2])
elif len(sys.argv) >= 4 and sys.argv[1] == 'a':
do_compression(sys.argv[2], sys.argv[3:])
elif len(sys.argv) == 2 and sys.argv[1] == 'config':
print(fazip.utils.HEADER)
do_configuration()
elif len(sys.argv) == 2 and sys.argv[1] == 'h':
print_help()
else:
print_help(True)
if __name__ == '__main__':
main()
|
python
|
import sys, os
sys.path.append(os.pardir)
from lark import Lark, Token
from parsing import grammar, cool_ast, preprocess
from parsing.cool_transformer import ToCoolASTTransformer
from checksemantic.checksemantics import CheckSemanticsVisitor
from checksemantic.scope import Scope
from cool import fetch_program
from code_gen.transpilator import codeVisitor
from code_gen.visitorMips import MIPS
def compile(parser, program, exception_on_syntax=False, CIL = False, SPIM = False):
try:
preprocessed = preprocess.preprocess_program(program)
try:
tree = parser.parse(preprocessed)
ast = ToCoolASTTransformer().transform(tree)
checkSemanticVisitor = CheckSemanticsVisitor()
scope = Scope()
errors = []
t = checkSemanticVisitor.visit(ast, scope, errors)
if t:
try:
cv = codeVisitor()
cv.visit(ast)
#For static code
static_code = os.path.join(os.pardir, 'code_gen', 'staticMipsCode.s')
mips = MIPS(cv.code, cv.data, static_code)
code_lines = mips.generate()
return t, errors, code_lines
                except Exception as e:
                    print(e)
                    return t, errors, []
except:
print('Syntax error!')
if exception_on_syntax:
raise Exception("")
return False, [], []
except:
print('Error when preprocessing!')
if exception_on_syntax:
raise Exception("")
return False, [], []
def walk(dir, parser, exception_on_syntax=False, SPIM=False):
for (curr_dir, sub_dirs, files) in os.walk(dir):
for f in files:
print(f)
program = fetch_program(os.path.join(curr_dir,f))
#print(f)
print('===================================')
            result, errors, mips_code = compile(parser, program, exception_on_syntax, False)
if not result:
for e in errors:
print(e)
print('On file %s'%(f))
else:
                print('%s: Successfully compiled!'%(f))
if SPIM:
                file_name = f[:f.index('.')] + '.s'
                path = os.path.join('Output', file_name)
                with open(path, 'w') as out_file:
                    out_file.write(mips_code)
print('===================================')
if __name__ == '__main__':
parser = Lark(grammar.grammar, start='program')
if len(sys.argv) == 3 and sys.argv[1] == '-p':
program = fetch_program(sys.argv[2])
        result, errors, mips_code = compile(parser, program, False)  # change to False before submitting
if not result:
for e in errors:
print(e)
print('On file %s'%(sys.argv[2]))
else:
            print('%s: Successfully compiled!'%(sys.argv[2]))
file_name = sys.argv[2].split(os.sep)[-1]
file_name = file_name[:file_name.index('.')] + '.s'
path = os.path.join('Output', file_name)
with open(path, 'w') as f:
f.write(mips_code)
elif len(sys.argv) == 3 and sys.argv[1] == '-r':
        walk(sys.argv[2], parser, True, True)  # change to False before submitting
elif len(sys.argv) == 1:
        walk(os.path.join('.', 'test'), parser, True)  # change to False before submitting
else:
print("Usage: python test.py [<option> <path>]")
print()
print('Available options:')
print("-p: To compile a single file. <path> must be a the path of a file.")
print("-r: To compile all the files in a directory tree. <path> must be a the path of a directory.")
print()
print("When using without options all the files on directory tree rooted at './test' are compiled")
|
python
|
#!/usr/bin/env python
# -*- animation -*-
"""
Visualize a complex-valued function
"""
import numpy as np
import gr
def domain_colors(w, n):
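    # Domain coloring: hue (H) encodes the phase of w, saturation (S) is held
    # constant, and brightness (V) is modulated by two sawtooth patterns, one
    # over the phase (n isochromatic bands per turn) and one over log|w|
    # (contour lines of constant modulus).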
H = np.mod(np.angle(w) / (2 * np.pi) + 1, 1)
m = 0.7
M = 1
isol = m + (M - m) * (H * n - np.floor(H * n))
modul = np.absolute(w)
Logm = np.log(modul)
Logm = np.nan_to_num(Logm) * n / (2 * np.pi)
modc = m + (M - m) * (Logm - np.floor(Logm))
V = modc * isol
S = 0.9 * np.ones_like(H, float)
HSV = np.dstack((H, S, V))
return HSV
def func_vals(f, re, im, N):
# evaluates the complex function at the nodes of the grid
# re and im are tuples defining the rectangular region
# N is the number of nodes per unit interval
l = re[1] - re[0]
h = im[1] - im[0]
resL = N * l # horizontal resolution
resH = N * h # vertical resolution
x = np.linspace(re[0], re[1], resL)
y = np.linspace(im[0], im[1], resH)
x, y = np.meshgrid(x, y)
z = x + 1j * y
w = f(z)
return w
def plot_domain(color_func, f, re=(-1, 1), im=(-1, 1), N=100, n=15):
w = func_vals(f, re, im, N)
domc = color_func(w, n) * 255
    height, width = domc.shape[:2]
    domc = np.append(domc, np.ones((height, width, 1)) * 255, axis=2)
domc = domc.astype(np.uint8)
domc = domc.view('<i4')
gr.clearws()
gr.setviewport(0, 1, 0, 1)
gr.drawimage(0, 1, 0, 1, width, height, domc, model=gr.MODEL_HSV)
gr.updatews()
f = lambda z: (z**2 - 1) * (z - 2 - 1j)**2 / (z**2 + 2 + 2j)
for n in range(5, 30):
plot_domain(domain_colors, f, re=(-3, 3), im=(-3, 3), n=n)
|
python
|